| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''Convert a BigBirdPegasus checkpoint from the original TF repository to the Hugging Face PyTorch format.'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T  # TF stores linear kernels transposed relative to PyTorch
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    # the single TF position-embedding table is shared by encoder and decoder
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
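# Hypothetical usage sketch (the paths below are placeholders, not real
# checkpoints): the script above is normally run from the command line as
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_ckpt --save_dir /path/to/hf_model
#
# which is equivalent to calling
#
#   convert_bigbird_pegasus_ckpt_to_pytorch("/path/to/tf_ckpt", "/path/to/hf_model", config_update={})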
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Optional[Any] = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
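# A float-free alternative sketch (our addition; the helper name is ours, not
# part of the original snippet): binary-search the integer cube root so that
# very large n cannot be misclassified by floating-point rounding.
def perfect_cube_exact(n: int) -> bool:
    target = abs(n)
    lo, hi = 0, max(1, target)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == target:
            return True
        if cube < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return False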
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
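# Worked example of the size check above (our illustration, not part of the
# original script): with NUM_SHARDS * NUM_ITEMS_PER_SHARD = 12 examples and
# world_size = 5, split_dataset_by_node gives 12 // 5 = 2 examples per rank,
# plus one extra for ranks 0 and 1 (12 % 5 = 2), i.e. local sizes 3, 3, 2, 2, 2.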
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop is present if traversal ever revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
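# A constant-memory alternative sketch (our addition, not part of the original
# snippet): Floyd's tortoise-and-hare detects a cycle with two pointers instead
# of the O(n) visited list used above.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:  # the fast pointer can only catch the slow one inside a cycle
            return True
    return False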
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
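# Worked example (our illustration): for dims = (2, 3), flat index 5 unravels to
# (1, 2) — the last axis gives 5 % 3 == 2 and the remaining 5 // 3 == 1 indexes
# the first axis, so _flat_idx_to_idx(5, (2, 3)) == (1, 2).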
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
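# Minimal usage sketch (our example, not from the original module): run a toy
# layer over the two flattened batch dimensions in pieces of 4 and check that
# chunked execution matches the unchunked result.
#
#   layer = lambda x: {"out": x * 2}
#   x = torch.randn(2, 6, 8)  # batch dims (2, 6), feature dim 8
#   out = chunk_layer(layer, {"x": x}, chunk_size=4, no_batch_dims=2)
#   assert torch.allclose(out["out"], x * 2)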
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
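# Usage sketch (ours): the tuner probes power-of-two chunk sizes up to
# max_chunk_size, keeping the largest one whose trial run does not raise a
# RuntimeError (e.g. CUDA OOM), and caches the result until the argument
# shapes change. `fn` must accept a `chunk_size` keyword argument.
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(fn, args=(x,), min_chunk_size=16)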
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
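# Usage sketch (our example): build a small config and its ONNX export helper.
#
#   config = GPTJConfig(n_layer=2, n_head=4, n_embd=128, rotary_dim=16)
#   onnx_config = GPTJOnnxConfig(config, task="default", use_past=True)
#   print(onnx_config.inputs)  # input_ids, past key/value axes, attention_mask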
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard English coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
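# Worked example (ours): for pence = 5 only the coins {1, 2, 5} matter, and the
# table ends with number_of_ways[5] == 4, counting 1+1+1+1+1, 1+1+1+2, 1+2+2,
# and 5. Iterating coins in the outer loop counts combinations, not orderings.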
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024  # attribute name assumed; the source assigned 1024 here
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def __A ( self : Dict ):
# safety check on max_len default value so we are sure the test works
A_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A_ = tempfile.mkdtemp()
A_ = " He is very happy, UNwant\u00E9d,running"
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
A_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
A_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
A_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
A_ = tempfile.mkdtemp()
A_ = " He is very happy, UNwant\u00E9d,running"
A_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
A_ = chr(0xe0_07 )
additional_special_tokens.append(UpperCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
A_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
A_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A_ , A_ = self.get_clean_sequence(UpperCAmelCase )
# a special token for Canine can be defined as follows:
A_ = 0xe0_05
A_ = chr(UpperCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
A_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , input_encoded + special_token_id )
A_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def __A ( self : Optional[Any] ):
A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A_ = chr(0xe0_05 )
A_ = chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
A_ = tokenizer.tokenize(UpperCAmelCase )
A_ = tokenizer.tokenize(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(token_a[0] , UpperCAmelCase )
self.assertEqual(token_a[0] , UpperCAmelCase )
@require_tokenizers
def __A ( self : List[Any] ):
A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
A_ = 0xe0_06
A_ = chr(UpperCAmelCase )
A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(UpperCAmelCase )
tokenizer.from_pretrained(UpperCAmelCase )
def __A ( self : int ):
A_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
A_ = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
A_ = json.load(UpperCAmelCase )
# a special token for Canine can be defined as follows:
A_ = 0xe0_06
A_ = chr(UpperCAmelCase )
A_ = [new_token_a]
A_ = [new_token_a]
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A_ = tokenizer_class.from_pretrained(UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
A_ = 0xe0_07
A_ = chr(UpperCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A_ = [AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )]
A_ = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __A ( self : Tuple ):
A_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A_ = "hello world"
if self.space_between_special_tokens:
A_ = "[CLS] hello world [SEP]"
else:
A_ = input
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
A_ = tokenizer.decode(UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(UpperCAmelCase , [output, output.lower()] )
def __A ( self : Any ):
A_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A_ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
A_ = "a"
A_ = ord(UpperCAmelCase )
for attr in attributes_list:
setattr(UpperCAmelCase , attr + "_id" , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + "_id" ) , UpperCAmelCase )
setattr(UpperCAmelCase , attr + "_id" , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + "_id" ) , UpperCAmelCase )
setattr(UpperCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens_ids" ) , [] )
A_ = 0xe0_06
A_ = chr(UpperCAmelCase )
setattr(UpperCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(UpperCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def __A ( self : int ):
pass
def __A ( self : List[str] ):
pass
def __A ( self : Tuple ):
pass
def __A ( self : Union[str, Any] ):
pass
def __A ( self : List[str] ):
pass
def __A ( self : List[Any] ):
pass
def __A ( self : Any ):
pass
def __A ( self : Any ):
pass
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : an empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """
        Returns the image with corners marked and the list of [x, y, r] corners.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # the constant validated in the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestRunEval(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n ".split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(score_path).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"\n run_eval_search.py\n {model}\n {str(input_file_name)}\n {str(output_file_name)}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n ".split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(score_path).exists()
            os.remove(Path(score_path))
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """
    Knuth-Morris-Pratt string search: returns True if `pattern` occurs in
    `text`, using the failure array to avoid re-scanning matched characters.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    For each prefix of the pattern, store the length of the longest proper
    prefix that is also a suffix of that prefix.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
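# Worked example (ours): failure[k] is the length of the longest proper prefix
# of pattern[: k + 1] that is also its suffix. For "aabaabaaa", entry 7 is 5
# because "aabaa" is both a prefix and a suffix of "aabaabaa", and the final
# entry drops to 2 because the longest border of the whole string is just "aa".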
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy  # second env var name assumed
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
def __lowercase ( self ) -> str:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_SCREAMING_SNAKE_CASE ):
_a : Tuple = self.dist_env.copy()
_a : List[str] = prefetch_policy
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : int = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __lowercase ( self ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_SCREAMING_SNAKE_CASE ):
_a : Tuple = self.dist_env.copy()
_a : Optional[Any] = state_dict_type
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __lowercase ( self ) -> List[Any]:
_a : Any = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE )
for policy in FSDP_AUTO_WRAP_POLICY:
_a : Tuple = self.dist_env.copy()
_a : Dict = policy
if policy == "TRANSFORMER_BASED_WRAP":
_a : Union[str, Any] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
_a : List[Any] = '''2000'''
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : Dict = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_SCREAMING_SNAKE_CASE )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_a : int = self.dist_env.copy()
_a : Any = '''TRANSFORMER_BASED_WRAP'''
_a : Any = '''T5Layer'''
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : str = FullyShardedDataParallelPlugin()
with self.assertRaises(_SCREAMING_SNAKE_CASE ) as cm:
fsdp_plugin.set_auto_wrap_policy(_SCREAMING_SNAKE_CASE )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
_a : int = self.dist_env.copy()
_a : Optional[int] = '''SIZE_BASED_WRAP'''
_a : Tuple = '''0'''
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : Tuple = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_SCREAMING_SNAKE_CASE )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __lowercase ( self ) -> Any:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_a : Union[str, Any] = self.dist_env.copy()
_a : List[Any] = mp_dtype
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : Optional[int] = Accelerator()
if mp_dtype == "fp16":
_a : int = torch.floataa
elif mp_dtype == "bf16":
_a : Optional[Any] = torch.bfloataa
_a : int = MixedPrecision(param_dtype=_SCREAMING_SNAKE_CASE , reduce_dtype=_SCREAMING_SNAKE_CASE , buffer_dtype=_SCREAMING_SNAKE_CASE )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _SCREAMING_SNAKE_CASE )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , _SCREAMING_SNAKE_CASE ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_SCREAMING_SNAKE_CASE )
def __lowercase ( self ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_a : Any = self.dist_env.copy()
_a : Dict = str(_SCREAMING_SNAKE_CASE ).lower()
with mockenv_context(**_SCREAMING_SNAKE_CASE ):
_a : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_SCREAMING_SNAKE_CASE ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def __lowercase ( self ) -> List[str]:
_a : Optional[int] = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
_a : str = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
_a : List[str] = cmd.copy()
for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
def __lowercase ( self ) -> List[Any]:
_a : Optional[Any] = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
_a : str = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ):
_a : int = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_a : Tuple = len(_SCREAMING_SNAKE_CASE )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_a : Dict = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
_a : List[Any] = cmd_config[:-1]
_a : Optional[int] = os.path.join(self.tmpdir , '''epoch_0''' )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
_a : List[Any] = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_a : Optional[int] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(_SCREAMING_SNAKE_CASE ):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=os.environ.copy() )
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    b'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
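# Minimal usage sketch (assumed, not part of the generated file): the builder
# calls above register `ModelProto` and friends in this module's namespace, so
# a serialized SentencePiece model can be parsed with:
#
#     m = ModelProto()
#     with open("spiece.model", "rb") as f:
#         m.ParseFromString(f.read())
#     print(m.trainer_spec.model_type, len(m.pieces))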
| 321 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 366 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 227 | 0 |
def min_path_sum(grid: list) -> int:
    """Return the minimum path sum from top-left to bottom-right, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # The first row can only be reached from the left, so turn it into prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    # Fill the remaining rows in place, each using the completed row above it.
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    # The first cell can only be reached from directly above.
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
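    # Quick sanity check on an assumed 3x3 example grid: the cheapest
    # right/down path 1 -> 3 -> 1 -> 1 -> 1 sums to 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7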
| 5 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum of a contiguous subarray in O(n) time, O(1) space."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at the current element
        # (restart at 0 when empty subarrays are allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
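
# Assumed examples, consistent with the implementation above:
#   max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6   (subarray [4, -1, 2, 1])
#   max_subarray_sum([-1, -2], allow_empty_subarrays=True) -> 0   (the empty subarray)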
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 158 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
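# Usage sketch (assumed): with the lazy module installed in sys.modules,
# importing a single symbol only materializes the submodule that defines it:
#     from transformers.models.unispeech import UniSpeechConfig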
| 361 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()
logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
if args.tokenizer_type == "bert":
A_ : int = BertTokenizer.from_pretrained(args.tokenizer_name )
A_ : Union[str, Any] = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
A_ : Any = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
A_ : Dict = RobertaTokenizer.from_pretrained(args.tokenizer_name )
A_ : List[str] = tokenizer.special_tokens_map["cls_token"] # `<s>`
A_ : Any = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
A_ : Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
A_ : Any = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
A_ : Union[str, Any] = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
A_ : Union[str, Any] = fp.readlines()
logger.info("Start encoding" )
logger.info(f"{len(_lowerCAmelCase )} examples to process." )
A_ : List[Any] = []
A_ : Tuple = 0
A_ : Union[str, Any] = 10000
A_ : Optional[int] = time.time()
for text in data:
A_ : Any = f"{bos} {text.strip()} {sep}"
A_ : List[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
rslt.append(_lowerCAmelCase )
iter += 1
if iter % interval == 0:
A_ : str = time.time()
logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
A_ : Union[str, Any] = time.time()
logger.info("Finished binarization" )
logger.info(f"{len(_lowerCAmelCase )} examples processed." )
A_ : int = f"{args.dump_file}.{args.tokenizer_name}.pickle"
A_ : List[Any] = tokenizer.vocab_size
if vocab_size < (1 << 16):
A_ : Union[str, Any] = [np.uintaa(_lowerCAmelCase ) for d in rslt]
else:
A_ : List[str] = [np.intaa(_lowerCAmelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"Dump to {dp_file}" )
with open(_lowerCAmelCase , "wb" ) as handle:
pickle.dump(rslt_ , _lowerCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
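# Example invocation (hypothetical paths):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text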
| 70 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
A__ : List[Any] = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
A__ : Dict = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
A__ : Dict = logging.get_logger(__name__)
A__ : List[Any] = """ Hello world! cécé herlolip"""
A__ : Optional[Any] = [
("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, k, v):
    val = dct.pop(k)
    dct[v] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
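# Example invocation (hypothetical output directory):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       ./bart-large-cnn --hf_config facebook/bart-large-cnn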
| 144 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Keep widening the query until enough candidates come back (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
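# Example invocation (hypothetical values):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_images --num_class_images 200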
| 285 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, default="local", help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.")
    parser.add_argument("--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    # Shard filenames encode their sample count as "-<shard>-<num_samples>.tfrecord".
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )
    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
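# Example invocation (hypothetical bucket paths):
#   python run_mlm.py --tokenizer unigram-tokenizer-wikitext --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval --output_dir ./mlm-tpu --bfloat16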
| 347 |
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __A ( ) -> None:
a = input("""Enter message: """ )
a = input("""Enter key [alphanumeric]: """ )
a = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a = """encrypt"""
a = encrypt_message(__lowerCamelCase , __lowerCamelCase )
elif mode.lower().startswith("""d""" ):
a = """decrypt"""
a = decrypt_message(__lowerCamelCase , __lowerCamelCase )
print(f'\n{mode.title()}ed message:' )
print(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = []
a = 0
a = key.upper()
for symbol in message:
a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowerCamelCase ):
a = 0
else:
translated.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
if __name__ == "__main__":
main()
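# Worked example (assumed): encrypting "ATTACKATDAWN" with key "LEMON" yields
# "LXFOPVEFRNHR"; decrypting the result with the same key restores the plaintext.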
| 347 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format=None) -> np.ndarray:
        # Pad the bottom/right edges so that height and width become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
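# Usage sketch (assumed class name and values): pad an RGB image so both sides
# become multiples of the window size a super-resolution model expects.
#     processor = Swin2SRImageProcessor(pad_size=8)
#     batch = processor(images=image, return_tensors="pt")  # batch["pixel_values"]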
| 81 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    # The three "maincounter" numbers on the page are cases, deaths, recovered.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 142 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _lowerCAmelCase ( self : Any ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] =RegNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCamelCase__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : int ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : List[Any] =RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.default_image_processor
UpperCamelCase__ : Any =prepare_img()
UpperCamelCase__ : Optional[Any] =image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict =model(**lowercase_ )
# verify the logits
UpperCamelCase__ : Union[str, Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCamelCase__ : Union[str, Any] =torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
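# A minimal standalone inference sketch mirroring the integration test above
# (not part of the original test file; the checkpoint is whatever
# REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to at runtime):
#
#     model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#     processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classification head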
| 157 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = "camembert"
def __init__( self : Optional[int] ,lowercase_ : List[str]=3_0_5_2_2 ,lowercase_ : Dict=7_6_8 ,lowercase_ : str=1_2 ,lowercase_ : int=1_2 ,lowercase_ : Any=3_0_7_2 ,lowercase_ : str="gelu" ,lowercase_ : List[str]=0.1 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : Tuple=5_1_2 ,lowercase_ : Optional[Any]=2 ,lowercase_ : Any=0.02 ,lowercase_ : Union[str, Any]=1E-12 ,lowercase_ : str=1 ,lowercase_ : Tuple=0 ,lowercase_ : Any=2 ,lowercase_ : Optional[int]="absolute" ,lowercase_ : List[Any]=True ,lowercase_ : Optional[Any]=None ,**lowercase_ : Optional[Any] ,):
super().__init__(pad_token_id=lowercase_ ,bos_token_id=lowercase_ ,eos_token_id=lowercase_ ,**lowercase_ )
lowerCAmelCase__ : Union[str, Any] = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Dict = hidden_dropout_prob
lowerCAmelCase__ : Any = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : Tuple = position_embedding_type
lowerCAmelCase__ : Any = use_cache
lowerCAmelCase__ : List[str] = classifier_dropout
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self : List[Any] ):
if self.task == "multiple-choice":
lowerCAmelCase__ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
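# Usage sketch (relies only on the standard PretrainedConfig interface; the
# output directory name below is illustrative):
#
#     config = CamembertConfig(num_hidden_layers=6, hidden_size=384)
#     config.save_pretrained("./camembert-small")              # serializes to config.json
#     reloaded = CamembertConfig.from_pretrained("./camembert-small")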
| 106 |
"""simple docstring"""
import random
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = [], [], []
for element in data:
if element < pivot:
less.append(A_ )
elif element > pivot:
greater.append(A_ )
else:
equal.append(A_ )
return less, equal, greater
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(A_ ) or index < 0:
return None
lowerCAmelCase__ : str = items[random.randint(0 , len(A_ ) - 1 )]
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = _partition(A_ , A_ )
lowerCAmelCase__ : str = len(A_ )
lowerCAmelCase__ : Optional[Any] = len(A_ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(A_ , A_ )
# must be in larger
else:
return quick_select(A_ , index - (m + count) )
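# Usage sketch: quick_select(items, k) returns the element that would sit at
# index k if `items` were sorted, so the median of an odd-length list is the
# element at index len(items) // 2.
if __name__ == "__main__":
    sample = [7, 2, 9, 4, 1]
    print(quick_select(sample, len(sample) // 2))  # prints the median: 4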
| 106 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ : List[str] ={"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
A_ : str =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
A_ : Any =None
A_ : Optional[int] =logging.get_logger(__name__)
A_ : List[str] ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
A_ : List[Any] ={
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
A_ : Any ={
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
A_ : Union[str, Any] ="""▁"""
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : str = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE__ : int = BarthezTokenizer
def __init__( self , a__=None , a__=None , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCamelCase = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , **a__ , )
_lowerCamelCase = vocab_file
_lowerCamelCase = False if not self.vocab_file else True
def snake_case_ ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
_lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ ( self , a__ , a__ = None ):
_lowerCamelCase = [self.sep_token_id]
_lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case_ ( self , a__ , a__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase = os.path.join(
a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
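# Illustrative note on the layouts built above (token ids are placeholders):
# for a sequence pair, build_inputs_with_special_tokens follows the
# RoBERTa-style pattern `<s> A </s></s> B </s>`, and
# create_token_type_ids_from_sequences returns all zeros for both layouts,
# e.g. build_inputs_with_special_tokens([10, 11], [20]) ->
# [cls_id, 10, 11, sep_id, sep_id, 20, sep_id].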
| 80 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
a_ = 'bert-base-cased'
a_ = 'google/pegasus-xsum'
a_ = [' Sam ate lunch today.', 'Sams lunch ingredients.']
a_ = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
a_ = 'patrickvonplaten/t5-tiny-random'
a_ = 'sshleifer/bart-tiny-random'
a_ = 'sshleifer/tiny-mbart'
a_ = 'sshleifer/tiny-marian-en-de'
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : str = "\n".join(UpperCamelCase__)
Path(UpperCamelCase__).open("w").writelines(UpperCamelCase__)
def lowerCamelCase__ ( _a):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(UpperCamelCase__ , f"{split}.source") , UpperCamelCase__)
_dump_articles(os.path.join(UpperCamelCase__ , f"{split}.target") , UpperCamelCase__)
return tmp_dir
class _UpperCamelCase ( __A ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCamelCase ( self : List[str] , a : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE : Any = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE : Dict = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE : List[Any] = 4
SCREAMING_SNAKE_CASE : int = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
SCREAMING_SNAKE_CASE : Optional[int] = SeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path="train" , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
SCREAMING_SNAKE_CASE : Optional[Any] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCamelCase ( self : Optional[Any] , a : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE : int = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES )
SCREAMING_SNAKE_CASE : Tuple = max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES )
SCREAMING_SNAKE_CASE : List[str] = 4
SCREAMING_SNAKE_CASE : List[str] = LegacySeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path="train" , max_source_length=20 , max_target_length=__UpperCAmelCase , )
SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
SCREAMING_SNAKE_CASE : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
SCREAMING_SNAKE_CASE : Dict = tmp_dir.joinpath("train.source" ).open().readlines()
SCREAMING_SNAKE_CASE : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__UpperCAmelCase , __UpperCAmelCase , 128 , __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = {x.name for x in tmp_dir.iterdir()}
SCREAMING_SNAKE_CASE : str = {x.name for x in save_dir.iterdir()}
SCREAMING_SNAKE_CASE : str = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__UpperCAmelCase ) < len(__UpperCAmelCase )
assert len(__UpperCAmelCase ) == 1
assert len(packed_examples[0] ) == sum(len(__UpperCAmelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self._get_dataset(max_len=64 )
SCREAMING_SNAKE_CASE : Tuple = 64
SCREAMING_SNAKE_CASE : Union[str, Any] = ds.make_dynamic_sampler(__UpperCAmelCase , required_batch_size_multiple=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = [len(__UpperCAmelCase ) for x in batch_sampler]
assert len(set(__UpperCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__UpperCAmelCase ) == len(__UpperCAmelCase ) # no dropped or added examples
SCREAMING_SNAKE_CASE : List[Any] = DataLoader(__UpperCAmelCase , batch_sampler=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
SCREAMING_SNAKE_CASE : List[Any] = []
SCREAMING_SNAKE_CASE : Optional[Any] = []
for batch in data_loader:
SCREAMING_SNAKE_CASE : Union[str, Any] = batch["input_ids"].shape
SCREAMING_SNAKE_CASE : Any = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
SCREAMING_SNAKE_CASE : int = np.product(batch["input_ids"].shape )
num_src_per_batch.append(__UpperCAmelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__UpperCAmelCase )
assert num_src_per_batch[0] == max(__UpperCAmelCase )
if failures:
raise AssertionError(F"too many tokens in {len(__UpperCAmelCase )} batches" )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = self._get_dataset(max_len=512 )
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Optional[Any] = ds.make_sortish_sampler(__UpperCAmelCase , shuffle=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = tokenizer.pad_token_id
def count_pad_tokens(a : List[str] , a : List[str]="input_ids" ):
return [batch[k].eq(__UpperCAmelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__UpperCAmelCase , k="labels" ) ) < sum(count_pad_tokens(__UpperCAmelCase , k="labels" ) )
assert sum(count_pad_tokens(__UpperCAmelCase ) ) < sum(count_pad_tokens(__UpperCAmelCase ) )
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
def __UpperCamelCase ( self : int , a : List[Any]=1000 , a : Tuple=128 ) -> List[str]:
"""simple docstring"""
if os.getenv("USE_REAL_DATA" , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE : List[Any] = "examples/seq2seq/wmt_en_ro"
SCREAMING_SNAKE_CASE : Tuple = max_len * 2 * 64
if not Path(__UpperCAmelCase ).joinpath("train.len" ).exists():
save_len_file(__UpperCAmelCase , __UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE : Any = "examples/seq2seq/test_data/wmt_en_ro"
SCREAMING_SNAKE_CASE : Optional[int] = max_len * 4
save_len_file(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE : List[Any] = SeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path="train" , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , n_obs=__UpperCAmelCase , )
return ds, max_tokens, tokenizer
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = self._get_dataset()
SCREAMING_SNAKE_CASE : List[str] = set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE : Any = set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__UpperCAmelCase ) )
assert idsa.intersection(__UpperCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCamelCase ( self : List[Any] , a : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase )
if tok_name == MBART_TINY:
SCREAMING_SNAKE_CASE : Dict = SeqaSeqDataset(
__UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
SCREAMING_SNAKE_CASE : int = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
SCREAMING_SNAKE_CASE : Dict = SeqaSeqDataset(
__UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
SCREAMING_SNAKE_CASE : Any = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__UpperCAmelCase ) == 1 if tok_name == BART_TINY else len(__UpperCAmelCase ) == 0
| 76 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]


if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 221 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
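# How the lazy pattern above behaves (illustrative): importing the package is
# cheap because _LazyModule defers the heavy torch-dependent imports until an
# attribute is first accessed:
#
#     import transformers.models.x_clip as x_clip  # no torch-heavy import yet
#     model_cls = x_clip.XCLIPModel                # first access triggers modeling_x_clip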
| 350 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: gcd(a, b) == gcd(b, a % b) until b is zero."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the same identity."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 94 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = (DPMSolverSinglestepScheduler,)
lowerCamelCase = (('''num_inference_steps''', 25),)
def UpperCAmelCase_ ( self , **_lowerCamelCase ) -> Union[str, Any]:
A_ : Union[str, Any] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**_lowerCamelCase )
return config
def UpperCAmelCase_ ( self , _lowerCamelCase=0 , **_lowerCamelCase ) -> Dict:
A_ : List[str] = dict(self.forward_default_kwargs )
A_ : int = kwargs.pop("""num_inference_steps""" , _lowerCamelCase )
A_ : List[str] = self.dummy_sample
A_ : str = 0.1 * sample
A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A_ : Tuple = self.get_scheduler_config(**_lowerCamelCase )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residuals
A_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCamelCase )
A_ : List[str] = scheduler_class.from_pretrained(_lowerCamelCase )
new_scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residuals
A_ : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A_ , A_ : Optional[int] = sample, sample
for t in range(_lowerCamelCase , time_step + scheduler.config.solver_order + 1 ):
A_ : List[Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
A_ : List[str] = new_scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase=0 , **_lowerCamelCase ) -> Tuple:
A_ : Optional[int] = dict(self.forward_default_kwargs )
A_ : Union[str, Any] = kwargs.pop("""num_inference_steps""" , _lowerCamelCase )
A_ : Tuple = self.dummy_sample
A_ : List[Any] = 0.1 * sample
A_ : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : Optional[Any] = scheduler_class(**_lowerCamelCase )
scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
A_ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowerCamelCase )
A_ : int = scheduler_class.from_pretrained(_lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
A_ : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
A_ : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
A_ : Optional[int] = new_scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self , _lowerCamelCase=None , **_lowerCamelCase ) -> Dict:
if scheduler is None:
A_ : List[Any] = self.scheduler_classes[0]
A_ : Dict = self.get_scheduler_config(**_lowerCamelCase )
A_ : Any = scheduler_class(**_lowerCamelCase )
A_ : Optional[int] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config(**_lowerCamelCase )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
A_ : Tuple = 10
A_ : Tuple = self.dummy_model()
A_ : str = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A_ : List[Any] = model(_lowerCamelCase , _lowerCamelCase )
A_ : int = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
return sample
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A_ : List[Any] = 50
A_ : int = self.dummy_model()
A_ : Any = self.dummy_sample_deter
scheduler.set_timesteps(_lowerCamelCase )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A_ : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase )
A_ : int = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
A_ : Optional[int] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Dict:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
A_ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A_ : Dict = self.full_loop(scheduler=_lowerCamelCase )
A_ : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
A_ : Any = DEISMultistepScheduler.from_config(scheduler.config )
A_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
A_ : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config )
A_ : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A_ : int = self.full_loop(scheduler=_lowerCamelCase )
A_ : Optional[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
self.check_over_configs(thresholding=_lowerCamelCase )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , algorithm_type="""dpmsolver++""" , solver_order=_lowerCamelCase , solver_type=_lowerCamelCase , )
def UpperCAmelCase_ ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowerCamelCase , solver_type=_lowerCamelCase , prediction_type=_lowerCamelCase , algorithm_type=_lowerCamelCase , )
A_ : Optional[int] = self.full_loop(
solver_order=_lowerCamelCase , solver_type=_lowerCamelCase , prediction_type=_lowerCamelCase , algorithm_type=_lowerCamelCase , )
assert not torch.isnan(_lowerCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.check_over_configs(lower_order_final=_lowerCamelCase )
self.check_over_configs(lower_order_final=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> int:
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
self.check_over_configs(variance_type=_lowerCamelCase )
self.check_over_configs(variance_type="""learned_range""" )
def UpperCAmelCase_ ( self ) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowerCamelCase , time_step=0 )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Dict = self.full_loop()
A_ : str = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Any = self.full_loop(use_karras_sigmas=_lowerCamelCase )
A_ : Union[str, Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : str = self.full_loop(prediction_type="""v_prediction""" )
A_ : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : List[Any] = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=_lowerCamelCase )
A_ : Tuple = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def UpperCAmelCase_ ( self ) -> Optional[int]:
A_ : int = self.scheduler_classes[0]
A_ : List[str] = self.get_scheduler_config(thresholding=_lowerCamelCase , dynamic_thresholding_ratio=0 )
A_ : List[Any] = scheduler_class(**_lowerCamelCase )
A_ : List[Any] = 10
A_ : Union[str, Any] = self.dummy_model()
A_ : str = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
A_ : Dict = model(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
assert sample.dtype == torch.floataa
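# Usage sketch outside the test harness (assumes diffusers is installed; the
# denoising model is whatever UNet you pair the scheduler with, named `unet`
# here purely for illustration):
#
#     sched = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
#     sched.set_timesteps(25)
#     for t in sched.timesteps:
#         model_output = unet(sample, t).sample            # hypothetical model call
#         sample = sched.step(model_output, t, sample).prev_sample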
| 344 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCAmelCase ( UpperCAmelCase : int ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_SCREAMING_SNAKE_CASE : Union[str, Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def _lowerCAmelCase ( UpperCAmelCase : int ):
'''simple docstring'''
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
UpperCamelCase__ : Union[str, Any] =[]
for num in range(len(UpperCAmelCase ) ):
UpperCamelCase__ : Tuple =0
while 2 * i * i <= odd_composites[num]:
UpperCamelCase__ : Any =odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase ) == n:
return list_nums
return []
def _lowerCAmelCase ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 157 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
@dataclass(frozen=snake_case__ )
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
@dataclass(frozen=snake_case__ )
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : Optional[int]=False , lowercase_ : bool = False , ):
UpperCamelCase__ : Tuple =hans_processors[task]()
UpperCamelCase__ : Union[str, Any] =os.path.join(
lowercase_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCamelCase__ : int =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] =label_list[2], label_list[1]
UpperCamelCase__ : List[Any] =label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase__ : Any =cached_features_file + '''.lock'''
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCamelCase__ : Optional[int] =torch.load(lowercase_ )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCamelCase__ : str =(
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('''Training examples: %s''' , len(lowercase_ ) )
UpperCamelCase__ : Tuple =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('''Saving features into cached file %s''' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ):
return len(self.features )
def __getitem__( self : Optional[int] , lowercase_ : Optional[Any] ):
return self.features[i]
def _lowerCAmelCase ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __a :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
def __init__( self : Any , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : Union[str, Any]=False , lowercase_ : bool = False , ):
UpperCamelCase__ : Any =hans_processors[task]()
UpperCamelCase__ : Tuple =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase__ , UpperCamelCase__ : Tuple =label_list[2], label_list[1]
UpperCamelCase__ : Union[str, Any] =label_list
UpperCamelCase__ : Any =processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCamelCase__ : Union[str, Any] =hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase__ : Optional[Any] =tf.data.Dataset.from_generator(
lowercase_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self : Optional[Any] ):
return self.dataset
def __len__( self : str ):
return len(self.features )
def __getitem__( self : List[str] , lowercase_ : Dict ):
return self.features[i]
def _lowerCAmelCase ( self : Dict ):
return self.label_list
class __a ( snake_case__ ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Union[str, Any] ):
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def _lowerCAmelCase ( self : Tuple , lowercase_ : Optional[int] ):
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def _lowerCAmelCase ( self : List[Any] ):
return ["contradiction", "entailment", "neutral"]
def _lowerCAmelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] ):
UpperCamelCase__ : Tuple =[]
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCamelCase__ : str ='''%s-%s''' % (set_type, line[0])
UpperCamelCase__ : str =line[5]
UpperCamelCase__ : Any =line[6]
UpperCamelCase__ : Optional[int] =line[7][2:] if line[7].startswith('''ex''' ) else line[7]
UpperCamelCase__ : str =line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def _lowerCAmelCase ( UpperCAmelCase : List[InputExample] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : PreTrainedTokenizer , ):
'''simple docstring'''
UpperCamelCase__ : List[str] ={label: i for i, label in enumerate(UpperCAmelCase )}
UpperCamelCase__ : int =[]
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
UpperCamelCase__ : str =tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , )
UpperCamelCase__ : str =label_map[example.label] if example.label in label_map else 0
UpperCamelCase__ : int =int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase , label=UpperCAmelCase , pairID=UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
_SCREAMING_SNAKE_CASE : List[str] = {
"""hans""": 3,
}
_SCREAMING_SNAKE_CASE : Tuple = {
"""hans""": HansProcessor,
}
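# Usage sketch (hypothetical method and argument names based on the standard
# DataProcessor interface; the data directory is an assumption):
#
#     processor = hans_processors["hans"]()
#     examples = processor.get_dev_examples("/path/to/hans_data")
#     features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)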
| 157 | 1 |
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
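# Behavior sketch: the deprecated alias still constructs fine but emits a
# FutureWarning, then acts exactly like its parent:
#
#     extractor = ImageGPTFeatureExtractor()  # warns
#     # ...all processing methods are inherited unchanged from ImageGPTImageProcessor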
| 143 |
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
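# Pipeline usage sketch drawn from the slow test above (checkpoint and labels
# appear in this file; the audio array is any 1-D waveform):
#
#     classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
#     # -> list of {"score": float, "label": str} entries, ranked by score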
| 143 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
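# Usage sketch (toy, linearly separable data; labels must be +1/-1 for the
# dual formulation implemented in fit):
#
#     xs = [np.asarray([1.0, 1.0]), np.asarray([-1.0, -1.0])]
#     ys = np.asarray([1, -1])
#     svc = SVC(kernel="linear")
#     svc.fit(xs, ys)
#     svc.predict(np.asarray([2.0, 2.0]))  # -> 1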
| 119 |
def exchange_sort(numbers: list) -> list:
    """Sort in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
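# Note: exchange sort always performs O(n^2) comparisons, independent of input
# order, so it is a teaching example rather than a practical sorting routine.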
| 119 | 1 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
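# Illustrative example (hypothetical file contents, not shipped with this
# module): a file whose source contains relative imports such as
# "from .pipeline_utils import X" yields ["pipeline_utils"] from the function
# above (order not guaranteed; duplicates removed by the set).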
def get_all_relative_imports(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache `module_file` from a local folder, the community pipelines folder on GitHub,
    # or a repo on the Hugging Face Hub, then copy it (and its relative imports) into the dynamic
    # modules cache so it can be imported like a regular module.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    # Resolve and cache the module file, then import the requested class from it.
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 59 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
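# Configuration class for Megatron-BERT; it stores the architecture hyperparameters
# (vocab size, hidden size, layer count, etc.) consumed when instantiating the model.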
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 59 | 1 |
"""simple docstring"""
def add(first: int, second: int) -> int:
    """
    >>> add(3, 5)
    8
    """
    # & extracts the carry bits, ^ adds without carrying; shift the carry left and repeat.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(F"{add(first, second) = }")
| 367 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowercase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # break frequency ties using reverse ETAOIN order
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    # Count how many of the six most and six least common English letters also appear
    # among the six most/least common letters of the message (score ranges 0-12).
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
import doctest
from collections import deque
import numpy as np
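# Circular convolution of two discrete-time signals, computed by building the circulant
# matrix of the second signal and multiplying it with the first signal.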
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the circulant matrix is the second signal rotated right by one more step
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places; the defaults above yield [10, 10, 6, 14]
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 244 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
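# DialoGPT pickles store the language-model head under `lm_head.decoder.weight`, while
# transformers expects `lm_head.weight`, so the key is renamed before saving.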
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 244 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
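# End-to-end tests for Apache Beam-based dataset builders: prepare a tiny dataset with the
# DirectRunner, then check the produced Arrow files, split metadata, and example contents.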
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with a flat `content` feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with a nested `a.b` feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
@require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
@require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
@require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 358 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
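# Fine-tunes a code model (microsoft/unixcoder-base-nine by default) on codeparrot/codecomplex
# to classify Java programs into 7 algorithmic-complexity classes.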
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # After each epoch, also evaluate on the training set so train/eval metrics can be compared.
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 326 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that caches the result of the first call to `fget` on the instance."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Test whether `x` is a torch/tf/jax tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)
def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert tensors and nested containers of tensors to nested lists of python scalars."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert tensors and nested containers of tensors to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
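# Base class for model outputs (transformers' ModelOutput): it behaves like both a tuple
# (integer indexing, unpacking) and an ordered dict (string keys), keeping None-valued
# fields out of the tuple view.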
class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, indexable by integer, slice, or string key."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all attributes/keys that are not None."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper that enters/exits a list of context managers as a single context manager."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    """Find the label argument names used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single level, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic transpose for numpy arrays and torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
    """Framework-agnostic reshape."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")
def squeeze(array, axis=None):
    """Framework-agnostic squeeze."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")
def expand_dims(array, axis):
    """Framework-agnostic expand_dims."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")
def tensor_size(array):
    """Framework-agnostic element count."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix each entry of an `auto_map` with the repo id it comes from."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class):
    """Infer the framework ("pt", "tf" or "flax") from a model class's MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 283 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
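# Runs Stable Diffusion inference on CPU, converting each submodule to channels-last memory
# format and optimizing it with IPEX in bfloat16 before generating a single image.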
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 283 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
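# Downloads `num_class_images` real regularization images for a class prompt by querying the
# LAION-400M kNN retrieval service, saving the images plus their captions and URLs.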
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Keep enlarging the query until enough candidate images are returned.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 48 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
lowerCamelCase = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCamelCase = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCamelCase = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 48 | 1 |
"""simple docstring"""
from math import pi, sqrt
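# Recursive gamma function for positive integers and half-integers, built on the recurrence
# gamma(num) = (num - 1) * gamma(num - 1) with base cases gamma(1) = 1 and gamma(0.5) = sqrt(pi).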
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("""\nEnter 0 to exit...""")
| 202 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
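# Lazy import table mapping submodule names to their public symbols; entries for tokenizers
# and modeling code are added only when the corresponding backend is available.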
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_UpperCamelCase = logging.get_logger(__name__)
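# Fill-mask pipeline: runs a masked-language model and returns the top-k candidate tokens for
# the tokenizer's mask token, optionally restricted to a user-supplied list of target words.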
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n    " , )
class FillMaskPipeline(Pipeline):
def __A ( self , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if self.framework == "tf":
__UpperCAmelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCAmelCase : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def __A ( self , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.get_masked_index(_a )
__UpperCAmelCase : List[Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __A ( self , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
if return_tensors is None:
__UpperCAmelCase : Optional[Any] = self.framework
__UpperCAmelCase : Union[str, Any] = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
    def _forward(self, model_inputs):
        # Run the model, then carry the input ids along so postprocessing can locate the mask position.
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=5 , __UpperCAmelCase=None ) -> Any:
'''simple docstring'''
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase : List[str] = target_ids.shape[0]
__UpperCAmelCase : Union[str, Any] = model_outputs["""input_ids"""][0]
__UpperCAmelCase : Any = model_outputs["""logits"""]
if self.framework == "tf":
__UpperCAmelCase : int = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCAmelCase : Any = outputs.numpy()
__UpperCAmelCase : List[str] = outputs[0, masked_index, :]
__UpperCAmelCase : List[Any] = stable_softmax(_a , axis=-1 )
if target_ids is not None:
__UpperCAmelCase : Union[str, Any] = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCAmelCase : int = tf.expand_dims(_a , 0 )
__UpperCAmelCase : Optional[Any] = tf.math.top_k(_a , k=_a )
__UpperCAmelCase , __UpperCAmelCase : Any = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase : Union[str, Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase : Union[str, Any] = outputs[0, masked_index, :]
__UpperCAmelCase : Optional[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCAmelCase : str = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = probs.topk(_a )
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[str] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCAmelCase : Union[str, Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase : List[str] = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase : Union[str, Any] = target_ids[p].tolist()
__UpperCAmelCase : Optional[Any] = p
# Filter padding out:
__UpperCAmelCase : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase : Tuple = self.tokenizer.decode(_a , skip_special_tokens=_a )
__UpperCAmelCase : Any = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> int:
'''simple docstring'''
if isinstance(_a , _a ):
__UpperCAmelCase : Any = [targets]
try:
__UpperCAmelCase : Dict = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase : Any = {}
__UpperCAmelCase : List[Any] = []
for target in targets:
__UpperCAmelCase : str = vocab.get(_a , _a )
if id_ is None:
__UpperCAmelCase : List[str] = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )["""input_ids"""]
if len(_a ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
__UpperCAmelCase : str = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
__UpperCAmelCase : Union[str, Any] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
__UpperCAmelCase : str = np.array(_a )
return target_ids
def __A ( self , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Dict = {}
if targets is not None:
__UpperCAmelCase : Optional[int] = self.get_target_ids(_a , _a )
__UpperCAmelCase : Any = target_ids
if top_k is not None:
__UpperCAmelCase : str = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
| 362 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
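# Minimal dense matrix class supporting indexing, addition, negation, subtraction, scalar and
# matrix multiplication, transposition, and a Sherman-Morrison style rank-one update.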
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
def __str__( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : Dict = f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
__UpperCAmelCase : Optional[Any] = 0
for row_vector in self.array:
for obj in row_vector:
__UpperCAmelCase : Union[str, Any] = max(__UpperCAmelCase , len(str(__UpperCAmelCase ) ) )
__UpperCAmelCase : Optional[int] = f'%{max_element_length}s'
# Make string and return
def single_line(__UpperCAmelCase ) -> str:
nonlocal string_format_identifier
__UpperCAmelCase : Any = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__UpperCAmelCase ) for row_vector in self.array )
return s
def __repr__( self ) -> str:
'''simple docstring'''
return str(self )
    def validate_indicies(self, loc) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def testa() -> None:
        # a^(-1) starts as the 3x3 identity matrix
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test_doctest() -> None:
        import doctest

        doctest.testmod()

    testa()
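# Cross-check of the identity implemented by `sherman_morrison` above:
#     (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u),
# valid whenever 1 + v^T A^-1 u != 0 (the method returns None otherwise).
# numpy is assumed available for this sketch only; call it manually if wanted.
def _check_sherman_morrison_with_numpy():
    import numpy as np

    a = np.eye(3)
    u = np.array([[1.0], [2.0], [-3.0]])
    v = np.array([[4.0], [-2.0], [5.0]])
    a_inv = np.linalg.inv(a)
    lhs = np.linalg.inv(a + u @ v.T)
    rhs = a_inv - (a_inv @ u @ v.T @ a_inv) / (1 + v.T @ a_inv @ u)
    assert np.allclose(lhs, rhs)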
| 16 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
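# Shape-level sketch of the scoring step in `forward` above: query token
# embeddings are matched against one class's support-token embeddings, summed
# over support tokens, and softmaxed over query positions. The tensor sizes
# below are illustrative assumptions.
def _example_start_probability():
    q_i = torch.randn(32, 768)     # one query: 32 tokens, hidden size 768
    s_start = torch.randn(5, 768)  # support tokens tagged as entity starts
    p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)  # (32,) over query tokens
    assert p_start.shape == (32,)
    return p_start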
| 158 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
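# Usage sketch: model test files instantiate this helper and call
# `run_common_tests()`. The config class and the extra kwargs below are
# illustrative assumptions.
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()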
| 158 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
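# Condensed view of the prompt-embeds equivalence checked above: passing the
# prompt as text, or as the four tensors returned by `encode_prompt`
# (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
# negative_pooled_prompt_embeds, in that order), must generate the same image
# up to 1e-4. That return ordering follows the SDXL pipeline convention and
# should be treated as an assumption here.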
| 358 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a CamelCase identifier into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
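# Behavior check for `camel_case_split` above: model prefixes are trimmed word
# by word in `get_frameworks_table`, e.g.
def _example_camel_case_split():
    assert camel_case_split("BertForMaskedLM") == ["Bert", "For", "Masked", "LM"]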
| 324 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
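# For reference, a minimal Prim's implementation over the same adjacency-list
# shape (node -> [[neighbor, cost], ...]); this is a sketch, not the imported
# `prisms_algorithm`.
def prim_mst_cost(adj, start=0):
    import heapq

    seen, total = {start}, 0
    heap = [(cost, start, neighbor) for neighbor, cost in adj[start]]
    heapq.heapify(heap)
    while heap:
        cost, _, node = heapq.heappop(heap)
        if node in seen:
            continue
        seen.add(node)
        total += cost
        for nxt, c in adj[node]:
            if nxt not in seen:
                heapq.heappush(heap, (c, node, nxt))
    return total


def _example_prim():
    adj = {0: [[1, 4], [2, 1]], 1: [[0, 4], [2, 2]], 2: [[0, 1], [1, 2]]}
    assert prim_mst_cost(adj) == 3  # MST edges: (0-2, cost 1) and (2-1, cost 2)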
| 202 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
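# How these helpers compose end to end (paths are the defaults used above; the
# device and the input tensor `x` are illustrative assumptions):
# model = load_vqgan(device)                # load_config + VQModel + checkpoint weights
# x_rec = reconstruct_with_vqgan(x, model)  # encode -> decode round trip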
| 202 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
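# The dtype switch above halves the pickle size when it can: token ids fit in
# uint16 whenever the vocabulary has fewer than 2**16 entries. Quick check of
# the threshold logic (the vocab size below is illustrative):
def _example_dtype_choice():
    vocab_size = 30522  # e.g. bert-base-uncased
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    assert dtype == np.uint16
    return dtype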
| 206 |
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
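# The update step above implements the Nagel-Schreckenberg rules:
#   1) accelerate:      v <- min(v + 1, max_speed)
#   2) keep a safe gap: v <- min(v, empty cells before the next car)
#   3) random slowdown: with probability p, v <- max(v - 1, 0)
# after which each car advances v cells around the ring (the modulo in
# `simulate`). Example run (argument values are illustrative):
def _example_simulation():
    return simulate(construct_highway(30, 4, 2), 10, 0.1, 5)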
| 206 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
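# The AGENT_TYPE_MAPPING exercised above maps a declared output type to a
# wrapper class (AgentText/AgentImage/AgentAudio) that still behaves like the
# raw value, which is why `output_types` can classify tool outputs. Quick
# illustration, relying only on the imports at the top of this file:
def _example_agent_text_roundtrip():
    assert output_types([AgentText("hello")]) == ["text"]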
| 278 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
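# Note on the score above: `optax.softmax_cross_entropy(...).mean()` is the
# per-token negative log-likelihood, so multiplying by the sequence length and
# negating recovers the summed log-likelihood of the target sequence, which is
# what EXPECTED_SCORE encodes.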
| 326 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 192 | 1 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
        cluster = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 40 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
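# Minimal sketch of the lazy-module idea used above: attribute access triggers
# the real import only on first use. This is an illustration, not the
# `transformers` `_LazyModule` implementation.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")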
| 158 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, cont1, cont2, label) into
    Transformer inputs of shape (n_batch, 2, input_len)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
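# Illustrative shape check (toy input, token ids made up): one story with two
# candidate endings becomes tensors whose first element is (n_batch, 2, input_len).
_toy_dataset = [[([10, 11], [12], [13], 0)]]  # (story, cont1, cont2, mc_label)
_toy_tensors = pre_process_datasets(
    _toy_dataset, input_len=8, cap_length=4, start_token=1, delimiter_token=2, clf_token=3
)
assert _toy_tensors[0][0].shape == (1, 2, 8)  # input_ids
assert _toy_tensors[0][1].shape == (1, 2)  # mc_token_ids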
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            num_train_optimization_steps = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 358 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 2_56,
    "vinai/phobert-large": 2_56,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
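# Small illustrative check (toy word): the word is read as a sequence of
# symbols and decomposed into its adjacent bigrams.
assert get_pairs("low") == {("l", "o"), ("o", "w")}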
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
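# Toy, self-contained illustration (values invented) of the merge-rank rule in
# `bpe` above: among the candidate pairs, the one with the lowest merge rank
# (learned earliest, hence most frequent) is merged first.
_toy_ranks = {("l", "o"): 0, ("o", "w"): 1}
_toy_pairs = {("o", "w"), ("l", "o")}
assert min(_toy_pairs, key=lambda pair: _toy_ranks.get(pair, float("inf"))) == ("l", "o")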
| 281 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """The output of UNet1DModel: the (batch, channels, sample_size) denoised sample."""

    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
| 296 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 296 | 1 |
'''simple docstring'''
def solution(n: int = 1_0_0_0) -> int:
    """Return the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
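# Quick illustrative check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
# which sum to 23.
assert solution(10) == 23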
if __name__ == "__main__":
print(f'{solution() = }')
| 350 |
'''simple docstring'''
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 334 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 106 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
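# Illustrative check (toy tree, not from the original file): the root holds 3
# coins and both leaves hold 0, so one coin travels down each edge -> 2 moves.
assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2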
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
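# Hypothetical usage sketch (feature values invented for illustration):
# aligning the template with a dataset's features swaps in that dataset's
# concrete `Audio` column, preserving e.g. its sampling rate.
_features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
_aligned = AutomaticSpeechRecognition().align_with_features(_features)
assert _aligned.input_schema["audio"].sampling_rate == 16_000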
| 352 |
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution():
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 100_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
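# Quick illustrative checks of the helper: 918273645 uses each digit 1-9 once,
# while 112345678 repeats a digit and misses 9.
assert is_9_pandigital(918_273_645)
assert not is_9_pandigital(112_345_678)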
if __name__ == "__main__":
print(f'{solution() = }')
| 182 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as RegNet does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on a COCO sample image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 173 |
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
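# Illustrative check (toy operands): 0b1010 OR 0b0100 == 0b1110.
assert binary_or(10, 4) == "0b1110"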
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 361 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
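# Illustrative check (toy items): with capacity 5 the whole first item
# (value 10, weight 4) is taken, then half of the second (value 4, weight 2),
# for a total value of 12.
_mv, _fr = fractional_knapsack([10, 4], [4, 2], 5)
assert _mv == 12.0 and _fr == [1, 0.5]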
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 0 |
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths can close into a polygon: the longest
    side must be shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
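# Illustrative checks: 3-4-5 closes into a triangle; 1-1-10 cannot.
assert check_polygon([3, 4, 5])
assert not check_polygon([1, 1, 10])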
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    so image embeddings can be normalized before noising and un-normalized afterwards.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 7_6_8):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
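# Illustrative round trip (toy embedding size; values random): with the default
# zero mean and unit std, unscale(scale(x)) recovers x exactly.
_norm = StableUnCLIPImageNormalizer(embedding_dim=4)
_x = torch.randn(1, 4)
assert torch.allclose(_norm.unscale(_norm.scale(_x)), _x)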
| 286 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 173 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.1_5},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
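# Illustrative check (toy vocab including the four specials the function
# expects): "@@" continuation markers are stripped, word-final tokens gain
# "</w>", and the special tokens are restored verbatim.
_toy_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "new@@": 4, "er": 5}
_toy_out = rewrite_dict_keys(_toy_vocab)
assert _toy_out["new"] == 4 and _toy_out["er</w>"] == 5 and _toy_out["<s>"] == 0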
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_dir = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
lowercase : int = hub_utils.from_pretrained(
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, archive_map=_UpperCamelCase, **_UpperCamelCase )
lowercase : int = vars(chkpt['''args''']['''model'''] )
lowercase : Union[str, Any] = args['''source_lang''']
lowercase : Dict = args['''target_lang''']
lowercase : Optional[int] = dirname(_UpperCamelCase )
lowercase : str = basename(_UpperCamelCase )
# dicts
lowercase : Optional[Any] = os.path.join(_UpperCamelCase, f"""dict.{src_lang}.txt""" )
lowercase : Any = os.path.join(_UpperCamelCase, f"""dict.{tgt_lang}.txt""" )
lowercase : Union[str, Any] = Dictionary.load(_UpperCamelCase )
lowercase : List[Any] = rewrite_dict_keys(src_dict.indices )
lowercase : List[str] = len(_UpperCamelCase )
lowercase : Tuple = os.path.join(_UpperCamelCase, '''vocab-src.json''' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(_UpperCamelCase, ensure_ascii=_UpperCamelCase, indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
# merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
# model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
    model_conf = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.0_2,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
# tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
# model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 173 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
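# The helper functions below build (Hugging Face parameter name, original
# checkpoint parameter name) rename pairs for each CvT stage; the conversion
# loop then copies each original weight into an OrderedDict keyed by the HF name.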
def embeddings(idx):
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint (.pth) file.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
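# Example invocation (the paths are illustrative and not shipped with the repo):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-w24 \
#       --image_size 384 --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24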
| 16 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
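# ProphetNet's tokenizer reuses BERT's BasicTokenizer/WordpieceTokenizer machinery
# (see the imports above), so the low-level tests below mirror the BERT tokenizer tests.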
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 214 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
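# The testers below exercise a deliberately tiny MobileNetV1 configuration so
# that the shape and hidden-state checks run quickly on CPU.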
class MobileNetVaConfigTester(ConfigTester):
    """simple docstring"""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 260 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "camembert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 260 | 1 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit (1, 11, 111, ...) divisible by `divisor`."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k + 1) = 10 * R(k) + 1; tracking the repunit modulo `divisor` keeps the numbers small
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor, coprime to 10, whose repunit period first exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
| 370 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
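# Illustrative usage (hypothetical caller): consume a deprecated kwarg while
# emitting a FutureWarning, recovering its value if it was still passed.
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)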
| 20 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
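# Example invocation (hypothetical paths):
#   python utils/check_tf_ops.py --saved_model_path ./saved_model/1 --opset 12 --strict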
| 217 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 217 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end timestamps and duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
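# Sketch of the returned mapping (values illustrative):
#   {"run_tests_torch": {"started_at": "...", "completed_at": "...", "duration": 42}, ...}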
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 365 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
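# Worked example: 12 = 4 + 4 + 4, so the function returns 3; by Lagrange's
# four-square theorem the answer for any natural number is at most 4.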
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
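# DiT is class-conditional: the pipeline consumes ImageNet class ids rather than
# text prompts, hence the CLASS_CONDITIONED_IMAGE_GENERATION parameter sets below.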
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name reconstructed; the original name was mangled in this copy
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2,
            in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
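
# A minimal usage sketch for the pipeline exercised above (the checkpoint name is
# taken from the integration test; the prompt and step count are illustrative
# assumptions):
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_ids, generator=torch.manual_seed(0), num_inference_steps=25).images[0]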
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
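
# Shape of the markdown produced above (the two entries below are placeholders,
# not real stories; the live feed varies):
#
#     * [Some story title](https://example.com/story)
#     * [Another story title](https://example.com/other)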
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Only the values (False/True/False) survive in the source; the flag names are assumptions.
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
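
# A standalone sketch of driving the model tester above outside unittest
# discovery (the wiring mirrors setUp/test_squeezebert_model; illustrative only):
#
#     class _Parent(unittest.TestCase):
#         def runTest(self):
#             tester = SqueezeBertModelTester(self)
#             tester.create_and_check_squeezebert_model(*tester.prepare_config_and_inputs())
#
#     _Parent("runTest").run()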
def cramers_rule_2x2(equation1, equation2):
    """
    Solves the system a1*x + b1*y = c1, a2*x + b2*y = c2 via Cramer's rule.
    Each equation is given as [a, b, c].
    """
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
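
# Quick hedged demo (function name as restored above): solves
#   x + 2y = 3
#   2x + y = 3
# whose unique solution is (1.0, 1.0).
if __name__ == "__main__":
    assert cramers_rule_2x2([1, 2, 3], [2, 1, 3]) == (1.0, 1.0)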
'''simple docstring'''
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        attention_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        attention_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        attention_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (the Flax parameter paths below are reconstructed from the Flax T5 module layout)
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][attention_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][attention_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][attention_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][attention_name]["v"]["kernel"] = t5x_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][attention_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["mlp"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["mlp"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["mlp"]["wi"]["kernel"] = t5x_mlp_wi
        flax_model_encoder_layer_block["1"]["mlp"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][attention_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][attention_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning (the Flax parameter paths below are reconstructed from the Flax T5 module layout)
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["mlp"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["mlp"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["mlp"]["wi"]["kernel"] = t5x_mlp_wi
        flax_model_decoder_layer_block["2"]["mlp"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding
    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
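
# A hypothetical invocation sketch (paths and model name are placeholders, not
# from the original file), using only the flags defined above:
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /path/to/output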
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
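
# A minimal usage sketch: the classes above only swap in MT5Config, so they are
# used exactly like their TFT5 counterparts (checkpoint name is illustrative):
#
#     from transformers import AutoTokenizer, TFMT5ForConditionalGeneration
#
#     tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#     model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
#     batch = tokenizer("translate English to German: Hello", return_tensors="tf")
#     output_ids = model.generate(**batch)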
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
# The original class name is not recoverable from the source; CLIPImageProcessor
# matches the defaults used here (224px shortest edge, OPENAI_CLIP_MEAN/STD).
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
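
# A minimal usage sketch (the dummy image and the restored class name are
# assumptions made for illustration):
#
#     import numpy as np
#
#     processor = CLIPImageProcessor()
#     dummy = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
#     batch = processor.preprocess(dummy)
#     print(batch["pixel_values"][0].shape)  # (3, 224, 224)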
import math
def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a; its positive root is sqrt(a)
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Square root obtained with Newton's method on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
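
# Quick hedged demo (not part of the original file): Newton's method above
# should agree with math.sqrt to well within the tolerance used.
def _demo_newton_sqrt() -> None:
    for a in (2.0, 10.0, 144.0):
        assert abs(square_root_iterative(a) - math.sqrt(a)) < 1e-9


if __name__ == "__main__":
    _demo_newton_sqrt()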
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)
    @slow
    def test_causal_lm_model_from_pretrained(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_masked_lm_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
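
# A condensed sketch of the registration pattern exercised in
# test_new_model_registration above (reusing the dummy classes defined at the
# top of this file; illustrative only):
#
#     AutoConfig.register("new-model", NewModelConfig)
#     TFAutoModel.register(NewModelConfig, TFNewModel)
#     model = TFAutoModel.from_config(NewModelConfig())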
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
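
# Behavior sketch for the lazy module set up above (nothing is actually imported
# until the attribute is touched; illustrative only):
#
#     from transformers import SEWConfig   # resolves configuration_sew only
#     from transformers import SEWModel    # resolves modeling_sew (requires torch)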
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):  # fixture name assumed from the upstream test
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_35_15_63,
"num_examples": 1_00_00,
},
{
"name": "validation",
"num_bytes": 23_84_18,
"num_examples": 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
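
# Quick standalone check of the +/-1% tolerance helper defined above:
assert is_1percent_close(2_351_563, 2_355_000)
assert not is_1percent_close(2_351_563, 2_500_000)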
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCAmelCase : List[str] = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
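
# A hypothetical invocation sketch (TPU name/zone are placeholders), using only
# the flags defined by the parser above:
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-b \
#       --command "echo hello" --install_accelerate --debug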
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
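    # Note: the exported signature above pins the sequence length, so the SavedModel accepts any
    # batch size but only inputs of exactly `input_length` tokens; the next test pins the batch
    # size instead and leaves the sequence length dynamic.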
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has TF-specific setup: generation relies on random sampling, forced onto CPU for determinism
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # Let's create a fake model that accepts an extra "foo" argument: generate() should filter it
        # out of the encoder kwargs based on the encoder's call signature.
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 273 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
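    # the kwargs dict above is unpacked straight into ViTImageProcessor(**kwargs) by the test class below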
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 76 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
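# Quick sanity check of the pattern above (illustrative, not part of the original script):
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]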
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 351 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
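# With this pattern, `import transformers.models.x_clip` stays cheap: heavy submodules such as
# modeling_x_clip are only imported when an attribute like XCLIPModel is first accessed.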
| 119 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
a : Dict = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
a : str = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
a : Optional[Any] = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
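# Illustrative toy check (not part of the original metric): with already-aligned vectors such as
# precision_at_10([[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]) each row retrieves itself
# at rank 1, so the returned precision@10 is 1.0.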
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ), codebase_urls=[], reference_urls=[], format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None, )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 147 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 147 | 1 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc spanning `angle` degrees on a circle of radius `radius`."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
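# Sanity check (illustrative): a 90-degree arc is a quarter of the circumference,
# so arc_length(90, 10) == 0.25 * 2 * pi * 10 ≈ 15.71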
| 355 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that calls the model's `_hf_hook.pre_forward` (installed by accelerate's CPU
    offloading) before running the decorated method, so offloaded weights are moved to the
    execution device first. This is a no-op for accelerate versions below 0.17.0.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
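# Usage sketch (hypothetical class, for illustration only):
#
#   class VaeWrapper:
#       @apply_forward_hook
#       def encode(self, x):
#           ...
#
# Calling `encode` then triggers accelerate's pre-forward offload hook first, when one is attached.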
| 210 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample: PILImageResampling,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
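    # Example of the rounding above: with size_divisor=32, a 1080x1920 input is resized to
    # 1056x1920, since each side is rounded *down* to the nearest multiple of 32.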
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 38 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
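# Minimal usage sketch (illustrative): the config is built with defaults and tweaked per model, e.g.
# config = PoolFormerConfig(drop_path_rate=0.1)
# assert config.num_encoder_blocks == 4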
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 101 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Count the hollow square laminae that can be formed with up to `limit` tiles (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
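# Illustrative check: with limit=8 only the 3x3 lamina with a 1x1 hole (8 tiles) fits,
# so solution(8) == 1.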
if __name__ == "__main__":
print(f"""{solution() = }""")
| 228 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers layout."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
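# Example: str(torch.float16) is "torch.float16", so get_dtype_size(torch.float16) == 2 bytes;
# torch.bool is special-cased to 1/8 (one bit per element).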
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 228 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCamelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
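# e.g. ids_tensor((2, 3), vocab_size=5) -> int32 array of shape (2, 3) with entries in [0, 4]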
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 59 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation with Stable Diffusion that tries to keep images
    generated from the same seed visually similar across different output resolutions.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 265 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case_ :
@staticmethod
def __UpperCamelCase ( *lowercase_ : Optional[Any] , **lowercase_ : Dict ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class snake_case_ ( unittest.TestCase ):
__A : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __UpperCamelCase ( self : Any , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : List[str] ) -> Any:
lowercase__ : List[Any] = ObjectDetectionPipeline(model=lowercase_ , image_processor=lowercase_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __UpperCamelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ) -> Tuple:
lowercase__ : str = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(lowercase_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
"score": ANY(lowercase_ ),
"label": ANY(lowercase_ ),
"box": {"xmin": ANY(lowercase_ ), "ymin": ANY(lowercase_ ), "xmax": ANY(lowercase_ ), "ymax": ANY(lowercase_ )},
} , )
import datasets
lowercase__ : List[str] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowercase__ : int = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
]
lowercase__ : List[Any] = object_detector(lowercase_ , threshold=0.0 )
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for outputs in batch_outputs:
self.assertGreater(len(lowercase_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowercase_ , {
"score": ANY(lowercase_ ),
"label": ANY(lowercase_ ),
"box": {"xmin": ANY(lowercase_ ), "ymin": ANY(lowercase_ ), "xmax": ANY(lowercase_ ), "ymax": ANY(lowercase_ )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
pass
@require_torch
def __UpperCamelCase ( self : List[str] ) -> Any:
lowercase__ : Tuple = "hf-internal-testing/tiny-detr-mobilenetsv3"
lowercase__ : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(lowercase_ )
lowercase__ : List[Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
lowercase__ : Dict = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
lowercase__ : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
] , )
lowercase__ : Dict = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
lowercase__ : List[Any] = "facebook/detr-resnet-50"
lowercase__ : int = AutoModelForObjectDetection.from_pretrained(lowercase_ )
lowercase__ : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
lowercase__ : List[str] = ObjectDetectionPipeline(model=lowercase_ , feature_extractor=lowercase_ )
lowercase__ : Any = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
lowercase__ : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : Dict ) -> Dict:
lowercase__ : List[Any] = "facebook/detr-resnet-50"
lowercase__ : Tuple = pipeline("object-detection" , model=lowercase_ )
lowercase__ : Dict = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
lowercase__ : List[Any] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
lowercase__ : Union[str, Any] = 0.99_85
lowercase__ : int = "facebook/detr-resnet-50"
lowercase__ : Tuple = pipeline("object-detection" , model=lowercase_ )
lowercase__ : Optional[int] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=lowercase_ )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
lowercase__ : List[str] = "Narsil/layoutlmv3-finetuned-funsd"
lowercase__ : List[Any] = 0.99_93
lowercase__ : Optional[Any] = pipeline("object-detection" , model=lowercase_ , threshold=lowercase_ )
lowercase__ : Optional[Any] = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
] , )
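# Minimal usage sketch for the pipeline exercised above. The checkpoint and
# image URL come from the slow tests; running this needs network access.
from transformers import pipeline
detector = pipeline("object-detection", model="facebook/detr-resnet-50")
detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
for det in detections:
    # each entry is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
    print(det["label"], round(det["score"], 4), det["box"])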
| 369 |
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    # extended Euclidean algorithm: (u1, u2, u3) and (v1, v2, v3) keep the
    # invariants u1 * a + u2 * m == u3 and v1 * a + v2 * m == v3
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
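# Quick sanity check for the helpers above (values chosen for illustration,
# not taken from the original source): 4 is the inverse of 3 modulo 11
# because 3 * 4 % 11 == 1.
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert find_mod_inverse(3, 11) == 4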
| 333 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0) )
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    '''simple docstring'''
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(input_list ) , p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list , low , mid , high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
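# A small sanity check for iter_merge_sort above (input values invented for
# illustration): the result should agree with Python's built-in sorted().
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == sorted([5, 9, 8, 7, 1, 2, 7])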
| 43 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowercase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(
lowerCAmelCase , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class __snake_case ( lowerCAmelCase ):
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if self.framework == "tf":
lowercase : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowercase : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=snake_case )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = self.get_masked_index(snake_case )
lowercase : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,f"No mask_token ({self.tokenizer.mask_token}) found on the input" ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,**snake_case ):
'''simple docstring'''
if return_tensors is None:
lowercase : int = self.framework
lowercase : Optional[Any] = self.tokenizer(snake_case ,return_tensors=snake_case )
self.ensure_exactly_one_mask_token(snake_case )
return model_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Optional[int] = self.model(**snake_case )
lowercase : Tuple = model_inputs["""input_ids"""]
return model_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=5 ,snake_case=None ):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
lowercase : str = target_ids.shape[0]
lowercase : Optional[Any] = model_outputs["""input_ids"""][0]
lowercase : List[str] = model_outputs["""logits"""]
if self.framework == "tf":
lowercase : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowercase : Tuple = outputs.numpy()
lowercase : Tuple = outputs[0, masked_index, :]
lowercase : Any = stable_softmax(snake_case ,axis=-1 )
if target_ids is not None:
lowercase : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case ,0 ) ,target_ids.reshape(-1 ,1 ) )
lowercase : int = tf.expand_dims(snake_case ,0 )
lowercase : Tuple = tf.math.top_k(snake_case ,k=snake_case )
lowercase , lowercase : int = topk.values.numpy(), topk.indices.numpy()
else:
lowercase : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id ,as_tuple=snake_case ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowercase : Union[str, Any] = outputs[0, masked_index, :]
lowercase : Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
lowercase : List[str] = probs[..., target_ids]
lowercase , lowercase : Union[str, Any] = probs.topk(snake_case )
lowercase : Any = []
lowercase : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() ,predictions.tolist() ) ):
lowercase : Dict = []
for v, p in zip(_values ,_predictions ):
# Copy is important since we're going to modify this array in place
lowercase : Dict = input_ids.numpy().copy()
if target_ids is not None:
lowercase : Union[str, Any] = target_ids[p].tolist()
lowercase : Tuple = p
# Filter padding out:
lowercase : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowercase : Tuple = self.tokenizer.decode(snake_case ,skip_special_tokens=snake_case )
lowercase : Optional[Any] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(snake_case )
result.append(snake_case )
if single_mask:
return result[0]
return result
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
lowercase : List[Any] = [targets]
try:
lowercase : List[str] = self.tokenizer.get_vocab()
except Exception:
lowercase : Any = {}
lowercase : Dict = []
for target in targets:
lowercase : Dict = vocab.get(snake_case ,snake_case )
if id_ is None:
lowercase : Optional[int] = self.tokenizer(
snake_case ,add_special_tokens=snake_case ,return_attention_mask=snake_case ,return_token_type_ids=snake_case ,max_length=1 ,truncation=snake_case ,)["""input_ids"""]
if len(snake_case ) == 0:
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
lowercase : Union[str, Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"The specified target token `{target}` does not exist in the model vocabulary. "
f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
lowercase : Optional[Any] = list(set(snake_case ) )
if len(snake_case ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
lowercase : Optional[Any] = np.array(snake_case )
return target_ids
def _SCREAMING_SNAKE_CASE ( self ,snake_case=None ,snake_case=None ):
'''simple docstring'''
lowercase : Dict = {}
if targets is not None:
lowercase : str = self.get_target_ids(snake_case ,snake_case )
lowercase : List[Any] = target_ids
if top_k is not None:
lowercase : List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" ,self.model.base_model_prefix ,"""The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self ,snake_case ,*snake_case ,**snake_case ):
'''simple docstring'''
lowercase : Tuple = super().__call__(snake_case ,**snake_case )
if isinstance(snake_case ,snake_case ) and len(snake_case ) == 1:
return outputs[0]
return outputs
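# Minimal usage sketch for the FillMaskPipeline above, assuming the standard
# transformers entry point; the checkpoint name is illustrative.
from transformers import pipeline
unmasker = pipeline("fill-mask", model="distilroberta-base")
for pred in unmasker("Paris is the <mask> of France.", top_k=2):
    # each prediction carries the fields built in postprocess: score, token,
    # token_str and the fully decoded sequence
    print(pred["token_str"], round(pred["score"], 4))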
| 20 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase_ : List[Any] = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
lowerCamelCase_ : Optional[int] = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
@lru_cache()
def A__ ( ) -> Dict:
UpperCamelCase_: Dict = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCamelCase_: Optional[int] = bs[:]
UpperCamelCase_: List[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase )
cs.append(2**8 + n )
n += 1
UpperCamelCase_: Dict = [chr(lowerCamelCase ) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase ) )
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: int = set()
UpperCamelCase_: Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase_: Tuple = char
return pairs
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : str = VOCAB_FILES_NAMES
__UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : str="replace" , snake_case_ : Any="<s>" , snake_case_ : Optional[int]="</s>" , snake_case_ : Union[str, Any]="</s>" , snake_case_ : List[Any]="<s>" , snake_case_ : List[Any]="<unk>" , snake_case_ : Any="<pad>" , snake_case_ : Union[str, Any]="<mask>" , snake_case_ : Union[str, Any]=False , **snake_case_ : Optional[Any] , ):
UpperCamelCase_: Optional[int] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token
UpperCamelCase_: List[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token
UpperCamelCase_: Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token
UpperCamelCase_: Optional[Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token
UpperCamelCase_: Any = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token
UpperCamelCase_: Tuple = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase_: Union[str, Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
with open(snake_case_ , encoding="""utf-8""" ) as vocab_handle:
UpperCamelCase_: str = json.load(snake_case_ )
UpperCamelCase_: Tuple = {v: k for k, v in self.encoder.items()}
UpperCamelCase_: List[Any] = errors # how to handle errors in decoding
UpperCamelCase_: List[str] = bytes_to_unicode()
UpperCamelCase_: int = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case_ , encoding="""utf-8""" ) as merges_handle:
UpperCamelCase_: Optional[int] = merges_handle.read().split("""\n""" )[1:-1]
UpperCamelCase_: Any = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase_: List[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: List[str] = {}
UpperCamelCase_: int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase_: Any = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return len(self.encoder )
def lowerCAmelCase__ ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[str] ):
if token in self.cache:
return self.cache[token]
UpperCamelCase_: Optional[int] = tuple(snake_case_ )
UpperCamelCase_: List[Any] = get_pairs(snake_case_ )
if not pairs:
return token
while True:
UpperCamelCase_: Any = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase_, UpperCamelCase_: int = bigram
UpperCamelCase_: Optional[Any] = []
UpperCamelCase_: Dict = 0
while i < len(snake_case_ ):
try:
UpperCamelCase_: Union[str, Any] = word.index(snake_case_ , snake_case_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCamelCase_: Optional[Any] = j
if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase_: Any = tuple(snake_case_ )
UpperCamelCase_: Tuple = new_word
if len(snake_case_ ) == 1:
break
else:
UpperCamelCase_: Dict = get_pairs(snake_case_ )
UpperCamelCase_: Optional[Any] = """ """.join(snake_case_ )
UpperCamelCase_: Optional[int] = word
return word
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int ):
UpperCamelCase_: Tuple = []
for token in re.findall(self.pat , snake_case_ ):
UpperCamelCase_: Union[str, Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(""" """ ) )
return bpe_tokens
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Dict ):
return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : str ):
return self.decoder.get(snake_case_ )
def lowerCAmelCase__ ( self : Any , snake_case_ : List[str] ):
UpperCamelCase_: str = """""".join(snake_case_ )
UpperCamelCase_: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowerCAmelCase__ ( self : str , snake_case_ : str , snake_case_ : Optional[str] = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_: Optional[Any] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_: int = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + """\n""" )
UpperCamelCase_: List[Any] = 0
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCamelCase_: Optional[int] = token_index
writer.write(""" """.join(snake_case_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self : Any , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_: Tuple = [self.cls_token_id]
UpperCamelCase_: Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
UpperCamelCase_: Optional[int] = [self.sep_token_id]
UpperCamelCase_: Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self : str , snake_case_ : List[str] , snake_case_ : Any=False , **snake_case_ : Optional[Any] ):
UpperCamelCase_: Tuple = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()):
UpperCamelCase_: Tuple = """ """ + text
return (text, kwargs)
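# Illustrative walk-through of a single BPE merge step, mirroring the bpe()
# method above on a toy vocabulary (pair ranks invented for the example):
toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
word = ("l", "o", "w")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
best = min(pairs, key=lambda pair: toy_ranks.get(pair, float("inf")))
assert best == ("l", "o")  # the lowest-ranked pair is the one merged first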
| 223 |
def A__ ( txt: str ) -> list:
    """Return every variant of ``txt`` with exactly one alphabetic character uppercased.

    >>> A__("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    __import__("""doctest""").testmod()
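# Non-alphabetic characters are skipped entirely, so punctuation never gets a
# variant of its own (example values invented):
assert A__("hi!") == ["Hi!", "hI!"]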
| 223 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__A : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def UpperCamelCase_ ( A__ : List[Any] ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowerCAmelCase_ : Tuple = k.replace(__snake_case , __snake_case )
return k
def UpperCamelCase_ ( A__ : Optional[int] , A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : Dict = DEFAULTS.copy()
cfg_kwargs.update(__snake_case )
lowerCAmelCase_ : Any = PegasusConfig(**__snake_case )
lowerCAmelCase_ : Optional[Any] = PegasusForConditionalGeneration(__snake_case )
lowerCAmelCase_ : Optional[Any] = torch_model.model.state_dict()
lowerCAmelCase_ : Tuple = {}
for k, v in tf_weights.items():
lowerCAmelCase_ : Any = rename_state_dict_key(__snake_case )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
lowerCAmelCase_ : Any = v.T
lowerCAmelCase_ : Dict = torch.tensor(__snake_case , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
lowerCAmelCase_ : List[str] = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
lowerCAmelCase_ : List[str] = mapping["""shared.weight"""]
lowerCAmelCase_ : Tuple = mapping["""shared.weight"""]
lowerCAmelCase_ : Optional[int] = {k: torch.zeros_like(__snake_case ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**__snake_case )
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = torch_model.model.load_state_dict(__snake_case , strict=__snake_case )
lowerCAmelCase_ : Union[str, Any] = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def UpperCamelCase_ ( A__ : str="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
lowerCAmelCase_ : int = tf.train.list_variables(__snake_case )
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : Tuple = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(__snake_case , desc="""converting tf checkpoint to dict""" ):
lowerCAmelCase_ : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCAmelCase_ : Optional[Any] = tf.train.load_variable(__snake_case , __snake_case )
lowerCAmelCase_ : Optional[int] = array
return tf_weights
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : str ):
'''simple docstring'''
lowerCAmelCase_ : Any = Path(__snake_case ).parent.name
lowerCAmelCase_ : Tuple = task_specific_params[f'summarization_{dataset}']["""max_position_embeddings"""]
lowerCAmelCase_ : List[str] = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=__snake_case )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__snake_case )
# convert model
lowerCAmelCase_ : str = get_tf_weights_as_numpy(__snake_case )
lowerCAmelCase_ : List[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
lowerCAmelCase_ : Tuple = task_specific_params
lowerCAmelCase_ : Dict = convert_pegasus(__snake_case , __snake_case )
torch_model.save_pretrained(__snake_case )
lowerCAmelCase_ : Optional[int] = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(__snake_case , Path(__snake_case ) / """pytorch_model.bin""" )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__A : Union[str, Any] = parser.parse_args()
if args.save_dir is None:
__A : Optional[int] = Path(args.tf_ckpt_path).parent.name
__A : Optional[int] = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
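# Tiny sketch of the rename_state_dict_key idea above: chained str.replace()
# over a pattern table (sample key and the trimmed table are illustrative):
sample_patterns = [["attention", "attn"], ["/", "."], ["kernel", "weight"]]
key = "encoder/layer_0/attention/kernel"
for tf_name, hf_name in sample_patterns:
    key = key.replace(tf_name, hf_name)
assert key == "encoder.layer_0.attn.weight"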
| 120 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> str:
"""simple docstring"""
with open(__snake_case ) as metadata_file:
_UpperCamelCase = json.load(__snake_case )
_UpperCamelCase = LukeConfig(use_entity_aware_attention=__snake_case, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCamelCase = torch.load(__snake_case, map_location='''cpu''' )
# Load the entity vocab file
_UpperCamelCase = load_entity_vocab(__snake_case )
_UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase = AddedToken('''<ent>''', lstrip=__snake_case, rstrip=__snake_case )
_UpperCamelCase = AddedToken('''<ent2>''', lstrip=__snake_case, rstrip=__snake_case )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case, LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(__snake_case, __snake_case )
_UpperCamelCase = LukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
_UpperCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase = state_dict[prefix + matrix_name]
_UpperCamelCase = state_dict[prefix + matrix_name]
_UpperCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_UpperCamelCase = LukeModel(config=__snake_case ).eval()
_UpperCamelCase , _UpperCamelCase = model.load_state_dict(__snake_case, strict=__snake_case )
if not (len(__snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(__snake_case )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
_UpperCamelCase = LukeTokenizer.from_pretrained(__snake_case, task='''entity_classification''' )
_UpperCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCamelCase = (39, 42)
_UpperCamelCase = tokenizer(__snake_case, entity_spans=[span], add_prefix_space=__snake_case, return_tensors='''pt''' )
_UpperCamelCase = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
_UpperCamelCase = torch.Size((1, 42, 10_24) )
_UpperCamelCase = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCamelCase = torch.Size((1, 42, 7_68) )
_UpperCamelCase = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], __snake_case, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCamelCase = torch.Size((1, 1, 10_24) )
_UpperCamelCase = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCamelCase = torch.Size((1, 1, 7_68) )
_UpperCamelCase = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], __snake_case, atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__snake_case ) )
model.save_pretrained(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {}
with open(__snake_case, '''r''', encoding='''utf-8''' ) as f:
for index, line in enumerate(__snake_case ):
_UpperCamelCase , _UpperCamelCase = line.rstrip().split('''\t''' )
_UpperCamelCase = index
return entity_vocab
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_a = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
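# The entity vocab consumed by load_entity_vocab above is a TSV with one
# "<entity_title>\t<second_field>" pair per line; the entity id is simply the
# line index. A minimal sketch of the same parsing on in-memory lines:
sample_lines = ["[PAD]\t0", "[MASK]\t0", "Ana Ivanovic\t0"]
entity_vocab = {line.split("\t")[0]: idx for idx, line in enumerate(sample_lines)}
assert entity_vocab["[MASK]"] == 1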
| 194 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __A ( _lowercase , _lowercase=False ):
'''simple docstring'''
try:
_A = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_A = default
else:
# KEY is set, convert it to True or False.
try:
_A = strtobool(UpperCAmelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
__A = parse_flag_from_env('RUN_SLOW', default=False)
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skip('''Test was skipped''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(UpperCAmelCase__ )
def __A ( _lowercase=None , _lowercase=None ):
'''simple docstring'''
if test_case is None:
return partial(UpperCAmelCase__ , version=UpperCAmelCase__ )
return unittest.skipUnless(is_torch_version('''>=''' , UpperCAmelCase__ ) , f"""test requires torch version >= {version}""" )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(UpperCAmelCase__ )
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(UpperCAmelCase__ )
__A = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __A ( _lowercase ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(UpperCAmelCase__ )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
A_ = True
@classmethod
def __A ( cls: List[str] ) -> Union[str, Any]:
_A = tempfile.mkdtemp()
@classmethod
def __A ( cls: List[Any] ) -> Any:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __A ( self: Optional[Any] ) -> List[str]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCamelCase__ )
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Dict ) -> Dict:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Dict , __A: Any ) -> Optional[int]:
_A = mocks if isinstance(UpperCamelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __A ( _lowercase ):
'''simple docstring'''
_A = AcceleratorState()
_A = tensor[None].clone().to(state.device )
_A = gather(UpperCAmelCase__ ).cpu()
_A = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , UpperCAmelCase__ ):
return False
return True
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: Any , __A: List[Any] , __A: Any , __A: Tuple ) -> Dict:
_A = returncode
_A = stdout
_A = stderr
async def __A ( _lowercase , _lowercase ):
'''simple docstring'''
while True:
_A = await stream.readline()
if line:
callback(UpperCAmelCase__ )
else:
break
async def __A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False , _lowercase=False ):
'''simple docstring'''
if echo:
print('''\nRunning: ''' , ''' '''.join(UpperCAmelCase__ ) )
_A = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=UpperCAmelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=UpperCAmelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_A = []
_A = []
def tee(_lowercase , _lowercase , _lowercase , _lowercase="" ):
_A = line.decode('''utf-8''' ).rstrip()
sink.append(UpperCAmelCase__ )
if not quiet:
print(UpperCAmelCase__ , UpperCAmelCase__ , file=UpperCAmelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowercase : tee(UpperCAmelCase__ , UpperCAmelCase__ , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _lowercase : tee(UpperCAmelCase__ , UpperCAmelCase__ , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=UpperCAmelCase__ , )
return _RunOutput(await p.wait() , UpperCAmelCase__ , UpperCAmelCase__ )
def __A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=1_80 , _lowercase=False , _lowercase=True ):
'''simple docstring'''
_A = asyncio.get_event_loop()
_A = loop.run_until_complete(
_stream_subprocess(UpperCAmelCase__ , env=UpperCAmelCase__ , stdin=UpperCAmelCase__ , timeout=UpperCAmelCase__ , quiet=UpperCAmelCase__ , echo=UpperCAmelCase__ ) )
_A = ''' '''.join(UpperCAmelCase__ )
if result.returncode > 0:
_A = '''\n'''.join(result.stderr )
raise RuntimeError(
f"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
return result
class SCREAMING_SNAKE_CASE ( _snake_case ):
"""simple docstring"""
pass
def __A ( _lowercase , _lowercase=False ):
'''simple docstring'''
try:
_A = subprocess.check_output(UpperCAmelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(UpperCAmelCase__ , '''decode''' ):
_A = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"""Command `{" ".join(UpperCAmelCase__ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 362 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> Union[str, Any]:
_A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_A = dict(zip(__A , range(len(__A ) ) ) )
_A = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_A = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_A = tempfile.mkdtemp()
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A = os.path.join(self.tmpdirname , __A )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
# load decoder from hub
_A = '''hf-internal-testing/ngram-beam-search-decoder'''
def __A ( self: Tuple , **__A: str ) -> str:
_A = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A )
def __A ( self: Any , **__A: List[Any] ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A )
def __A ( self: List[Any] , **__A: Union[str, Any] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A )
def __A ( self: List[str] ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __A ( self: List[str] ) -> Optional[Any]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def __A ( self: Optional[int] ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __A ( self: str ) -> Any:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__A , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __A ( self: List[str] ) -> str:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = floats_list((3, 10_00) )
_A = feature_extractor(__A , return_tensors='''np''' )
_A = processor(__A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self: Union[str, Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = '''This is a test string'''
_A = processor(text=__A )
_A = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self: List[str] , __A: Optional[int]=(2, 10, 16) , __A: Optional[int]=77 ) -> List[Any]:
np.random.seed(__A )
return np.random.rand(*__A )
def __A ( self: List[Any] ) -> Optional[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(__A )
_A = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def __A ( self: str , __A: Any ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
_A = processor.batch_decode(__A , __A )
_A = list(__A )
with get_context('''fork''' ).Pool() as p:
_A = decoder.decode_beams_batch(__A , __A )
_A ,_A ,_A = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def __A ( self: Optional[Any] ) -> int:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = decoded_processor_out.text
_A = list(__A )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) )
def __A ( self: Optional[int] ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
_A = decoded_processor_out.text
_A = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context('''fork''' ).Pool() as pool:
_A = decoder.decode_beams_batch(
__A , __A , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __A )
def __A ( self: int ) -> Optional[Any]:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def __A ( self: Tuple ) -> Any:
_A = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_A = WavaVecaProcessorWithLM.from_pretrained(__A )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_A = os.listdir(__A )
_A = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(__A , __A )
def __A ( self: List[str] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(__A , return_tensors='''np''' )
_A = processor_auto(__A , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(__A )
_A = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __A ( self: Optional[int] ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def __A ( __A: int , __A: List[str] ) -> Union[str, Any]:
_A = [d[key] for d in offsets]
return retrieved_list
def __A ( self: Optional[Any] ) -> int:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()[0]
_A = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def __A ( self: Optional[Any] ) -> Tuple:
_A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_A = self._get_dummy_logits()
_A = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __A ( self: Optional[Any] ) -> Optional[Any]:
import torch
_A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A )
_A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(__A )
_A = next(__A )
_A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_A = model(__A ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=__A )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A )
self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) )
_A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) )
# fmt: off
_A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
_A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
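# --- Added usage sketch (illustrative, not part of the original test file): decoding dummy
# CTC logits with the same pyctcdecode-backed checkpoint used throughout the tests above.
if __name__ == "__main__":
    import numpy as np

    from transformers import Wav2Vec2ProcessorWithLM

    processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = np.random.rand(10, 16)  # (time_steps, vocab_size) toy logits
    output = processor.decode(logits, output_word_offsets=True)
    print(output.text, output.word_offsets)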
| 75 | 0 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    # Check that every custom extension file listed above ships with the package.
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
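# Illustrative invocation (the filename is an assumption, not given in the file itself):
#   python check_build.py              # checks build/lib/transformers after `python setup.py build`
#   python check_build.py --check_lib  # checks the installed `transformers` package instead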
| 124 |
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (the sequence 3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers arrive in blocks of doubling size; work out how many blocks
        # are needed to reach the requested index, then generate them in order.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue
        print(f"""The {number}th Proth number: {value}""")
| 124 | 1 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit (exact, erf-based formulation)."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother GELU approximation using tanh (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeffa = tf.cast(0.044715, x.dtype)
    coeffa_sqrt = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa_sqrt * (1.0 + coeffa * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two halves along `axis` and gate one with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}')
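# --- Added usage sketch (illustrative, not part of the original module): resolve an
# activation by name and apply it to a toy tensor.
if __name__ == "__main__":
    act_fn = get_tf_activation("gelu_fast")
    print(act_fn(tf.constant([-1.0, 0.0, 1.0])).numpy())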
| 66 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # move a random pivot to the end of the slice before partitioning
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is:")
print(z)
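# --- Added usage sketch (illustrative, not part of the original script): the helpers also
# sort plain Python lists in place and report the number of comparisons made.
demo = [5, 1, 4, 2, 3]
comparisons = _in_place_quick_sort(demo, 0, len(demo) - 1)
print(f"sorted: {demo}, comparisons: {comparisons}")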
| 66 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase__ :
"""simple docstring"""
@staticmethod
def A_ ( *snake_case , **snake_case ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Union[str, Any] = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = object_detector(examples[0] , threshold=0.0 )
UpperCAmelCase : Dict = len(snake_case )
self.assertGreater(snake_case , 0 )
self.assertEqual(
snake_case , [
{
"score": ANY(snake_case ),
"label": ANY(snake_case ),
"box": {"xmin": ANY(snake_case ), "ymin": ANY(snake_case ), "xmax": ANY(snake_case ), "ymax": ANY(snake_case )},
}
for i in range(snake_case )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCAmelCase : Optional[Any] = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
] , )
UpperCAmelCase : Tuple = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_0_4, "ymin": 1_6_7, "xmax": 2_3_2, "ymax": 1_9_0}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_7_1, "ymin": 8_3, "xmax": 5_9_8, "ymax": 1_0_3}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
{"score": 0.642, "label": "remote", "box": {"xmin": 6_7, "ymin": 2_7_4, "xmax": 9_3, "ymax": 2_9_7}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_9_4, "ymin": 1_0_5, "xmax": 5_2_1, "ymax": 1_2_7}},
]
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = pipeline("zero-shot-object-detection" )
UpperCAmelCase : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
] , )
UpperCAmelCase : Union[str, Any] = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_3_5, "ymin": 7_4, "xmax": 3_7_1, "ymax": 1_8_7}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_4_2, "ymax": 4_7_6}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def A_ ( self ):
'''simple docstring'''
pass
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = 0.2
UpperCAmelCase : Union[str, Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : str = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 5_5, "xmax": 3_1_5, "ymax": 4_7_2}},
] , )
@require_torch
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = pipeline("zero-shot-object-detection" )
UpperCAmelCase : List[str] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=snake_case , )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_2_4, "ymin": 2_0, "xmax": 6_4_0, "ymax": 3_7_3}},
{"score": 0.277, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_2, "xmax": 1_7_7, "ymax": 1_1_5}},
] , )
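# --- Added usage sketch (illustrative, not part of the original test file): the pipeline
# API exercised above, assuming the default zero-shot object-detection checkpoint.
if __name__ == "__main__":
    detector = pipeline("zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )
    for pred in predictions:
        print(pred["label"], pred["score"], pred["box"])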
| 311 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase ( __magic_name__ ):
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase : Any = False
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = plt.imshow(__magic_name__ )
fig.axes.get_xaxis().set_visible(__magic_name__ )
fig.axes.get_yaxis().set_visible(__magic_name__ )
plt.show()
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : str = datetime.now()
UpperCAmelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp
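# --- Added usage sketch (illustrative, not part of the original helpers): the last helper
# formats the current wall-clock time as "HH:MM:SS".
if __name__ == "__main__":
    print(datetime.now().strftime("%H:%M:%S"))  # e.g. "14:03:27"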
| 311 | 1 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCamelCase = 12_8022
lowerCamelCase = 12_8028
@require_sentencepiece
class _UpperCamelCase ( A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MaMaaaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def __lowerCamelCase ( self : str):
'''simple docstring'''
super().setUp()
__lowercase =['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
__lowercase =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase))))
__lowercase =Path(self.tmpdirname)
save_json(_lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_lowerCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'])
__lowercase =MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : Tuple , **_lowerCAmelCase : List[str]):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : List[str]):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase ='</s>'
__lowercase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) , _lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '</s>')
self.assertEqual(vocab_keys[1] , '<unk>')
self.assertEqual(vocab_keys[-1] , '<s>')
self.assertEqual(len(_lowerCAmelCase) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('Skip this test while all models are still to be uploaded.')
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =tokenizer.tokenize('This is a test')
self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [2, 3, 4, 5, 6] , )
__lowercase =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
__lowercase =tokenizer.convert_tokens_to_string(_lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , 'This is a test')
@slow
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase ={'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/m2m100_418M"""
lowerCAmelCase__ = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
lowerCAmelCase__ = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
lowerCAmelCase__ = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
def __lowerCamelCase ( cls : int):
'''simple docstring'''
__lowercase =MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en' , tgt_lang='fr')
__lowercase =1
return cls
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id('ar') , 1_2_8_0_0_6)
self.assertEqual(self.tokenizer.get_lang_id('en') , 1_2_8_0_2_2)
self.assertEqual(self.tokenizer.get_lang_id('ro') , 1_2_8_0_7_6)
self.assertEqual(self.tokenizer.get_lang_id('mr') , 1_2_8_0_6_3)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.tokenizer.get_vocab()
self.assertEqual(len(_lowerCAmelCase) , self.tokenizer.vocab_size)
self.assertEqual(vocab['<unk>'] , 3)
self.assertIn(self.tokenizer.get_lang_token('en') , _lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase ='en'
__lowercase =self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowerCAmelCase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
self.assertIn(_lowerCAmelCase , self.tokenizer.all_special_ids)
# fmt: off
__lowercase =[FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
__lowercase =self.tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase)
__lowercase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , _lowerCAmelCase)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =tempfile.mkdtemp()
__lowercase =self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =MaMaaaTokenizer.from_pretrained(_lowerCAmelCase)
self.assertDictEqual(new_tok.lang_token_to_id , _lowerCAmelCase)
@require_torch
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase ='en'
__lowercase ='fr'
__lowercase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCAmelCase , return_tensors='pt')
__lowercase =shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
__lowercase =batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase ='mr'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
__lowercase ='zh'
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase ='mr'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
__lowercase ='zh'
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar')
self.assertEqual(
nested_simplify(_lowerCAmelCase) , {
# en_XX, A, test, EOS
'input_ids': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 1_2_8_0_0_6,
} , )
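# --- Added usage sketch (illustrative, not part of the original test file): the en->fr
# translation round trip these tests exercise, on the same facebook/m2m100_418M checkpoint.
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tokenizer("A test", return_tensors="pt")
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))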
| 48 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """wavlm"""
def __init__( self : List[str] , _lowerCAmelCase : List[Any]=3_2 , _lowerCAmelCase : int=7_6_8 , _lowerCAmelCase : Any=1_2 , _lowerCAmelCase : Union[str, Any]=1_2 , _lowerCAmelCase : List[Any]=3_0_7_2 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : List[Any]="group" , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Dict=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : int=1_2_8 , _lowerCAmelCase : Tuple=1_6 , _lowerCAmelCase : Optional[int]=3_2_0 , _lowerCAmelCase : Union[str, Any]=8_0_0 , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=0.05 , _lowerCAmelCase : List[Any]=1_0 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=1_0 , _lowerCAmelCase : List[Any]=3_2_0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[int]=1_0_0 , _lowerCAmelCase : Tuple=2_5_6 , _lowerCAmelCase : Union[str, Any]=2_5_6 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple="mean" , _lowerCAmelCase : Any=False , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=2_5_6 , _lowerCAmelCase : Tuple=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowerCAmelCase : Dict=(5, 3, 3, 1, 1) , _lowerCAmelCase : Dict=(1, 2, 3, 1, 1) , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[int]=8_0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Any=3 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : List[str] , ):
'''simple docstring'''
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase)
__lowercase =hidden_size
__lowercase =feat_extract_norm
__lowercase =feat_extract_activation
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =conv_bias
__lowercase =num_buckets
__lowercase =max_bucket_distance
__lowercase =num_conv_pos_embeddings
__lowercase =num_conv_pos_embedding_groups
__lowercase =len(self.conv_dim)
__lowercase =num_hidden_layers
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =num_attention_heads
__lowercase =hidden_dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =feat_proj_dropout
__lowercase =final_dropout
__lowercase =layerdrop
__lowercase =layer_norm_eps
__lowercase =initializer_range
__lowercase =num_ctc_classes
__lowercase =vocab_size
__lowercase =do_stable_layer_norm
__lowercase =use_weighted_layer_sum
__lowercase =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase =apply_spec_augment
__lowercase =mask_time_prob
__lowercase =mask_time_length
__lowercase =mask_time_min_masks
__lowercase =mask_feature_prob
__lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase =num_codevectors_per_group
__lowercase =num_codevector_groups
__lowercase =contrastive_logits_temperature
__lowercase =num_negatives
__lowercase =codevector_dim
__lowercase =proj_codevector_dim
__lowercase =diversity_loss_weight
# ctc loss
__lowercase =ctc_loss_reduction
__lowercase =ctc_zero_infinity
# adapter
__lowercase =add_adapter
__lowercase =adapter_kernel_size
__lowercase =adapter_stride
__lowercase =num_adapter_layers
__lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =xvector_output_dim
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
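# --- Added usage sketch (illustrative, not part of the original module): upstream this class
# is `WavLMConfig`; the property above exposes the total convolutional downsampling ratio.
if __name__ == "__main__":
    from transformers import WavLMConfig

    config = WavLMConfig()
    print(config.inputs_to_logits_ratio)  # conv strides (5, 2, 2, 2, 2, 2, 2) -> 320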
| 48 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 'EncodecFeatureExtractor'
__magic_name__ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , __snake_case , __snake_case ):
super().__init__(__snake_case , __snake_case )
snake_case = self.feature_extractor
snake_case = False
def a_ ( self , __snake_case=None , __snake_case=None , __snake_case=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self , *__snake_case , **__snake_case ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
snake_case = kwargs.pop('''audio''' , __snake_case )
snake_case = kwargs.pop('''sampling_rate''' , __snake_case )
snake_case = kwargs.pop('''text''' , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
snake_case = self.tokenizer(__snake_case , **__snake_case )
if audio is not None:
snake_case = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
snake_case = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
snake_case = audio_inputs['''padding_mask''']
return inputs
def a_ ( self , *__snake_case , **__snake_case ):
snake_case = kwargs.pop('''audio''' , __snake_case )
snake_case = kwargs.pop('''padding_mask''' , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio_values is not None:
return self._decode_audio(__snake_case , padding_mask=__snake_case )
else:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def a_ ( self , *__snake_case , **__snake_case ):
return self.tokenizer.decode(*__snake_case , **__snake_case )
def a_ ( self , __snake_case , __snake_case = None ):
snake_case = to_numpy(__snake_case )
snake_case , snake_case , snake_case = audio_values.shape
if padding_mask is None:
return list(__snake_case )
snake_case = to_numpy(__snake_case )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
snake_case = seq_len - padding_mask.shape[-1]
snake_case = 1 - self.feature_extractor.padding_value
snake_case = np.pad(__snake_case , ((0, 0), (0, difference)) , '''constant''' , constant_values=__snake_case )
snake_case = audio_values.tolist()
for i in range(__snake_case ):
snake_case = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
snake_case = sliced_audio.reshape(__snake_case , -1 )
return audio_values
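# --- Added usage sketch (illustrative, not part of the original module): the boolean
# masking used by the audio-trimming helper above, applied to toy data.
if __name__ == "__main__":
    values = np.zeros((1, 1, 6))  # (batch, channels, seq_len) dummy generated audio
    mask = np.array([[1, 1, 1, 1, 0, 0]])  # last two frames are padding (padding_value == 0)
    frames = values[0][mask[0][None, :] != 0]  # keep only the non-padded samples
    print(frames.reshape(1, -1).shape)  # -> (1, 4)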
| 127 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = int(UpperCamelCase_ )
snake_case , snake_case , snake_case = t // 36_00, (t // 60) % 60, t % 60
return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_=3_00 ):
"""simple docstring"""
return F'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
snake_case = F'''{elt:.6f}''' if isinstance(UpperCamelCase_ ,UpperCamelCase_ ) else str(UpperCamelCase_ )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class A__ :
"""simple docstring"""
__magic_name__ = 5
__magic_name__ = 0.2
def __init__( self , __snake_case , __snake_case = None , __snake_case = True , __snake_case = None , __snake_case = 3_0_0 , ):
snake_case = total
snake_case = '''''' if prefix is None else prefix
snake_case = leave
snake_case = parent
snake_case = width
snake_case = None
snake_case = None
snake_case = None
def a_ ( self , __snake_case , __snake_case = False , __snake_case = None ):
snake_case = value
if comment is not None:
snake_case = comment
if self.last_value is None:
snake_case = snake_case = time.time()
snake_case = snake_case = value
snake_case = snake_case = None
snake_case = self.warmup
snake_case = 1
self.update_bar(__snake_case )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
snake_case = time.time()
snake_case = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
snake_case = self.elapsed_time / (value - self.start_value)
else:
snake_case = None
if value >= self.total:
snake_case = self.total
snake_case = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
snake_case = self.average_time_per_item * (self.total - value)
self.update_bar(__snake_case )
snake_case = value
snake_case = current_time
if self.average_time_per_item is None:
snake_case = 1
else:
snake_case = max(int(self.update_every / self.average_time_per_item ) , 1 )
def a_ ( self , __snake_case , __snake_case=None ):
snake_case = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case )
if self.elapsed_time is None:
snake_case = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
snake_case = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
snake_case = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def a_ ( self ):
snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def a_ ( self ):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=None ):
super().__init__(__snake_case )
snake_case = None if column_names is None else [column_names]
snake_case = None
def a_ ( self ):
snake_case = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
snake_case = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def a_ ( self , __snake_case ):
if self.inner_table is None:
snake_case = [list(values.keys() ), list(values.values() )]
else:
snake_case = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__snake_case )
snake_case = columns
self.inner_table.append([values[c] for c in columns] )
def a_ ( self , __snake_case , __snake_case=None , __snake_case=3_0_0 ):
snake_case = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case )
return self.child_bar
def a_ ( self ):
snake_case = None
self.display()
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self ):
snake_case = None
snake_case = None
snake_case = False
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
snake_case = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
snake_case = 0
snake_case = 0
snake_case = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
snake_case = NotebookTrainingTracker(state.max_steps , __snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
snake_case = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
snake_case = False
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
if not has_length(__snake_case ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
snake_case = self.training_tracker.add_child(len(__snake_case ) )
else:
snake_case = NotebookProgressBar(len(__snake_case ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
if self.prediction_bar is not None:
self.prediction_bar.close()
snake_case = None
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
snake_case = {'''Training Loss''': logs['''loss''']}
# First column is necessarily "Step" since we're not using the epoch evaluation strategy
snake_case = state.global_step
self.training_tracker.write_line(__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None , **__snake_case ):
if self.training_tracker is not None:
snake_case = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
snake_case = log['''loss''']
break
if self.first_column == "Epoch":
snake_case = int(state.epoch )
else:
snake_case = state.global_step
snake_case = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
snake_case = re.sub(R'''\_loss$''' , '''''' , __snake_case )
snake_case = metrics.pop('''total_flos''' , __snake_case )
snake_case = metrics.pop('''epoch''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_runtime''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __snake_case )
snake_case = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __snake_case )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
snake_case = v
else:
snake_case = k.split('''_''' )
snake_case = ''' '''.join([part.capitalize() for part in splits[1:]] )
snake_case = v
self.training_tracker.write_line(__snake_case )
self.training_tracker.remove_child()
snake_case = None
# Evaluation takes a long time so we should force the next update.
snake_case = True
def a_ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ):
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=__snake_case )
snake_case = None
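# --- Added usage sketch (illustrative, not part of the original module). Upstream the last
# class is `NotebookProgressCallback`; attached to a `Trainer` it swaps the console progress
# bars for the HTML widgets defined above (model/args/dataset below are placeholders):
#
#     from transformers.utils.notebook import NotebookProgressCallback
#
#     trainer = Trainer(model=model, args=training_args, train_dataset=train_ds,
#                       callbacks=[NotebookProgressCallback()])
#     trainer.train()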
| 127 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A__ : Tuple ={
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ):
"""simple docstring"""
_lowerCAmelCase = XLNetConfig.from_json_file(a__ )
_lowerCAmelCase = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase = finetuning_task
_lowerCAmelCase = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase = XLNetForSequenceClassification(a__ )
elif "squad" in finetuning_task:
_lowerCAmelCase = finetuning_task
_lowerCAmelCase = XLNetForQuestionAnswering(a__ )
else:
_lowerCAmelCase = XLNetLMHeadModel(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a__ , a__ , a__ )
# Save pytorch-model
_lowerCAmelCase = os.path.join(a__ , a__ )
_lowerCAmelCase = os.path.join(a__ , a__ )
print(f"Save PyTorch model to {os.path.abspath(a__ )}" )
torch.save(model.state_dict() , a__ )
print(f"Save configuration file to {os.path.abspath(a__ )}" )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A__ : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
A__ : int =parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
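# Illustrative invocation (the script name and paths are placeholders, not from the file):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#       --xlnet_config_file /path/to/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-converted \
#       --finetuning_task sts-b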
| 361 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
A__ : List[str] ={
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
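# Minimal usage sketch (assumes the `transformers` agents/tools runtime; the call
# signature below is illustrative and may differ across library versions):
#
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous?", src_lang="French", tgt_lang="English")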
| 220 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
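# Sketch of what the lazy module buys (illustrative, not part of the original file):
#
#   from transformers.models import canine
#   canine.CanineConfig   # resolved eagerly via _import_structure
#   canine.CanineModel    # triggers the torch-gated import on first attribute access
#
# Environments without torch can still import the config/tokenizer names above.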
| 280 |
from __future__ import annotations
def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    """Interest accrued between two payments at a flat daily rate."""
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Interest earned when the rate is compounded every period."""
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Interest for an annual percentage rate compounded daily over whole years."""
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")

    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
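# Quick sanity check (illustrative numbers, not from the original file):
#   simple_interest(10_000, 0.06, 3)              -> 1800.0
#   round(compound_interest(10_000, 0.05, 3), 2)  -> 1576.25
# apr_interest simply compounds the daily rate (APR / 365) over 365 * years periods.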
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 356 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase : Any = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCamelCase, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad", )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 128 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
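    # The reference values above follow the DDPM posterior variance
    # (Ho et al., 2020, Eq. 7): var(t) = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t.
    # A quick sketch to reproduce them under the linear beta schedule used here
    # (values are approximate, shown for orientation only):
    #
    #   betas = torch.linspace(0.0001, 0.02, 1000)
    #   alpha_bar = torch.cumprod(1.0 - betas, dim=0)
    #   t = 487
    #   (1 - alpha_bar[t - 1]) / (1 - alpha_bar[t]) * betas[t]   # ~0.00979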
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 189 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []

    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))

    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
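# The class-info JSON fetched above maps ids to entries of the form (illustrative
# excerpt in the spirit of the ADE20K panoptic file, values not verified here):
#
#   {"0": {"name": "wall", "isthing": 0}, "7": {"name": "bed", "isthing": 1}, ...}
#
# so `metadata` ends up holding id -> name pairs plus "thing_ids" and "class_names".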
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]

        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )

        return inputs

    def test_init_without_params(self):
        pass

    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")

    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)

    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])

    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )

    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 189 | 1 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
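# Slack incoming webhooks accept a JSON body of the form {"text": "..."}; any
# non-200 status (for example 404 for a revoked webhook URL) surfaces as the
# ValueError raised above.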
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 353 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
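# Minimal composition sketch (the encoder/decoder choices below are illustrative):
#
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   config.decoder.is_decoder  # True, set by from_encoder_decoder_configs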
| 96 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row to cancel each row's leading term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create the next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
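# Worked example (computed by hand): x + 2y = 7 and 3x - y = 0 give y = 3x,
# so 7x = 7 -> x = 1, y = 3:
#
#   solve_simultaneous([[1, 2, 7], [3, -1, 0]])  # -> [1.0, 3.0]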
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 159 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user via the REST API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
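# The authenticated-user endpoint returns a JSON object whose fields include,
# for example (illustrative values only):
#
#   {"login": "octocat", "id": 1, "name": "The Octocat", ...}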
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 159 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
a_ : Optional[Any] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE__ , repo_id='valid_org/test-generation-config-org' , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
a_ : Dict = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
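# Hedged round-trip sketch (only the public GenerationConfig API exercised by
# the tests above; the directory path is illustrative):
#
#     from transformers import GenerationConfig
#     config = GenerationConfig(do_sample=True, temperature=0.7, top_k=50)
#     config.save_pretrained("/tmp/gen-config")             # writes generation_config.json
#     reloaded = GenerationConfig.from_pretrained("/tmp/gen-config")
#     assert reloaded.temperature == 0.7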
| 120 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force( force : float , area : float , distance : float ) -> dict[str, float]:
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_40 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_40 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force)
        ) ** (1 / 4)
        return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
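    # Hedged worked example (values are illustrative, not from the original
    # module): plates of area 4 cm^2 separated by 1 micrometre.
    print(casimir_force(force=0, area=4e-4, distance=1e-6))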
| 120 | 1 |
__snake_case :Tuple = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case :str = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case :Any = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case :List[str] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case :str = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case :List[str] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case :int = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case :str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
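# Hedged sanity check (assumption: these tables are custom diffusion timestep
# schedules): a valid schedule is strictly decreasing and ends at timestep 0.
def _is_valid_schedule(schedule):
    return schedule[-1] == 0 and all(a > b for a, b in zip(schedule, schedule[1:]))
# Only the last table is reachable here because every assignment above reuses
# the same obfuscated name; the same check applies to each table.
assert _is_valid_schedule(__snake_case)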
| 49 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase : int = 1_0_0_0_0
UpperCAmelCase : Optional[List[str]] = None
UpperCAmelCase : Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase : str = ParquetConfig
def __snake_case ( self : Tuple):
return datasets.DatasetInfo(features=self.config.features)
def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
a : str = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__UpperCAmelCase , (str, list, tuple)):
a : Dict = data_files
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
a : Dict = []
for split_name, files in data_files.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(__UpperCAmelCase):
with open(__UpperCAmelCase , "rb") as f:
a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
break
splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
return splits
def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
return pa_table
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema) != sorted(self.config.columns):
raise ValueError(
f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
with open(__UpperCAmelCase , "rb") as f:
a : Tuple = pq.ParquetFile(__UpperCAmelCase)
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
a : Optional[Any] = pa.Table.from_batches([record_batch])
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
raise
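# Hedged standalone sketch of the same read path (pure pyarrow, no datasets
# machinery; the temp file is illustrative): write a tiny Parquet file, then
# stream it back in record batches as the generator method above does.
if __name__ == "__main__":
    import tempfile
    _demo_path = tempfile.mktemp(suffix=".parquet")
    pq.write_table(pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]}), _demo_path)
    for _batch in pq.ParquetFile(_demo_path).iter_batches(batch_size=2, columns=["a"]):
        print(pa.Table.from_batches([_batch]))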
| 40 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def lowerCamelCase_ ( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 , UpperCamelCase__ : str = "bert-base-cased" ) -> int:
"""simple docstring"""
__lowerCamelCase = AutoTokenizer.from_pretrained(__a )
__lowerCamelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCamelCase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
__a , batched=__a , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=__a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase__ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(__a , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets['train'] , shuffle=__a , collate_fn=__a , batch_size=__a )
__lowerCamelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config['''lr''']
__lowerCamelCase = int(config['num_epochs'] )
__lowerCamelCase = int(config['seed'] )
__lowerCamelCase = int(config['batch_size'] )
__lowerCamelCase = args.model_name_or_path
set_seed(__a )
__lowerCamelCase = get_dataloaders(__a , __a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
__lowerCamelCase = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
# Now we train the model
__lowerCamelCase = evaluate.load('glue' , 'mrpc' )
__lowerCamelCase = 0
__lowerCamelCase = {}
for epoch in range(__a , __a ):
model.train()
for step, batch in enumerate(__a ):
__lowerCamelCase = model(**__a )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**__a )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a , references=__a , )
__lowerCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __a )
__lowerCamelCase = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
__lowerCamelCase = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(__a , __a )
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=__a , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__a , )
parser.add_argument(
'--output_dir' , type=__a , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
        '--performance_lower_bound' , type=__a , default=__a , help='Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=__a , default=3 , help='Number of train epochs.' , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main()
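# Hedged launch note (file names below are illustrative): when the DeepSpeed
# config file already declares an optimizer/scheduler, DummyOptim and
# DummyScheduler above are placeholders that DeepSpeed swaps for the real ones
# inside `accelerator.prepare`. A typical invocation looks like:
#
#     accelerate launch --config_file deepspeed_config.yaml this_script.py --num_epochs 3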
| 365 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
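        # Hedged arithmetic note: `loss` is the mean cross-entropy per label
        # token, so -(labels.shape[-1] * loss.item()) is the total sequence
        # log-likelihood; e.g. a mean loss of 2.5 over 4 tokens scores -10.0.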
| 348 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : List[str] =logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] ={
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase__ : Any = '''fnet'''
def __init__( self , _A=32_000 , _A=768 , _A=12 , _A=3_072 , _A="gelu_new" , _A=0.1 , _A=512 , _A=4 , _A=0.0_2 , _A=1e-12 , _A=False , _A=512 , _A=3 , _A=1 , _A=2 , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = use_tpu_fourier_optimizations
__SCREAMING_SNAKE_CASE = tpu_short_seq_length
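# Hedged note (the de-obfuscated original of this class is FNetConfig; the
# duplicated `_A` parameters above are an obfuscation artifact of its real
# keyword signature). Typical construction, under that assumption:
#
#     config = FNetConfig(vocab_size=32_000, hidden_size=768, num_hidden_layers=12)
#     config.save_pretrained("/tmp/fnet-config")   # writes config.json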
| 257 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = process
__SCREAMING_SNAKE_CASE = params
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.dataset[i]
__SCREAMING_SNAKE_CASE = self.process(_A , **self.params )
return processed
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A=None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = loader
__SCREAMING_SNAKE_CASE = infer
__SCREAMING_SNAKE_CASE = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = loader_batch_size
# Internal bookkeeping
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __len__( self ):
'''simple docstring'''
return len(self.loader )
def __iter__( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = iter(self.loader )
return self
def _A ( self ):
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__SCREAMING_SNAKE_CASE = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__SCREAMING_SNAKE_CASE = {}
for k, element in self._loader_batch_data.items():
if isinstance(_A , _A ):
# Convert ModelOutput to tuple first
__SCREAMING_SNAKE_CASE = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__SCREAMING_SNAKE_CASE = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__SCREAMING_SNAKE_CASE = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__SCREAMING_SNAKE_CASE = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__SCREAMING_SNAKE_CASE = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__SCREAMING_SNAKE_CASE = self._loader_batch_data.__class__(_A )
self._loader_batch_index += 1
return result
def _A ( self ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__SCREAMING_SNAKE_CASE = next(self.iterator )
__SCREAMING_SNAKE_CASE = self.infer(_A , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_A , torch.Tensor ):
__SCREAMING_SNAKE_CASE = processed
else:
__SCREAMING_SNAKE_CASE = list(processed.keys() )[0]
__SCREAMING_SNAKE_CASE = processed[key]
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = len(_A )
else:
__SCREAMING_SNAKE_CASE = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__SCREAMING_SNAKE_CASE = observed_batch_size
# Setting internal index to unwrap the batch
__SCREAMING_SNAKE_CASE = processed
__SCREAMING_SNAKE_CASE = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A=None ):
'''simple docstring'''
super().__init__(_A , _A , _A )
def __iter__( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = iter(self.loader )
__SCREAMING_SNAKE_CASE = None
return self
def _A ( self ):
'''simple docstring'''
if self.subiterator is None:
__SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__SCREAMING_SNAKE_CASE = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
__SCREAMING_SNAKE_CASE = next(self.subiterator )
return processed
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
def __iter__( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = iter(self.loader )
return self
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__SCREAMING_SNAKE_CASE = self.loader_batch_item()
__SCREAMING_SNAKE_CASE = item.pop('is_last' )
accumulator.append(_A )
if is_last:
return accumulator
while not is_last:
__SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_A , torch.Tensor ):
__SCREAMING_SNAKE_CASE = processed
else:
__SCREAMING_SNAKE_CASE = list(processed.keys() )[0]
__SCREAMING_SNAKE_CASE = processed[key]
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = len(_A )
else:
__SCREAMING_SNAKE_CASE = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__SCREAMING_SNAKE_CASE = observed_batch_size
__SCREAMING_SNAKE_CASE = processed
__SCREAMING_SNAKE_CASE = 0
while self._loader_batch_index < self.loader_batch_size:
__SCREAMING_SNAKE_CASE = self.loader_batch_item()
__SCREAMING_SNAKE_CASE = item.pop('is_last' )
accumulator.append(_A )
if is_last:
return accumulator
else:
__SCREAMING_SNAKE_CASE = processed
__SCREAMING_SNAKE_CASE = item.pop('is_last' )
accumulator.append(_A )
return accumulator
class KeyDataset ( Dataset ):
    '''simple docstring'''
    def __init__( self , dataset , key ):
        '''simple docstring'''
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self , i ):
        '''simple docstring'''
        return self.dataset[i][self.key]
class KeyPairDataset ( Dataset ):
    '''simple docstring'''
    def __init__( self , dataset , keya , keyb ):
        '''simple docstring'''
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb
    def __len__( self ):
        '''simple docstring'''
        return len(self.dataset )
    def __getitem__( self , i ):
        '''simple docstring'''
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 257 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class snake_case_ ( __A ):
def __get__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=None ) -> Optional[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
lowercase__ : Any = "__cached_" + self.fget.__name__
lowercase__ : Optional[int] = getattr(lowercase_ , lowercase_ , lowercase_ )
if cached is None:
lowercase__ : Optional[Any] = self.fget(lowercase_ )
setattr(lowercase_ , lowercase_ , lowercase_ )
return cached
def lowercase_ ( _lowerCamelCase : Union[str, Any]) -> Dict:
lowercase__ : str = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''')
def lowercase_ ( _lowerCamelCase : Optional[int]) -> List[str]:
if is_torch_fx_proxy(_lowerCamelCase):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer)):
return True
return isinstance(_lowerCamelCase , np.ndarray)
def lowercase_ ( _lowerCamelCase : Optional[Any]) -> List[Any]:
return isinstance(_lowerCamelCase , np.ndarray)
def lowercase_ ( _lowerCamelCase : Any) -> int:
return _is_numpy(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int]) -> List[Any]:
import torch
return isinstance(_lowerCamelCase , torch.Tensor)
def lowercase_ ( _lowerCamelCase : Union[str, Any]) -> List[Any]:
return False if not is_torch_available() else _is_torch(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int]) -> Any:
import torch
return isinstance(_lowerCamelCase , torch.device)
def lowercase_ ( _lowerCamelCase : List[Any]) -> Tuple:
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[Any]) -> Optional[Any]:
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase):
if hasattr(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase)
else:
return False
return isinstance(_lowerCamelCase , torch.dtype)
def lowercase_ ( _lowerCamelCase : int) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str) -> str:
import tensorflow as tf
return isinstance(_lowerCamelCase , tf.Tensor)
def lowercase_ ( _lowerCamelCase : List[Any]) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int) -> List[Any]:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase , "is_symbolic_tensor"):
return tf.is_symbolic_tensor(_lowerCamelCase)
return type(_lowerCamelCase) == tf.Tensor
def lowercase_ ( _lowerCamelCase : str) -> Dict:
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict) -> str:
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase , jnp.ndarray)
def lowercase_ ( _lowerCamelCase : Any) -> List[Any]:
return False if not is_flax_available() else _is_jax(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict) -> Tuple:
if isinstance(_lowerCamelCase , (dict, UserDict)):
return {k: to_py_obj(_lowerCamelCase) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple)):
return [to_py_obj(_lowerCamelCase) for o in obj]
elif is_tf_tensor(_lowerCamelCase):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCamelCase):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCamelCase):
return np.asarray(_lowerCamelCase).tolist()
elif isinstance(_lowerCamelCase , (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowercase_ ( _lowerCamelCase : Optional[int]) -> Optional[int]:
if isinstance(_lowerCamelCase , (dict, UserDict)):
return {k: to_numpy(_lowerCamelCase) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple)):
return np.array(_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase):
return np.asarray(_lowerCamelCase)
else:
return obj
class snake_case_ ( __A ):
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
lowercase__ : Union[str, Any] = fields(self )
# Safety and consistency checks
if not len(lowercase_ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
lowercase__ : List[Any] = getattr(self , class_fields[0].name )
lowercase__ : int = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowercase_ ):
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Optional[int] = first_field.items()
lowercase__ : Optional[Any] = True
else:
try:
lowercase__ : str = iter(lowercase_ )
lowercase__ : Any = True
except TypeError:
lowercase__ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowercase_ ):
if (
not isinstance(lowercase_ , (list, tuple) )
or not len(lowercase_ ) == 2
or not isinstance(element[0] , lowercase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowercase__ : str = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowercase__ : Tuple = element[1]
elif first_field is not None:
lowercase__ : Optional[Any] = first_field
else:
for field in class_fields:
lowercase__ : List[Any] = getattr(self , field.name )
if v is not None:
lowercase__ : List[str] = v
def __delitem__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Dict ) -> Optional[int]:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __UpperCamelCase ( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ) -> Tuple:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ) -> Optional[int]:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __UpperCamelCase ( self : List[str] , *lowercase_ : str , **lowercase_ : Dict ) -> str:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[Any] , lowercase_ : List[str] ) -> Union[str, Any]:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : List[str] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> List[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowercase_ , lowercase_ )
super().__setattr__(lowercase_ , lowercase_ )
def __setitem__( self : Union[str, Any] , lowercase_ : int , lowercase_ : str ) -> int:
# Will raise a KeyException if needed
super().__setitem__(lowercase_ , lowercase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class snake_case_ ( __A ,__A ):
@classmethod
def __UpperCamelCase ( cls : int , lowercase_ : Dict ) -> Optional[Any]:
raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class snake_case_ ( __A ):
__A : str = "longest"
__A : Any = "max_length"
__A : List[Any] = "do_not_pad"
class snake_case_ ( __A ):
__A : Dict = "pt"
__A : str = "tf"
__A : Optional[int] = "np"
__A : List[Any] = "jax"
class snake_case_ :
def __init__( self : Dict , lowercase_ : List[ContextManager] ) -> Optional[Any]:
lowercase__ : str = context_managers
lowercase__ : List[Any] = ExitStack()
def __enter__( self : Dict ) -> List[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(lowercase_ )
def __exit__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Optional[Any]:
self.stack.__exit__(*lowercase_ , **lowercase_ )
def lowercase_ ( _lowerCamelCase : Any) -> int:
lowercase__ : Optional[Any] = infer_framework(_lowerCamelCase)
if framework == "tf":
lowercase__ : List[str] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
lowercase__ : List[str] = inspect.signature(model_class.forward) # PyTorch models
else:
lowercase__ : List[Any] = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowercase_ ( _lowerCamelCase : Tuple) -> List[str]:
lowercase__ : Union[str, Any] = model_class.__name__
lowercase__ : Union[str, Any] = infer_framework(_lowerCamelCase)
if framework == "tf":
lowercase__ : Union[str, Any] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
lowercase__ : Optional[int] = inspect.signature(model_class.forward) # PyTorch models
else:
lowercase__ : List[str] = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowercase_ ( _lowerCamelCase : MutableMapping , _lowerCamelCase : str = "" , _lowerCamelCase : str = ".") -> List[Any]:
def _flatten_dict(_lowerCamelCase : Tuple , _lowerCamelCase : Any="" , _lowerCamelCase : int="."):
for k, v in d.items():
lowercase__ : Optional[Any] = str(_lowerCamelCase) + delimiter + str(_lowerCamelCase) if parent_key else k
if v and isinstance(_lowerCamelCase , _lowerCamelCase):
yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase).items()
else:
yield key, v
return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase))
@contextmanager
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : bool = False) -> List[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[Any]=None) -> Optional[int]:
if is_numpy_array(_lowerCamelCase):
return np.transpose(_lowerCamelCase , axes=_lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.T if axes is None else array.permute(*_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase)
else:
raise ValueError(f'''Type not supported for transpose: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple) -> Any:
if is_numpy_array(_lowerCamelCase):
return np.reshape(_lowerCamelCase , _lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.reshape(*_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.reshape(_lowerCamelCase , _lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.reshape(_lowerCamelCase , _lowerCamelCase)
else:
raise ValueError(f'''Type not supported for reshape: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=None) -> Tuple:
if is_numpy_array(_lowerCamelCase):
return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase)
else:
raise ValueError(f'''Type not supported for squeeze: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]) -> Optional[int]:
if is_numpy_array(_lowerCamelCase):
return np.expand_dims(_lowerCamelCase , _lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.unsqueeze(dim=_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase)
else:
raise ValueError(f'''Type not supported for expand_dims: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : int) -> str:
if is_numpy_array(_lowerCamelCase):
return np.size(_lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.numel()
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.size(_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : int) -> Union[str, Any]:
for key, value in auto_map.items():
if isinstance(_lowerCamelCase , (tuple, list)):
lowercase__ : Optional[Any] = [f'''{repo_id}--{v}''' if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowercase__ : Any = f'''{repo_id}--{value}'''
return auto_map
def lowercase_ ( _lowerCamelCase : Union[str, Any]) -> Union[str, Any]:
for base_class in inspect.getmro(_lowerCamelCase):
lowercase__ : List[str] = base_class.__module__
lowercase__ : Any = base_class.__name__
if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch") or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''')
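# Hedged usage note: the helpers above are `transpose`, `reshape`, `squeeze`,
# `expand_dims` and `tensor_size` in the de-obfuscated original (every def here
# reuses the name `lowercase_`). They dispatch on the tensor's framework, e.g.:
#
#     import numpy as np
#     transpose(np.zeros((2, 3))).shape        # (3, 2) via np.transpose
#     reshape(np.zeros((2, 3)), (3, 2)).shape  # (3, 2) via np.reshape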
| 364 |
def lowercase_ ( numerator : int = 1 , digit : int = 1000):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1 , digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
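    # Hedged spot check (illustrative): among denominators below 10,
    # 1/7 = 0.(142857) has the longest recurring cycle, so this prints 7.
    print(lowercase_(digit=10))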
| 333 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE = 1_6
_SCREAMING_SNAKE_CASE = 3_2
def lowercase( UpperCamelCase_ , UpperCamelCase_ = 16 ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(UpperCamelCase_ ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
UpperCamelCase_ , padding="""longest""" , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
UpperCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE = mocked_dataloaders # noqa: F811
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase_ ) == "1":
UpperCamelCase = 2
# Initialize accelerator
UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config["lr"]
UpperCamelCase = int(config["""num_epochs"""] )
UpperCamelCase = int(config["""seed"""] )
UpperCamelCase = int(config["""batch_size"""] )
UpperCamelCase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
UpperCamelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCamelCase = MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase_ )
UpperCamelCase = get_dataloaders(UpperCamelCase_ , UpperCamelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=UpperCamelCase_ )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCamelCase = model(**UpperCamelCase_ )
UpperCamelCase = outputs.loss
UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
UpperCamelCase = 0
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase_ )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(UpperCamelCase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=UpperCamelCase_ , references=UpperCamelCase_ , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , UpperCamelCase_ )
def lowercase( ) -> int:
'''simple docstring'''
UpperCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
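# Hedged arithmetic check of the truncation above: with 2 processes and a
# 5-sample eval set at per-process batch size 2, the gathered batches have
# sizes [4, 2]; after the first step samples_seen == 4, so the final slice
# predictions[: len(dataset) - samples_seen] keeps exactly the 1 genuine sample
# and drops the duplicate added for padding.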
| 343 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Tuple = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
_a = '''ernie_m'''
_a = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self: List[Any] , _lowerCAmelCase: int = 25_00_02 , _lowerCAmelCase: int = 7_68 , _lowerCAmelCase: int = 12 , _lowerCAmelCase: int = 12 , _lowerCAmelCase: int = 30_72 , _lowerCAmelCase: str = "gelu" , _lowerCAmelCase: float = 0.1 , _lowerCAmelCase: float = 0.1 , _lowerCAmelCase: int = 5_14 , _lowerCAmelCase: float = 0.02 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: float = 1e-0_5 , _lowerCAmelCase: Dict=None , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: List[str]=0.0 , **_lowerCAmelCase: Tuple , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase :Tuple = vocab_size
lowercase :List[str] = hidden_size
lowercase :Optional[int] = num_hidden_layers
lowercase :Optional[Any] = num_attention_heads
lowercase :Optional[Any] = intermediate_size
lowercase :Optional[Any] = hidden_act
lowercase :Any = hidden_dropout_prob
lowercase :int = attention_probs_dropout_prob
lowercase :Dict = max_position_embeddings
lowercase :Optional[Any] = initializer_range
lowercase :Any = layer_norm_eps
lowercase :Union[str, Any] = classifier_dropout
lowercase :int = is_decoder
lowercase :List[str] = act_dropout
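# Hedged note: in the de-obfuscated original the two `_a` class attributes above
# are `model_type` and `attribute_map`. PretrainedConfig uses attribute_map to
# alias attributes, so (illustratively, with the assumed class name):
#
#     config = ErnieMConfig()
#     config.dropout == config.classifier_dropout   # "dropout" resolves via the map
#     config.num_classes == config.num_labels       # likewise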
| 236 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=10 , lowerCamelCase_=3 , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=2 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=10 , lowerCamelCase_=0.02 , lowerCamelCase_=0.9 , lowerCamelCase_=None , ):
"""simple docstring"""
a = parent
a = batch_size
a = image_size
a = num_channels
a = patch_size
a = tubelet_size
a = num_frames
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = type_sequence_label_size
a = initializer_range
a = mask_ratio
a = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
a = (image_size // patch_size) ** 2
a = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
a = int(mask_ratio * self.seq_length )
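        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens, and int(0.9 * 25) = 22 masked tokens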
def UpperCamelCase_ (self ):
"""simple docstring"""
a = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ (self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = VideoMAEModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
a = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = VideoMAEForPreTraining(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a = torch.ones((self.num_masks,) )
a = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
a = mask.expand(self.batch_size , -1 ).bool()
a = model(lowerCamelCase_ , lowerCamelCase_ )
# model only returns predictions for masked patches
a = mask.sum().item()
a = 3 * self.tubelet_size * self.patch_size**2
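        # the decoder reconstructs each masked patch as raw pixels:
        # num_channels (3) * tubelet_size * patch_size**2 values per patch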
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.prepare_config_and_inputs()
a , a , a = config_and_inputs
a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__A = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = VideoMAEModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ):
"""simple docstring"""
a = copy.deepcopy(lowerCamelCase_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
a = torch.ones((self.model_tester.num_masks,) )
a = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
a = mask.expand(self.model_tester.batch_size , -1 ).bool()
a = bool_masked_pos.to(lowerCamelCase_ )
if return_labels:
if model_class in [
*get_values(lowerCamelCase_ ),
]:
a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(lowerCamelCase_ )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a = VideoMAEModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
for model_class in self.all_model_classes:
a = self.model_tester.seq_length - self.model_tester.num_masks
a = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
a = True
a = False
a = True
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
a = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a = True
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
a = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
a = len(lowerCamelCase_ )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCamelCase_ ) )
a = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase_ (self ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
a = outputs.hidden_states
a = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
a = self.model_tester.seq_length - self.model_tester.num_masks
a = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def a( ) -> Optional[int]:
"""simple docstring"""
a = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
a = np.load(A )
return list(A )
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase_ (self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
lowerCamelCase_ )
a = self.default_image_processor
a = prepare_video()
a = image_processor(lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
a = model(**lowerCamelCase_ )
# verify the logits
a = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
a = torch.tensor([0.3669, -0.0688, -0.2421] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(lowerCamelCase_ )
a = self.default_image_processor
a = prepare_video()
a = image_processor(lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
# add boolean mask, indicating which patches to mask
a = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
a = torch.load(lowerCamelCase_ )
# forward pass
with torch.no_grad():
a = model(**lowerCamelCase_ )
# verify the logits
a = torch.Size([1, 1408, 1536] )
a = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowerCamelCase_ )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
a = torch.tensor([0.5142] , device=lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCamelCase_ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
a = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=lowerCamelCase_ ).to(
lowerCamelCase_ )
with torch.no_grad():
a = model(**lowerCamelCase_ )
a = torch.tensor(torch.tensor([0.6469] ) , device=lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCamelCase_ , atol=1E-4 ) )
| 71 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = LxmertTokenizer
__A = LxmertTokenizerFast
__A = True
__A = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
a = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = "UNwant\u00E9d,running"
a = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = "I was born in 92000, and this is falsé."
a = tokenizer.tokenize(lowerCamelCase_ )
a = rust_tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
a = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
a = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
a = self.get_rust_tokenizer()
a = tokenizer.encode(lowerCamelCase_ )
a = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
| 71 | 1 |
import torch
from transformers import AutoModel
class snake_case__ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : str, _snake_case : List[str]="sayef/fsner-bert-base-uncased" ) ->Tuple:
super(UpperCamelCase__, self ).__init__()
snake_case__ : int = AutoModel.from_pretrained(UpperCamelCase__, return_dict=UpperCamelCase__ )
snake_case__ : Any = torch.nn.CosineSimilarity(3, 1e-08 )
snake_case__ : int = torch.nn.Softmax(dim=1 )
def lowercase_ ( self : List[str], **_snake_case : Optional[Any] ) ->List[Any]:
return self.bert(**UpperCamelCase__ ).last_hidden_state
def lowercase_ ( self : List[str], _snake_case : Tuple ) ->Any:
return token_embeddings.sum(2, keepdim=UpperCamelCase__ )
def lowercase_ ( self : Dict, _snake_case : Any, _snake_case : List[Any], _snake_case : str=1 ) ->Union[str, Any]:
return self.softmax(T * self.cos(UpperCamelCase__, UpperCamelCase__ ) )
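    # note: `T` above acts as a temperature scaling the cosine similarities before the softmax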
def lowercase_ ( self : Union[str, Any], _snake_case : Dict, _snake_case : Any ) ->Union[str, Any]:
snake_case__ : List[Any] = W_supports["sizes"].tolist()
snake_case__ : Dict = W_supports["start_token_id"].item()
snake_case__ : List[str] = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
snake_case__ : str = self.BERT(**UpperCamelCase__ )
snake_case__ : int = self.BERT(**UpperCamelCase__ )
snake_case__ : Any = None
snake_case__ : Union[str, Any] = None
snake_case__ : str = W_supports["input_ids"] == start_token_id
snake_case__ : List[str] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(UpperCamelCase__ ):
if i == 0:
snake_case__ : Any = 0
else:
snake_case__ : Tuple = support_sizes[i - 1]
snake_case__ : Optional[Any] = S[s : s + size][start_token_masks[s : s + size]]
snake_case__ : str = S[s : s + size][end_token_masks[s : s + size]]
snake_case__ : int = torch.matmul(q[i], s_start.T ).sum(1 ).softmax(0 )
snake_case__ : Tuple = torch.matmul(q[i], s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
snake_case__ : Optional[int] = torch.vstack((p_starts, p_start) )
snake_case__ : List[Any] = torch.vstack((p_ends, p_end) )
else:
snake_case__ : str = p_start
snake_case__ : int = p_end
return p_starts, p_ends
| 277 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : str = DebertaTokenizer
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : int = DebertaTokenizerFast
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
snake_case : str = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case : Optional[Any] = {"unk_token": "[UNK]"}
snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = "lower newer"
snake_case : Any = "lower newer"
return input_text, output_text
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Optional[int] = self.get_tokenizer()
snake_case : str = "lower newer"
snake_case : Dict = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case : Optional[Any] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[Any] = tokens + [tokenizer.unk_token]
snake_case : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : Tuple = self.get_tokenizer()
snake_case : Dict = tokenizer("Hello" , "World" )
snake_case : str = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , UpperCamelCase__ )
@slow
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[Any] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
snake_case : Dict = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ )
snake_case : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ )
snake_case : List[str] = tokenizer.encode(
"sequence builders" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
snake_case : Optional[int] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
snake_case : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case : str = tokenizer_class.from_pretrained("microsoft/deberta-base" )
snake_case : Union[str, Any] = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
snake_case : Optional[int] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ )
snake_case : Optional[int] = [tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) for seq in encoding["input_ids"]]
# fmt: off
snake_case : Dict = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case : int = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , UpperCamelCase__ )
for expected, decoded in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 203 | 0 |
"""simple docstring"""
from __future__ import annotations
UpperCamelCase_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) ->tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
a_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase ) )
] # the reference grid
a_ = 1
a_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCAmelCase ) )
] # the action grid
a_ = init[0]
a_ = init[1]
a_ = 0
a_ = g + heuristic[x][y] # cost from starting cell to destination cell
a_ = [[f, g, x, y]]
a_ = False # flag that is set when search is complete
    a_ = False  # flag set if we cannot expand any further
while not found and not resign:
if len(UpperCAmelCase ) == 0:
raise ValueError("Algorithm is unable to find solution" )
        else:  # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
a_ = cell.pop()
a_ = next_cell[2]
a_ = next_cell[3]
a_ = next_cell[1]
if x == goal[0] and y == goal[1]:
a_ = True
else:
for i in range(len(UpperCAmelCase ) ): # to try out different valid actions
a_ = x + DIRECTIONS[i][0]
a_ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
a_ = g + cost
a_ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
a_ = 1
a_ = i
a_ = []
a_ = goal[0]
a_ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
a_ = x - DIRECTIONS[action[x][y]][0]
a_ = y - DIRECTIONS[action[x][y]][1]
a_ = xa
a_ = ya
invpath.append([x, y] )
a_ = []
for i in range(len(UpperCAmelCase ) ):
path.append(invpath[len(UpperCAmelCase ) - 1 - i] )
return path, action
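# `path` is the list of [x, y] cells from init to goal; `action` stores, for each cell,
# the index into DIRECTIONS of the move used to reach it, which is what makes the
# backtracking loop above possible.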
if __name__ == "__main__":
UpperCamelCase_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase_ = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase_ = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase_ = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase_ = 99
UpperCamelCase_ , UpperCamelCase_ = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 357 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCamelCase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
return k
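# Illustrative example of the substitutions above (an assumed TF key, not taken from a real checkpoint):
# "encoder/layer_0/attention/output_proj/kernel" -> "encoder.layers.0.attn.out_proj.weight"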
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->PegasusForConditionalGeneration:
"""simple docstring"""
a_ = DEFAULTS.copy()
cfg_kwargs.update(UpperCAmelCase )
a_ = PegasusConfig(**UpperCAmelCase )
a_ = PegasusForConditionalGeneration(UpperCAmelCase )
a_ = torch_model.model.state_dict()
a_ = {}
for k, v in tf_weights.items():
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
a_ = v.T
a_ = torch.tensor(UpperCAmelCase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
a_ = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
a_ = mapping["shared.weight"]
a_ = mapping["shared.weight"]
a_ = {k: torch.zeros_like(UpperCAmelCase ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**UpperCAmelCase )
a_ , a_ = torch_model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
a_ = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase ( UpperCAmelCase="./ckpt/aeslc/model.ckpt-32000" ) ->Dict:
"""simple docstring"""
a_ = tf.train.list_variables(UpperCAmelCase )
a_ = {}
a_ = ["Adafactor", "global_step"]
for name, shape in tqdm(UpperCAmelCase , desc="converting tf checkpoint to dict" ):
a_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ = tf.train.load_variable(UpperCAmelCase , UpperCAmelCase )
a_ = array
return tf_weights
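# the returned dict maps TF variable names to numpy arrays, with optimizer slots
# ("Adafactor") and "global_step" filtered out via `ignore_name` above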
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = Path(UpperCAmelCase ).parent.name
a_ = task_specific_params[F'''summarization_{dataset}''']["max_position_embeddings"]
a_ = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=UpperCAmelCase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(UpperCAmelCase )
# convert model
a_ = get_tf_weights_as_numpy(UpperCAmelCase )
a_ = task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
a_ = task_specific_params
a_ = convert_pegasus(UpperCAmelCase , UpperCAmelCase )
torch_model.save_pretrained(UpperCAmelCase )
a_ = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(UpperCAmelCase , Path(UpperCAmelCase ) / "pytorch_model.bin" )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase_ = parser.parse_args()
if args.save_dir is None:
UpperCamelCase_ = Path(args.tf_ckpt_path).parent.name
UpperCamelCase_ = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 303 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = DiTPipeline
__lowerCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
__lowerCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_snake_case , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=_snake_case , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_snake_case , 1e-3 )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_snake_case , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase = pipe.get_label_ids(_snake_case )
_lowerCAmelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(_snake_case , _snake_case ):
_lowerCAmelCase = load_numpy(
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase = ["""vase""", """umbrella"""]
_lowerCAmelCase = pipe.get_label_ids(_snake_case )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_snake_case , generator=_snake_case , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(_snake_case , _snake_case ):
_lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 82 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = False , ):
"""simple docstring"""
if label_map is not None:
for old_id, new_id in label_map.items():
_a = new_id
# turn into Numpy arrays
_a = np.array(__A)
_a = np.array(__A)
if reduce_labels:
_a = 255
_a = label - 1
_a = 255
_a = label != ignore_index
_a = np.not_equal(__A , __A)
_a = pred_label[mask]
_a = np.array(__A)[mask]
_a = pred_label[pred_label == label]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = np.histogram(__A , bins=__A , range=(0, num_labels - 1))[0]
_a = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
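# all four arrays are per-class pixel counts (length `num_labels`): intersection,
# union, predicted area and ground-truth area for a single (prediction, label) pair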
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = False , ):
"""simple docstring"""
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
_a = np.zeros((num_labels,) , dtype=np.floataa)
for result, gt_seg_map in zip(__A , __A):
_a , _a , _a , _a = intersect_and_union(
__A , __A , __A , __A , __A , __A)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowerCAmelCase (__A , __A , __A , __A , __A = None , __A = None , __A = False , ):
"""simple docstring"""
_a , _a , _a , _a = total_intersect_and_union(
__A , __A , __A , __A , __A , __A)
# compute metrics
_a = {}
_a = total_area_intersect.sum() / total_area_label.sum()
_a = total_area_intersect / total_area_union
_a = total_area_intersect / total_area_label
_a = np.nanmean(__A)
_a = np.nanmean(__A)
_a = all_acc
_a = iou
_a = acc
if nan_to_num is not None:
_a = {metric: np.nan_to_num(__A , nan=__A) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def a__ (self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def a__ (self , A , A , A , A , A = None , A = None , A = False , ) -> List[Any]:
"""simple docstring"""
_a = mean_iou(
results=A , gt_seg_maps=A , num_labels=A , ignore_index=A , nan_to_num=A , label_map=A , reduce_labels=A , )
return iou_result
| 211 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowercase : Union[str, Any] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : Dict = size if size is not None else {'''shortest_edge''': 256}
A : int = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A : Optional[int] = do_resize
A : Union[str, Any] = size
A : str = resample
A : Optional[int] = do_center_crop
A : Optional[int] = crop_size
A : Dict = do_rescale
A : List[Any] = rescale_factor
A : str = do_normalize
A : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A : Optional[int] = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> np.ndarray:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
A : Dict = do_resize if do_resize is not None else self.do_resize
A : Tuple = size if size is not None else self.size
A : Dict = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Dict = resample if resample is not None else self.resample
A : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
A : Optional[int] = crop_size if crop_size is not None else self.crop_size
A : str = get_size_dict(SCREAMING_SNAKE_CASE , param_name='''crop_size''' )
A : Dict = do_rescale if do_rescale is not None else self.do_rescale
A : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Dict = do_normalize if do_normalize is not None else self.do_normalize
A : Optional[int] = image_mean if image_mean is not None else self.image_mean
A : Union[str, Any] = image_std if image_std is not None else self.image_std
A : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A : Dict = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
A : List[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A : Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
A : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
A : int = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[str]:
"""simple docstring"""
A : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(SCREAMING_SNAKE_CASE ):
A : str = target_sizes.numpy()
A : Tuple = []
for idx in range(len(SCREAMING_SNAKE_CASE ) ):
A : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=SCREAMING_SNAKE_CASE )
A : str = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE )
else:
A : List[Any] = logits.argmax(dim=1 )
A : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 311 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
A : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , )
A : Optional[Any] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
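        # denormalize from the model's [-1, 1] range to [0, 1], then move channels last for numpy/PIL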
A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 311 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = dataset
_snake_case = process
_snake_case = params
def __len__( self ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.dataset[i]
_snake_case = self.process(lowerCAmelCase_ , **self.params )
return processed
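# PipelineIterator wraps a DataLoader: it runs `infer` on each batch and, when
# loader_batch_size is set, unrolls the batched model output back into single items.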
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = loader
_snake_case = infer
_snake_case = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_snake_case = None
_snake_case = loader_batch_size
# Internal bookkeeping
_snake_case = None
_snake_case = None
def __len__( self ):
"""simple docstring"""
return len(self.loader )
def __iter__( self ):
"""simple docstring"""
_snake_case = iter(self.loader )
return self
def lowerCamelCase ( self ):
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_snake_case = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_snake_case = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
# Convert ModelOutput to tuple first
_snake_case = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_snake_case = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_snake_case = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_snake_case = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_snake_case = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_snake_case = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_snake_case = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_snake_case = self._loader_batch_data.__class__(lowerCAmelCase_ )
self._loader_batch_index += 1
return result
def lowerCamelCase ( self ):
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_snake_case = next(self.iterator )
_snake_case = self.infer(lowerCAmelCase_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCAmelCase_ , torch.Tensor ):
_snake_case = processed
else:
_snake_case = list(processed.keys() )[0]
_snake_case = processed[key]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = len(lowerCAmelCase_ )
else:
_snake_case = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case = observed_batch_size
# Setting internal index to unwrap the batch
_snake_case = processed
_snake_case = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def __iter__( self ):
"""simple docstring"""
_snake_case = iter(self.loader )
_snake_case = None
return self
def lowerCamelCase ( self ):
"""simple docstring"""
if self.subiterator is None:
_snake_case = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_snake_case = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_snake_case = self.infer(next(self.iterator ) , **self.params )
_snake_case = next(self.subiterator )
return processed
class __UpperCAmelCase ( _lowerCamelCase ):
def __iter__( self ):
"""simple docstring"""
_snake_case = iter(self.loader )
return self
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = False
_snake_case = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_snake_case = self.loader_batch_item()
_snake_case = item.pop('is_last' )
accumulator.append(lowerCAmelCase_ )
if is_last:
return accumulator
while not is_last:
_snake_case = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCAmelCase_ , torch.Tensor ):
_snake_case = processed
else:
_snake_case = list(processed.keys() )[0]
_snake_case = processed[key]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = len(lowerCAmelCase_ )
else:
_snake_case = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case = observed_batch_size
_snake_case = processed
_snake_case = 0
while self._loader_batch_index < self.loader_batch_size:
_snake_case = self.loader_batch_item()
_snake_case = item.pop('is_last' )
accumulator.append(lowerCAmelCase_ )
if is_last:
return accumulator
else:
_snake_case = processed
_snake_case = item.pop('is_last' )
accumulator.append(lowerCAmelCase_ )
return accumulator
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = dataset
_snake_case = key
def __len__( self ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , lowerCAmelCase_ ):
"""simple docstring"""
return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 42 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
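# Worked example: 28 is perfect because its proper divisors 1, 2, 4, 7 and 14
# sum back to 28, while 27 is not:
#
#     >>> perfect(28)
#     True
#     >>> perfect(27)
#     False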
| 220 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
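# The markdown output has the following shape (titles and URLs here are
# illustrative; the real values depend on the live Hacker News API):
#
#     * [Some story title](https://example.com/story)
#     * [Another story title](https://example.com/other)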
| 21 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
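# A minimal usage sketch (illustrative; downloading the checkpoint requires
# network access, and any image-to-text checkpoint works here):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#     outputs = captioner("photo.jpg")
#     # outputs is a list like [{"generated_text": "..."}]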
| 21 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''')

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''input_ids''', '''attention_mask'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''input_ids''', '''attention_mask'''])
| 101 |
def surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('''Length must be a positive number.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def volume(edge: float) -> float:
    """Volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('''Length must be a positive number.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
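# Quick numeric check for a unit edge (values rounded to 4 decimal places):
#
#     >>> round(surface_area(1), 4)
#     20.6457
#     >>> round(volume(1), 4)
#     7.6631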
| 101 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
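# Illustrative invocation (script name and all paths are placeholders):
#
#     python convert_flava_original_checkpoint.py \
#         --checkpoint_path ./flava_full.pt \
#         --codebook_path ./flava_codebook.pt \
#         --pytorch_dump_folder_path ./flava-hf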
| 199 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)
            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''')
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_examples(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_examples(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 199 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
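# A minimal sketch of instantiating the config directly (the tiny sizes are
# arbitrary, chosen for illustration only):
#
#     config = PegasusConfig(encoder_layers=2, decoder_layers=2, d_model=256)
#     print(config.hidden_size, config.num_attention_heads)  # 256 16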
| 88 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''')
        main_process_only = kwargs.pop('''main_process_only''', True)
        in_order = kwargs.pop('''in_order''', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
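# Illustrative usage under `accelerate launch` (the adapter only logs on the
# main process by default):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed once", main_process_only=True)
#     logger.info("printed on every process, in rank order", main_process_only=False, in_order=True)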
| 67 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
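# For example, rec_insertion_sort([3, 1, 2], 3) sorts the list in place,
# yielding [1, 2, 3].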
| 8 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__A : Optional[int] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 8 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = 'van'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-6,
        layer_scale_init_value=1E-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
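# A minimal sketch of building a smaller variant (the sizes are arbitrary,
# for illustration only):
#
#     config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
#     print(config.model_type)  # "van"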
| 147 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""")
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"""files""": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                column_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=column_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, """rb""") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("""utf-8""")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(F"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(F"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(F"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    F"Not able to read records in the JSON file at {file}. "
                                    F"You should probably indicate the field of the JSON file containing your records. "
                                    F"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
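# Illustrative end-user view of this builder (the file path is a placeholder;
# `field` selects a nested key when the JSON file is one object instead of
# newline-delimited records):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("json", data_files="data.json", field="data")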
| 109 | 0 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the standard luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
return output
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
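# A tiny sanity check (illustrative): dilating a single "on" pixel with a 3x3
# cross-shaped kernel switches on its four direct neighbours as well.
#
#     img = np.zeros((3, 3))
#     img[1, 1] = 1
#     cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
#     print(dilation(img, cross))
#     # [[0. 1. 0.]
#     #  [1. 1. 1.]
#     #  [0. 1. 0.]]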
| 371 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    """simple docstring"""
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """simple docstring"""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, F'{module}.py')):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
    with open(os.path.join(DIFFUSERS_PATH, F'{module}.py'), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(F' {object_name} does not match any function or class in {module}.')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
def get_indent(code):
    """simple docstring"""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """simple docstring"""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = F'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(F'^{indent}# End copy', line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting {filename}.')
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
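# Illustrative invocations from the repository root:
#
#     python utils/check_copies.py                      # report inconsistencies
#     python utils/check_copies.py --fix_and_overwrite  # rewrite out-of-sync copies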
| 221 | 0 |