| code (string, lengths 82–54.1k) | code_codestyle (int64, 0–699) | style_context (string, lengths 111–35.6k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
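
# Usage sketch (not part of the original script): the benchmark is normally driven
# from the command line, but the arguments can also be built in code. The field
# names follow the public transformers benchmark API; the exact values below are
# illustrative assumptions.
#
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = TensorFlowBenchmark(args=args).run()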
| 54 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand

SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
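
# Worked example (illustrative, mirroring the first TEST_COMPARE row): a six-high
# straight flush loses to a royal flush.
#
#   PokerHand("2H 3H 4H 5H 6H").compare_with(PokerHand("KS AS TS QS JS"))  # -> "Loss"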
| 54 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
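
# Usage sketch (assumption: run inside the DeeBERT research project so that
# modeling_highway_bert is importable). Builds an untrained early-exit classifier.
#
#   from transformers import RobertaConfig
#
#   config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   model.eval()  # in eval mode, forward() also returns (entropy, exit_layer) info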
| 54 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
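
# Usage sketch (illustrative): BaseImageProcessor makes instances callable, so
# preprocessing a PIL image is one call. The input image size is an assumption.
#
#   from PIL import Image
#   import numpy as np
#
#   image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#   processor = ConvNextImageProcessor()
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 384, 384) for the default size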
| 54 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
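
# Usage sketch (assumption: a recent `datasets` release exposing the same public API):
#
#   from datasets import Dataset, interleave_datasets, concatenate_datasets
#
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#   combined = concatenate_datasets([d1, d2])  # 6 rows, in order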
| 54 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
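
# Worked example (illustrative) of handle_test_results on a typical pytest summary
# line such as "= 2 failed, 98 passed in 34.56s =":
#
#   handle_test_results("= 2 failed, 98 passed in 34.56s =")
#   # -> (2, 98, "34.56s"): counts precede the "failed"/"passed" tokens, and the
#   # trailing "=" means the time is taken from the second-to-last token.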
| 54 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where the model repo is cloned and model updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Output file to save the evaluation results."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
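
# Usage sketch (illustrative): these dataclasses are meant to be consumed through
# HfArgumentParser in a training or preprocessing entry point.
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]
#   print(args.model_ckpt, args.train_batch_size)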
| 54 |
def solution(pence: int = 200) -> int:
    """Return the number of ways to make `pence` pence from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
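
# Worked example: with only the coins {1, 2, 5} relevant, there are 4 ways to make
# 5 pence (1+1+1+1+1, 1+1+1+2, 1+2+2, 5), and the DP above reproduces that:
#
#   solution(5)  # -> 4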
| 54 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
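
# Sketch of the dynamically imported helper (assumption: the fairseq-style
# formulation commonly used by the legacy seq2seq examples; the real `utils`
# module in that project may differ in details).
#
#   def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
#       # Mix the NLL loss with a uniform prior over the vocabulary.
#       if target.dim() == lprobs.dim() - 1:
#           target = target.unsqueeze(-1)
#       nll_loss = -lprobs.gather(dim=-1, index=target)
#       smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
#       if ignore_index is not None:
#           pad_mask = target.eq(ignore_index)
#           nll_loss.masked_fill_(pad_mask, 0.0)
#           smooth_loss.masked_fill_(pad_mask, 0.0)
#       nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
#       eps_i = epsilon / lprobs.size(-1)
#       loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
#       return loss, nll_loss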
| 54 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
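
# Worked example: for the dimensions [30, 35, 15, 5, 10, 20, 25] (the classic CLRS
# instance), the minimum cost is 15125 scalar multiplications with the
# parenthesization ((A1 (A2 A3)) ((A4 A5) A6)), which main() prints.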
| 54 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
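
# Usage sketch (illustrative): build a default config and inspect the dynamic
# axes an ONNX export would use.
#
#   config = RobertaConfig()
#   onnx_config = RobertaOnnxConfig(config)
#   print(config.hidden_size)   # 768
#   print(onnx_config.inputs)   # OrderedDict with input_ids / attention_mask axes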
| 54 |
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
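
# Worked example: 36 is 0b100100, so 36 & -36 isolates the lowest set bit (4),
# and log2(4) = 2 gives its index:
#
#   get_index_of_rightmost_set_bit(36)  # -> 2
#   get_index_of_rightmost_set_bit(0)   # -> 0 (special-cased above)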
| 54 | 1 |
class A :
def __init__( self: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: int , _lowerCAmelCase: str ) -> str:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =None
UpperCAmelCase_ =graph
self._normalize_graph(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =len(_lowerCAmelCase )
UpperCAmelCase_ =None
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Any , _lowerCAmelCase: Dict ) -> str:
'''simple docstring'''
if sources is int:
UpperCAmelCase_ =[sources]
if sinks is int:
UpperCAmelCase_ =[sinks]
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) == 0:
return
UpperCAmelCase_ =sources[0]
UpperCAmelCase_ =sinks[0]
        # make a fake vertex if there is more
        # than one source or sink
if len(_lowerCAmelCase ) > 1 or len(_lowerCAmelCase ) > 1:
UpperCAmelCase_ =0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCAmelCase_ =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
UpperCAmelCase_ =max_input_flow
UpperCAmelCase_ =0
UpperCAmelCase_ =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCAmelCase_ =max_input_flow
UpperCAmelCase_ =size - 1
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =algorithm(self )
class A :
def __init__( self: Optional[Any] , _lowerCAmelCase: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =flow_network
UpperCAmelCase_ =flow_network.verticesCount
UpperCAmelCase_ =flow_network.sourceIndex
UpperCAmelCase_ =flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
UpperCAmelCase_ =flow_network.graph
UpperCAmelCase_ =False
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
UpperCAmelCase_ =True
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
pass
class A ( __lowercase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Optional[int] ) -> List[str]:
'''simple docstring'''
super().__init__(_lowerCAmelCase )
# use this to save your result
UpperCAmelCase_ =-1
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class A ( __lowercase ):
def __init__( self: str , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(_lowerCAmelCase )
UpperCAmelCase_ =[[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCAmelCase_ =[0] * self.verticies_count
UpperCAmelCase_ =[0] * self.verticies_count
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCAmelCase_ =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCAmelCase_ =0
while i < len(_lowerCAmelCase ):
UpperCAmelCase_ =vertices_list[i]
UpperCAmelCase_ =self.heights[vertex_index]
self.process_vertex(_lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_lowerCAmelCase ) )
UpperCAmelCase_ =0
else:
i += 1
UpperCAmelCase_ =sum(self.preflow[self.source_index] )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_lowerCAmelCase , _lowerCAmelCase )
self.relabel(_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCAmelCase_ =self.heights[to_index]
if min_height is not None:
UpperCAmelCase_ =min_height + 1
if __name__ == "__main__":
__lowercase : int =[0]
__lowercase : Optional[int] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowercase : str =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowercase : List[str] =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowercase : Optional[int] =flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 54 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
UpperCAmelCase_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase_ =sd.pop(lowercase__ )
UpperCAmelCase_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase_ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase_ =value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =torch.split(lowercase__ , depth // 3 , dim=0 )
UpperCAmelCase_ =q
UpperCAmelCase_ =k
UpperCAmelCase_ =v
del sd[key]
return sd
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =load_checkpoint(lowercase__ )
if config is not None:
UpperCAmelCase_ =OPTConfig.from_pretrained(lowercase__ )
else:
UpperCAmelCase_ =OPTConfig()
UpperCAmelCase_ =OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowercase : str =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
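    # Editor's sketch (added): a hedged command-line invocation with purely
    # illustrative placeholder paths (the script filename is an assumption):
    #
    #   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fairseq_path /path/to/restored.pt \
    #       --pytorch_dump_folder_path ./opt-hf \
    #       --hf_config facebook/opt-350m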
| 54 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class A ( unittest.TestCase ):
def __init__( self: Any , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any]=7 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: int=18 , _lowerCAmelCase: int=30 , _lowerCAmelCase: Union[str, Any]=400 , _lowerCAmelCase: str=True , _lowerCAmelCase: List[str]=None , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: Any=[0.5, 0.5, 0.5] , _lowerCAmelCase: Union[str, Any]=[0.5, 0.5, 0.5] , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =image_size
UpperCAmelCase_ =min_resolution
UpperCAmelCase_ =max_resolution
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean
UpperCAmelCase_ =image_std
def lowerCAmelCase__ ( self: int ) -> List[Any]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A ( __lowercase , unittest.TestCase ):
_snake_case =DPTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self: Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ =DPTImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "size" ) )
def lowerCAmelCase__ ( self: Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCAmelCase_ =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ =image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_ =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ =image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ =prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
UpperCAmelCase_ =image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
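if __name__ == "__main__":
    # Editor's sketch (added): a minimal, hedged usage example of the processor
    # the tests above exercise; guarded so it only runs when the torch and
    # vision dependencies are present.
    if is_torch_available() and is_vision_available():
        _demo_image = Image.new("RGB", (30, 30))
        _demo_pixels = DPTImageProcessor(size={"height": 18, "width": 18})(
            images=_demo_image, return_tensors="pt"
        ).pixel_values
        assert _demo_pixels.shape == (1, 3, 18, 18)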
| 54 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowercase : str ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =(images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ =numpy_to_pil(lowercase__ )
return images
def a__ ( lowercase__ ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase_ =images[None, ...]
UpperCAmelCase_ =(images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase_ =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
UpperCAmelCase_ =[Image.fromarray(lowercase__ ) for image in images]
return pil_images
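if __name__ == "__main__":
    # Editor's sketch (added): a hedged round-trip check for the numpy -> PIL
    # helper directly above (named `a__` after obfuscation).
    import numpy as _np

    _batch = _np.random.rand(2, 8, 8, 3).astype("float32")  # NHWC floats in [0, 1]
    _pils = a__(_batch)
    assert len(_pils) == 2 and _pils[0].size == (8, 8)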
| 54 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase : List[Any] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: int , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: bool = True , **_lowerCAmelCase: Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_center_crop
UpperCAmelCase_ =crop_size
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ =image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ =do_convert_rgb
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=size["shortest_edge"] , default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Optional[int] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> int:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: int = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowerCAmelCase: Dict , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , param_name="size" , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ =crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , param_name="crop_size" , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ =[convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ =[self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
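if __name__ == "__main__":
    # Editor's sketch (added): the class above mirrors `transformers.CLIPImageProcessor`
    # (an assumption; the obfuscated name here is just `A`). A hedged end-to-end
    # run with the canonical class and its default 224px resize + center crop:
    from PIL import Image as _Image
    from transformers import CLIPImageProcessor as _CLIPImageProcessor

    _demo = _CLIPImageProcessor()(images=_Image.new("RGB", (640, 480)), return_tensors="np")
    assert _demo["pixel_values"].shape == (1, 3, 224, 224)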
| 54 |
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =int(lowercase__ )
if n_element < 1:
UpperCAmelCase_ =ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ =[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =(0, 0, 0)
UpperCAmelCase_ =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowercase : Optional[Any] =numpy.array([0, 0])
__lowercase : str =numpy.array([0.5, 0.866_0254])
__lowercase : Optional[Any] =numpy.array([1, 0])
__lowercase : List[Any] =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =initial_vectors
for _ in range(lowercase__ ):
UpperCAmelCase_ =iteration_step(lowercase__ )
return vectors
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[]
for i, start_vector in enumerate(vectors[:-1] ):
UpperCAmelCase_ =vectors[i + 1]
new_vectors.append(lowercase__ )
UpperCAmelCase_ =end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =numpy.radians(lowercase__ )
UpperCAmelCase_ , UpperCAmelCase_ =numpy.cos(lowercase__ ), numpy.sin(lowercase__ )
UpperCAmelCase_ =numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase__ , lowercase__ )
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCAmelCase_ , UpperCAmelCase_ =zip(*lowercase__ )
plt.plot(lowercase__ , lowercase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : List[str] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
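    # Editor's sketch (added): each step replaces every segment with four, so
    # after n iterations the 3 initial segments hold 3 * 4**n + 1 points; a
    # hedged check for the n = 5 run above.
    assert len(processed_vectors) == 3 * 4**5 + 1  # 3073 points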
| 54 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: List[Any] , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
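# Editor's sketch (added): with the canonical class name (an assumption -- the
# shim above is obfuscated to `A`), instantiation emits the FutureWarning:
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       GLPNFeatureExtractor()
#   assert any("deprecated" in str(w.message) for w in caught)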
| 54 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =3_8_4
UpperCAmelCase_ =7
if "tiny" in model_name:
UpperCAmelCase_ =9_6
UpperCAmelCase_ =(2, 2, 6, 2)
UpperCAmelCase_ =(3, 6, 1_2, 2_4)
elif "small" in model_name:
UpperCAmelCase_ =9_6
UpperCAmelCase_ =(2, 2, 1_8, 2)
UpperCAmelCase_ =(3, 6, 1_2, 2_4)
elif "base" in model_name:
UpperCAmelCase_ =1_2_8
UpperCAmelCase_ =(2, 2, 1_8, 2)
UpperCAmelCase_ =(4, 8, 1_6, 3_2)
UpperCAmelCase_ =1_2
UpperCAmelCase_ =5_1_2
elif "large" in model_name:
UpperCAmelCase_ =1_9_2
UpperCAmelCase_ =(2, 2, 1_8, 2)
UpperCAmelCase_ =(6, 1_2, 2_4, 4_8)
UpperCAmelCase_ =1_2
UpperCAmelCase_ =7_6_8
# set label information
UpperCAmelCase_ =1_5_0
UpperCAmelCase_ ="huggingface/label-files"
UpperCAmelCase_ ="ade20k-id2label.json"
UpperCAmelCase_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ ={int(lowercase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ ={v: k for k, v in idalabel.items()}
UpperCAmelCase_ =SwinConfig(
embed_dim=lowercase__ , depths=lowercase__ , num_heads=lowercase__ , window_size=lowercase__ , out_features=["stage1", "stage2", "stage3", "stage4"] , )
UpperCAmelCase_ =UperNetConfig(
backbone_config=lowercase__ , auxiliary_in_channels=lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ , )
return config
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[]
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.stages.{i}.downsample.reduction.weight', F'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.weight', F'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.bias', F'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =dct.pop(lowercase__ )
UpperCAmelCase_ =val
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ =num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ =state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
UpperCAmelCase_ =state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ =in_proj_weight[:dim, :]
UpperCAmelCase_ =in_proj_bias[: dim]
UpperCAmelCase_ =in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ =in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ =in_proj_weight[
-dim :, :
]
UpperCAmelCase_ =in_proj_bias[-dim :]
# fmt: on
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =x.shape
UpperCAmelCase_ =x.reshape(lowercase__ , 4 , in_channel // 4 )
UpperCAmelCase_ =x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowercase__ , lowercase__ )
return x
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =x.shape
UpperCAmelCase_ =x.reshape(lowercase__ , in_channel // 4 , 4 )
UpperCAmelCase_ =x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowercase__ , lowercase__ )
return x
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =x.shape[0]
UpperCAmelCase_ =x.reshape(4 , in_channel // 4 )
UpperCAmelCase_ =x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowercase__ )
return x
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =x.shape[0]
UpperCAmelCase_ =x.reshape(in_channel // 4 , 4 )
UpperCAmelCase_ =x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowercase__ )
return x
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
UpperCAmelCase_ =model_name_to_url[model_name]
UpperCAmelCase_ =torch.hub.load_state_dict_from_url(lowercase__ , map_location="cpu" , file_name=lowercase__ )[
"state_dict"
]
for name, param in state_dict.items():
print(lowercase__ , param.shape )
UpperCAmelCase_ =get_upernet_config(lowercase__ )
UpperCAmelCase_ =UperNetForSemanticSegmentation(lowercase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase_ =state_dict.pop(lowercase__ )
if "bn" in key:
UpperCAmelCase_ =key.replace("bn" , "batch_norm" )
UpperCAmelCase_ =val
# rename keys
UpperCAmelCase_ =create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
UpperCAmelCase_ =reverse_correct_unfold_reduction_order(lowercase__ )
if "norm" in key:
UpperCAmelCase_ =reverse_correct_unfold_norm_order(lowercase__ )
model.load_state_dict(lowercase__ )
# verify on image
UpperCAmelCase_ ="https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
UpperCAmelCase_ =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("RGB" )
UpperCAmelCase_ =SegformerImageProcessor()
UpperCAmelCase_ =processor(lowercase__ , return_tensors="pt" ).pixel_values
with torch.no_grad():
UpperCAmelCase_ =model(lowercase__ )
UpperCAmelCase_ =outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
UpperCAmelCase_ =torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
UpperCAmelCase_ =torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
UpperCAmelCase_ =torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
UpperCAmelCase_ =torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowercase__ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
__lowercase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase : Dict =parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
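    # Editor's sketch (added): a hedged command-line invocation with purely
    # illustrative placeholder paths (the script filename is an assumption):
    #
    #   python convert_upernet_original_to_pytorch.py \
    #       --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny \
    #       --push_to_hub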
| 54 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowercase , unittest.TestCase ):
_snake_case =CanineTokenizer
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: List[Any] ) -> CanineTokenizer:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
UpperCAmelCase_ =1024
return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
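if __name__ == "__main__":
    # Editor's sketch (added): CANINE tokenizes at the Unicode code-point level,
    # so input ids are ord() values bracketed by the CLS/SEP code points
    # (0xE000 / 0xE001); a hedged, self-contained check with the default tokenizer.
    _tok = CanineTokenizer()
    assert _tok("hi").input_ids == [0xE000, ord("h"), ord("i"), 0xE001]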
| 54 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( __lowercase ):
_snake_case =42
_snake_case =42
def __init__( self: Tuple , _lowerCAmelCase: UNetaDModel , _lowerCAmelCase: ScoreSdeVeScheduler ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self: int , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 2000 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , **_lowerCAmelCase: Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
UpperCAmelCase_ =self.unet.config.sample_size
UpperCAmelCase_ =(batch_size, 3, img_size, img_size)
UpperCAmelCase_ =self.unet
UpperCAmelCase_ =randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase ) * self.scheduler.init_noise_sigma
UpperCAmelCase_ =sample.to(self.device )
self.scheduler.set_timesteps(_lowerCAmelCase )
self.scheduler.set_sigmas(_lowerCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCAmelCase_ =self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCAmelCase_ =self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample
UpperCAmelCase_ =self.scheduler.step_correct(_lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
# prediction step
UpperCAmelCase_ =model(_lowerCAmelCase , _lowerCAmelCase ).sample
UpperCAmelCase_ =self.scheduler.step_pred(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =output.prev_sample, output.prev_sample_mean
UpperCAmelCase_ =sample_mean.clamp(0 , 1 )
UpperCAmelCase_ =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ =self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_lowerCAmelCase )
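# Editor's sketch (added): a hedged usage example, assuming the canonical
# `diffusers` pipeline name behind the obfuscated class above and a compatible
# SDE-VE checkpoint (both assumptions):
#
#   from diffusers import ScoreSdeVePipeline
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-ffhq-256")
#   image = pipe(num_inference_steps=2000).images[0]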
| 54 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: int ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: List[List[List[str]]] , _lowerCAmelCase: List[List[str]] , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 4 , ) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_lowerCAmelCase , hypotheses=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase )
}
| 54 | 1 |
import argparse
import os
import re
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_dummies.py
__lowercase : Optional[Any] ="""src/diffusers"""
# Matches is_xxx_available()
__lowercase : int =re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
__lowercase : Any =re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
__lowercase : Dict ="""
{0} = None
"""
__lowercase : Dict ="""
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
__lowercase : str ="""
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =_re_backend.findall(lowercase__ )
if len(lowercase__ ) == 0:
return None
return "_and_".join(lowercase__ )
def a__ ( ):
'''simple docstring'''
with open(os.path.join(lowercase__ , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
UpperCAmelCase_ =0
UpperCAmelCase_ ={}
# Go through the end of the file
while line_index < len(lowercase__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
UpperCAmelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
UpperCAmelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(lowercase__ ) and len(lines[line_index] ) > 1:
UpperCAmelCase_ =lines[line_index]
UpperCAmelCase_ =_re_single_line_import.search(lowercase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase__ ) > 0:
UpperCAmelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(lowercase__ )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase__ , lowercase__ )
else:
return DUMMY_CLASS.format(lowercase__ , lowercase__ )
def a__ ( lowercase__=None ):
'''simple docstring'''
if backend_specific_objects is None:
UpperCAmelCase_ =read_init()
# Special-case mapping from backend name to module name, as used by the requires_* backend checks
UpperCAmelCase_ ={}
for backend, objects in backend_specific_objects.items():
UpperCAmelCase_ ="[" + ", ".join(F'"{b}"' for b in backend.split("_and_" ) ) + "]"
UpperCAmelCase_ ="# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase__ , lowercase__ ) for o in objects] )
UpperCAmelCase_ =dummy_file
return dummy_files
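# For intuition, the generated dummy file for a hypothetical torch-only class
# "UNet2DModel" would look roughly like this (illustrative, not repo output):
#
#   # This file is autogenerated by the command `make fix-copies`, do not edit.
#   from ..utils import DummyObject, requires_backends
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])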
def a__ ( lowercase__=False ):
'''simple docstring'''
UpperCAmelCase_ =create_dummy_files()
# Special-case mapping from backend to the shortcut used in utils/dummy_xxx_objects.py filenames
UpperCAmelCase_ ={"torch": "pt"}
# Locate actual dummy modules and read their content.
UpperCAmelCase_ =os.path.join(lowercase__ , "utils" )
UpperCAmelCase_ ={
backend: os.path.join(lowercase__ , F'dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py' )
for backend in dummy_files.keys()
}
UpperCAmelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase__ ):
with open(lowercase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ =f.read()
else:
UpperCAmelCase_ =""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'Updating diffusers.utils.dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py as the main '
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
F'diffusers.utils.dummy_{short_names.get(lowercase__ , lowercase__ )}_objects.py. Run `make fix-copies` '
"to fix this." )
if __name__ == "__main__":
__lowercase : Any =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowercase : Optional[Any] =parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 54 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =KandinskyVaaImgaImgPipeline
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''image''']
_snake_case =[
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_snake_case =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case =False
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ ={
"in_channels": 4,
# Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_unet
UpperCAmelCase_ =self.dummy_movq
UpperCAmelCase_ ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCAmelCase_ =DDIMScheduler(**_lowerCAmelCase )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[Any]=0 ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
# create init_image
UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ =np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ ="A red cartoon frog, 4k"
UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
UpperCAmelCase_ =KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ =pipeline(
image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCAmelCase_ =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
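# End-to-end flow exercised by the slow test above (a sketch): the prior
# pipeline maps the text prompt to image embeddings, and the decoder pipeline
# conditions img2img denoising on those embeddings plus the init image, with
# `strength` controlling how far the init image is pushed toward the prompt.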
| 54 | 1 |
from __future__ import annotations
class A :
def __init__( self: Dict , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
UpperCAmelCase_ =order
# a_{0} ... a_{k}
UpperCAmelCase_ =[1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCAmelCase_ =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCAmelCase_ =[0.0] * self.order
# y[n-1] ... y[n-k]
UpperCAmelCase_ =[0.0] * self.order
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: list[float] , _lowerCAmelCase: list[float] ) -> None:
'''simple docstring'''
if len(_lowerCAmelCase ) < self.order:
UpperCAmelCase_ =[1.0, *a_coeffs]
if len(_lowerCAmelCase ) != self.order + 1:
UpperCAmelCase_ =(
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(_lowerCAmelCase )}'
)
raise ValueError(_lowerCAmelCase )
if len(_lowerCAmelCase ) != self.order + 1:
UpperCAmelCase_ =(
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(_lowerCAmelCase )}'
)
raise ValueError(_lowerCAmelCase )
UpperCAmelCase_ =a_coeffs
UpperCAmelCase_ =b_coeffs
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: float ) -> float:
'''simple docstring'''
UpperCAmelCase_ =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCAmelCase_ =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCAmelCase_ =self.input_history[:-1]
UpperCAmelCase_ =self.output_history[:-1]
UpperCAmelCase_ =sample
UpperCAmelCase_ =result
return result
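# The method above implements the direct-form difference equation (matching
# the loop over i = 1..order and the final normalization by a[0]):
#
#   y[n] = ( b[0]*x[n] + sum_{i=1..k} ( b[i]*x[n-i] - a[i]*y[n-i] ) ) / a[0]
#
# Illustrative pseudocode with hypothetical first-order coefficients (the
# method names set_coefficients/process are conceptual here):
#
#   filt = A(1)
#   filt.set_coefficients([1.0, -0.9], [0.05, 0.05])
#   smoothed = [filt.process(s) for s in (0.0, 1.0, 1.0, 1.0)]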
| 54 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( __lowercase , unittest.TestCase ):
_snake_case =True
_snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ =model_class_name.from_pretrained("roberta-base" , from_pt=_lowerCAmelCase )
UpperCAmelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
| 54 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ =FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ =0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
UpperCAmelCase_ =time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ="a" * 1_0_0_0 + ".lock"
UpperCAmelCase_ =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
UpperCAmelCase_ =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
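# Minimal usage sketch of the API exercised above (same datasets import):
#
#   lock = FileLock("resource.lock")
#   try:
#       with lock.acquire(0.05):  # seconds to wait before giving up
#           ...                   # critical section
#   except Timeout:
#       ...                       # another holder kept the lock too long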
| 54 |
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return (1, 0)
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , a % b )
UpperCAmelCase_ =a // b
return (y, x - k * y)
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , lowercase__ )
UpperCAmelCase_ =na * na
UpperCAmelCase_ =ra * x * na + ra * y * na
return (n % m + m) % m
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
UpperCAmelCase_ =(b % n + n) % n
return b
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
UpperCAmelCase_ =na * na
UpperCAmelCase_ =ra * x * na + ra * y * na
return (n % m + m) % m
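# Worked example (a sketch; function names are conceptual for the defs above):
# to find n with n % 5 == 1 and n % 7 == 3, extended_euclid(5, 7) returns
# (3, -2) since 3*5 - 2*7 == 1, so n = 3*3*5 + 1*(-2)*7 = 45 - 14 = 31;
# indeed 31 % 5 == 1 and 31 % 7 == 3, with modulus m = 5 * 7 = 35.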
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 54 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: int , _lowerCAmelCase: int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =jnp.ones((batch_size, length) ) / length
return scores
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =20
UpperCAmelCase_ =self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
UpperCAmelCase_ =scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase_ =scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase_ =jax.nn.softmax(_lowerCAmelCase , axis=-1 )
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase_ =jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
UpperCAmelCase_ =jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =10
UpperCAmelCase_ =2
# create ramp distribution
UpperCAmelCase_ =np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase_ =ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase_ =FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ =top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase_ =5
UpperCAmelCase_ =FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase_ =np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase_ =top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =10
UpperCAmelCase_ =2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase_ =np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase_ =np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# The distribution should be filtered to keep the minimum number of values whose cumulative probability is >= top_p
# exp (-inf) => 0
UpperCAmelCase_ =np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase_ =np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase_ =ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase_ =top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =20
UpperCAmelCase_ =4
UpperCAmelCase_ =0
UpperCAmelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
UpperCAmelCase_ =ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase_ =5
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =15
UpperCAmelCase_ =min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =20
UpperCAmelCase_ =4
UpperCAmelCase_ =0
UpperCAmelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase_ =ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase_ =1
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase_ =3
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =20
UpperCAmelCase_ =4
UpperCAmelCase_ =0
UpperCAmelCase_ =5
UpperCAmelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase_ =ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase_ =4
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase_ =3
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =4
UpperCAmelCase_ =10
UpperCAmelCase_ =15
UpperCAmelCase_ =2
UpperCAmelCase_ =1
UpperCAmelCase_ =15
# dummy input_ids and scores
UpperCAmelCase_ =ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
UpperCAmelCase_ =input_ids.copy()
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =scores.copy()
# instantiate all dist processors
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ =FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =10
# no processor list
UpperCAmelCase_ =temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
UpperCAmelCase_ =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase_ =processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =4
UpperCAmelCase_ =10
UpperCAmelCase_ =15
UpperCAmelCase_ =2
UpperCAmelCase_ =1
UpperCAmelCase_ =15
# dummy input_ids and scores
UpperCAmelCase_ =ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
UpperCAmelCase_ =input_ids.copy()
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =scores.copy()
# instantiate all dist processors
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ =FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =10
# no processor list
def run_no_processor_list(_lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Dict ):
UpperCAmelCase_ =temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[int] ):
UpperCAmelCase_ =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase_ =processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
UpperCAmelCase_ =jax.jit(_lowerCAmelCase )
UpperCAmelCase_ =jax.jit(_lowerCAmelCase )
UpperCAmelCase_ =jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
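# What the two code paths above must agree on (a sketch): FlaxLogitsProcessorList
# simply applies each processor in order, threading the scores through, i.e.
#
#   scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
#   scores = top_k_warp(input_ids, scores, cur_len=cur_len)
#   ...and so on for the remaining processors,
#
# which is why the jitted "no processor list" and "with processor list" runs
# are asserted to produce identical scores.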
| 54 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowercase : Tuple =logging.getLogger(__name__)
__lowercase : Optional[int] =tf.data.AUTOTUNE
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase__ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase__ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase__ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase__ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase__ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase__ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase__ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase__ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase__ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase__ , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase__ , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase__ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase__ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase__ , required=lowercase__ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase__ , help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ =parser.parse_args()
return args
def a__ ( lowercase__ ):
'''simple docstring'''
try:
if args.tpu_name:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =0
for file in file_list:
UpperCAmelCase_ =file.split("/" )[-1]
UpperCAmelCase_ =re.search(R"-\d+-(\d+)\.tfrecord" , lowercase__ ).group(1 )
UpperCAmelCase_ =int(lowercase__ )
num_samples += sample_count
return num_samples
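# Example of the shard naming this expects (illustrative): a file such as
# "train-00003-1024.tfrecord" matches r"-\d+-(\d+)\.tfrecord", so group(1)
# contributes 1024 samples to the running total.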
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
UpperCAmelCase_ =dataset.shuffle(len(lowercase__ ) )
UpperCAmelCase_ =tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase_ =dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase_ =dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase_ =dataset.batch(lowercase__ , drop_remainder=lowercase__ )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
UpperCAmelCase_ =dataset.prefetch(lowercase__ )
return dataset
def a__ ( lowercase__ ):
'''simple docstring'''
if not args.no_tpu:
UpperCAmelCase_ =initialize_tpu(lowercase__ )
UpperCAmelCase_ =tf.distribute.TPUStrategy(lowercase__ )
else:
UpperCAmelCase_ =tf.distribute.OneDeviceStrategy(device="/gpu:0" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ =AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ =AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ =tokenizer.vocab_size
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ =steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ =TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ =create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["accuracy"] )
def decode_fn(lowercase__ ):
UpperCAmelCase_ ={
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ =DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="tf" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
UpperCAmelCase_ =(
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ =data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
UpperCAmelCase_ =args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
UpperCAmelCase_ =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
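# Example invocation (script name and paths are placeholders):
#
#   python train_mlm_tpu.py \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16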
| 54 | 1 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : int =logging.get_logger(__name__)
__lowercase : Optional[Any] ={
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class A ( __lowercase ):
_snake_case ='''align_text_model'''
def __init__( self: Optional[Any] , _lowerCAmelCase: Optional[Any]=3_0522 , _lowerCAmelCase: int=768 , _lowerCAmelCase: List[str]=12 , _lowerCAmelCase: Dict=12 , _lowerCAmelCase: Optional[Any]=3072 , _lowerCAmelCase: Tuple="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=0.1 , _lowerCAmelCase: Dict=512 , _lowerCAmelCase: Optional[int]=2 , _lowerCAmelCase: Any=0.02 , _lowerCAmelCase: Union[str, Any]=1e-12 , _lowerCAmelCase: Tuple=0 , _lowerCAmelCase: Tuple="absolute" , _lowerCAmelCase: int=True , **_lowerCAmelCase: Tuple , ) -> Dict:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =position_embedding_type
UpperCAmelCase_ =use_cache
UpperCAmelCase_ =pad_token_id
@classmethod
def lowerCAmelCase__ ( cls: str , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: Any ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
UpperCAmelCase_ =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class A ( __lowercase ):
_snake_case ='''align_vision_model'''
def __init__( self: int , _lowerCAmelCase: int = 3 , _lowerCAmelCase: int = 600 , _lowerCAmelCase: float = 2.0 , _lowerCAmelCase: float = 3.1 , _lowerCAmelCase: int = 8 , _lowerCAmelCase: List[int] = [3, 3, 5, 3, 5, 5, 3] , _lowerCAmelCase: List[int] = [32, 16, 24, 40, 80, 112, 192] , _lowerCAmelCase: List[int] = [16, 24, 40, 80, 112, 192, 320] , _lowerCAmelCase: List[int] = [] , _lowerCAmelCase: List[int] = [1, 2, 2, 2, 1, 2, 1] , _lowerCAmelCase: List[int] = [1, 2, 2, 3, 3, 4, 1] , _lowerCAmelCase: List[int] = [1, 6, 6, 6, 6, 6, 6] , _lowerCAmelCase: float = 0.25 , _lowerCAmelCase: str = "swish" , _lowerCAmelCase: int = 2560 , _lowerCAmelCase: str = "mean" , _lowerCAmelCase: float = 0.02 , _lowerCAmelCase: float = 0.0_01 , _lowerCAmelCase: float = 0.99 , _lowerCAmelCase: float = 0.2 , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =image_size
UpperCAmelCase_ =width_coefficient
UpperCAmelCase_ =depth_coefficient
UpperCAmelCase_ =depth_divisor
UpperCAmelCase_ =kernel_sizes
UpperCAmelCase_ =in_channels
UpperCAmelCase_ =out_channels
UpperCAmelCase_ =depthwise_padding
UpperCAmelCase_ =strides
UpperCAmelCase_ =num_block_repeats
UpperCAmelCase_ =expand_ratios
UpperCAmelCase_ =squeeze_expansion_ratio
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dim
UpperCAmelCase_ =pooling_type
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =batch_norm_eps
UpperCAmelCase_ =batch_norm_momentum
UpperCAmelCase_ =drop_connect_rate
UpperCAmelCase_ =sum(_lowerCAmelCase ) * 4
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: Optional[int] ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
UpperCAmelCase_ =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class A ( __lowercase ):
_snake_case ='''align'''
_snake_case =True
def __init__( self: str , _lowerCAmelCase: Optional[Any]=None , _lowerCAmelCase: List[str]=None , _lowerCAmelCase: List[Any]=640 , _lowerCAmelCase: Any=1.0 , _lowerCAmelCase: Tuple=0.02 , **_lowerCAmelCase: List[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
if text_config is None:
UpperCAmelCase_ ={}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
UpperCAmelCase_ ={}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
UpperCAmelCase_ =AlignTextConfig(**_lowerCAmelCase )
UpperCAmelCase_ =AlignVisionConfig(**_lowerCAmelCase )
UpperCAmelCase_ =projection_dim
UpperCAmelCase_ =temperature_init_value
UpperCAmelCase_ =initializer_range
@classmethod
def lowerCAmelCase__ ( cls: Any , _lowerCAmelCase: AlignTextConfig , _lowerCAmelCase: AlignVisionConfig , **_lowerCAmelCase: Optional[Any] ) -> List[str]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =copy.deepcopy(self.__dict__ )
UpperCAmelCase_ =self.text_config.to_dict()
UpperCAmelCase_ =self.vision_config.to_dict()
UpperCAmelCase_ =self.__class__.model_type
return output
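# Composition sketch (hypothetical values; AlignTextConfig/AlignVisionConfig/
# AlignConfig are the class names referenced in the code above):
#
#   text_cfg = AlignTextConfig(vocab_size=30522)
#   vision_cfg = AlignVisionConfig(image_size=600)
#   cfg = AlignConfig(text_config=text_cfg.to_dict(),
#                     vision_config=vision_cfg.to_dict(),
#                     projection_dim=640)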
| 54 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
| 54 | 1 |
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (S-DES)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    # p4_table is defined in the __main__ block below
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (round keys applied in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 54 |
def calc_profit(profit, weight, max_weight):
    """Greedy fractional knapsack: maximise profit within a weight budget."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List storing the profit gained per 1 kg of each item:
    # calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # A sorted copy of the list, profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop until the total weight reaches the max limit (e.g. 15 kg) or i reaches length
    while limit <= max_weight and i < length:
        # the greatest remaining element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as already taken
        # check if the item's whole weight fits in the capacity left over
        # from the items taken before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Add the item's full profit, since weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item no longer fits whole, so take only the fraction that
            # fills the remaining capacity: (max_weight - limit) / weight[index].
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call
    calc_profit(profit, weight, max_weight)
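# A minimal worked example (illustrative values, not from the original file):
# calc_profit(profit=[1, 2, 3], weight=[3, 4, 5], max_weight=15) takes items in
# decreasing profit/weight order (3/5, then 2/4, then 1/3); all three fit in
# the 15 kg budget, so the whole-item branch fires each time and the result is
# 1 + 2 + 3 == 6.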
| 54 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowercase : str =logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase : Dict ="""
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
'''simple docstring'''
UpperCAmelCase_ =height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ =width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
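# Worked example of the rounding above (values illustrative): with the default
# scale_factor=8, a 768x768 request gives 768 // 8**2 == 12 exactly, so the
# function returns (12 * 8, 12 * 8) == (96, 96); a 500x500 request leaves a
# remainder (500 // 64 == 7 rem 52), rounds up to 8, and returns (64, 64).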
class A ( __lowercase ):
def __init__( self: Optional[int] , _lowerCAmelCase: UNetaDConditionModel , _lowerCAmelCase: DDPMScheduler , _lowerCAmelCase: VQModel , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , movq=_lowerCAmelCase , )
UpperCAmelCase_ =2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Any ) -> str:
'''simple docstring'''
if latents is None:
UpperCAmelCase_ =randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
UpperCAmelCase_ =latents.to(_lowerCAmelCase )
UpperCAmelCase_ =latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Dict=0 ) -> int:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ =torch.device(F'cuda:{gpu_id}' )
UpperCAmelCase_ =[
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Dict=0 ) -> Tuple:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ =torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ =None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ =cpu_offload_with_hook(_lowerCAmelCase , _lowerCAmelCase , prev_module_hook=_lowerCAmelCase )
# We'll offload the last model manually.
UpperCAmelCase_ =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self: List[str] , _lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: int = 512 , _lowerCAmelCase: int = 512 , _lowerCAmelCase: int = 100 , _lowerCAmelCase: float = 4.0 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[torch.FloatTensor] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self._execution_device
UpperCAmelCase_ =guidance_scale > 1.0
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=0 )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=0 )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =torch.cat(_lowerCAmelCase , dim=0 )
UpperCAmelCase_ =image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
UpperCAmelCase_ =image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
UpperCAmelCase_ =negative_image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
UpperCAmelCase_ =hint.repeat_interleave(_lowerCAmelCase , dim=0 )
UpperCAmelCase_ =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
UpperCAmelCase_ =torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=_lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
UpperCAmelCase_ =self.scheduler.timesteps
UpperCAmelCase_ =self.movq.config.latent_channels
UpperCAmelCase_ , UpperCAmelCase_ =downscale_height_and_width(_lowerCAmelCase , _lowerCAmelCase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ ={"image_embeds": image_embeds, "hint": hint}
UpperCAmelCase_ =self.unet(
sample=_lowerCAmelCase , timestep=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , added_cond_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ =noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ =noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ =variance_pred.chunk(2 )
UpperCAmelCase_ =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ =self.scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase , )[0]
# post-processing
UpperCAmelCase_ =self.movq.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
UpperCAmelCase_ =image * 0.5 + 0.5
UpperCAmelCase_ =image.clamp(0 , 1 )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ =self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : Dict ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class A ( __lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_snake_case =field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_snake_case =Features({'''text''': Value('''string''' )} )
_snake_case =Features({'''summary''': Value('''string''' )} )
_snake_case ="text"
_snake_case ="summary"
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict[str, str]:
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
| 54 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Compute and pickle the token length of every train/val example for dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
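# A sketch of how this script is meant to be invoked through fire; the dataset
# layout (train.source/train.target etc. under data_dir) is an assumption based
# on the seq2seq examples' conventions, not something stated in this file:
#   python save_len_file.py t5-small path/to/data_dir --max_source_length 1024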
| 54 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
@property
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_uncond_unet
UpperCAmelCase_ =KarrasVeScheduler()
UpperCAmelCase_ =KarrasVePipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.manual_seed(0 )
UpperCAmelCase_ =pipe(num_inference_steps=2 , generator=_lowerCAmelCase , output_type="numpy" ).images
UpperCAmelCase_ =torch.manual_seed(0 )
UpperCAmelCase_ =pipe(num_inference_steps=2 , generator=_lowerCAmelCase , output_type="numpy" , return_dict=_lowerCAmelCase )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ ="google/ncsnpp-celebahq-256"
UpperCAmelCase_ =UNetaDModel.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =KarrasVeScheduler()
UpperCAmelCase_ =KarrasVePipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.manual_seed(0 )
UpperCAmelCase_ =pipe(num_inference_steps=20 , generator=_lowerCAmelCase , output_type="numpy" ).images
UpperCAmelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ =np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 54 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =TFViTModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
| 54 | 1 |
from __future__ import annotations
def carrier_concentration(electron_conc, hole_conc, intrinsic_conc):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
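# Worked example of the mass-action law (n * p == n_i ** 2) implemented above;
# the values are illustrative and the function name is reconstructed from the
# return keys:
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ("intrinsic_conc", 50.0)   since (25 * 100) ** 0.5 == 50.0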
| 54 |
from __future__ import annotations
def binary_search(a_list, item):
    """Recursive binary search over a sorted list; True iff `item` is present."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 54 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # the dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=lowercase__ ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_info_dataset_name_in_asdict(split_info):
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 54 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowercase : Any =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowercase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowercase : List[str] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowercase : str =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowercase : Union[str, Any] =(
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowercase : str =(
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowercase : int =(
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Pick two random hands out of SORTED_HANDS and return them with the expected result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands=100):
    """Yield `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand(lowercase__ ) for hand in SORTED_HANDS]
UpperCAmelCase_ =poker_hands.copy()
shuffle(lowercase__ )
UpperCAmelCase_ =chain(sorted(lowercase__ ) )
for index, hand in enumerate(lowercase__ ):
assert hand == poker_hands[index]
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=lowercase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand("2C 4S AS 3D 5C" )
UpperCAmelCase_ =True
UpperCAmelCase_ =[5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =0
UpperCAmelCase_ =os.path.abspath(os.path.dirname(lowercase__ ) )
UpperCAmelCase_ =os.path.join(lowercase__ , "poker_hands.txt" )
with open(lowercase__ ) as file_hand:
for line in file_hand:
UpperCAmelCase_ =line[:1_4].strip()
UpperCAmelCase_ =line[1_5:].strip()
UpperCAmelCase_ , UpperCAmelCase_ =PokerHand(lowercase__ ), PokerHand(lowercase__ )
UpperCAmelCase_ =player.compare_with(lowercase__ )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 54 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowercase : Optional[int] =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Path , _lowerCAmelCase: Union[str, None] = None , _lowerCAmelCase: Union[List[str], None] = None , _lowerCAmelCase: Union[str, List[str], None] = None , _lowerCAmelCase: bool = True , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =[file for file in os.listdir(_lowerCAmelCase ) if os.path.isfile(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )]
if identifier is not None:
UpperCAmelCase_ =[file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for n_ in n_identifier:
UpperCAmelCase_ =[file for file in files if n_ not in file]
else:
UpperCAmelCase_ =[file for file in files if n_identifier not in file]
UpperCAmelCase_ =ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ =[file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , _lowerCAmelCase )
if only_modules:
UpperCAmelCase_ =file.split("." )[0]
try:
UpperCAmelCase_ =getattr(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =doctest.DocTestSuite(_lowerCAmelCase )
UpperCAmelCase_ =unittest.TextTestRunner().run(_lowerCAmelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'{module_identifier} is not a module.' )
else:
UpperCAmelCase_ =doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ ="modeling"
UpperCAmelCase_ =[
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase , ignore_files=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ ="tokenization"
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ ="configuration"
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =Path("src/transformers" )
UpperCAmelCase_ =["configuration", "modeling", "tokenization"]
self.analyze_directory(_lowerCAmelCase , n_identifier=_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =Path("docs/source" )
UpperCAmelCase_ =["favicon.ico"]
self.analyze_directory(_lowerCAmelCase , ignore_files=_lowerCAmelCase , only_modules=_lowerCAmelCase )
| 54 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: List[Any] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ =crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: float , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Any , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase_ =size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ =int(shortest_edge / crop_pct )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
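# Worked example of the resize logic above (numbers illustrative): with the
# default crop_pct == 224 / 256 and size == {"shortest_edge": 224}, an image is
# first resized so its shorter side is int(224 / 0.875) == 256 pixels and then
# center-cropped to 224x224; for shortest_edge >= 384 the image is instead
# warped straight to (shortest_edge, shortest_edge) with no crop.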
| 54 | 1 |
import math
def real_power(apparent_power, power_factor):
    """Real (active) power P = S * pf, in watts."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power, power_factor):
    """Reactive power Q = S * sqrt(1 - pf**2), in volt-amperes reactive."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
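# Worked examples (illustrative values; the names real_power/reactive_power are
# reconstructed from the formulas P = S * pf and Q = S * sqrt(1 - pf**2)):
#   real_power(100, 0.9)      -> 90.0                       # watts
#   reactive_power(100, 0.9)  -> 100 * sqrt(0.19) ~= 43.589  # volt-amperes reactive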
| 54 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
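# Worked example of the parsing above (illustrative): for the summary string
# "33 failed, 101 passed in 3h2m" the split yields
# ["33", "failed,", "101", "passed", "in", "3h2m"], so the function returns
# failed == 33, success == 101 and time_spent == "3h2m"; when pytest wraps the
# summary in "=" signs, the second-to-last token is taken instead.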
def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False
    return failures
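# Worked example of the failures-short parsing above (illustrative report):
#   ____ [doctest] transformers.models.bert.modeling_bert.BertModel ____
#   053     Example: ...
# The regex keys on "_ [doctest]" header lines, takes the dotted test name from
# the third space-separated token, and records the first following line that
# does not start with a line number as that test's failure summary.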
class A :
    def __init__(self, title, doc_test_results):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and successes of the doc tests
        self.doc_test_results = doc_test_results
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self._time_spent]
UpperCAmelCase_ =0
for time in time_spent:
UpperCAmelCase_ =time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCAmelCase ) == 1:
UpperCAmelCase_ =[0, 0, time_parts[0]]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'{int(_lowerCAmelCase )}h{int(_lowerCAmelCase )}m{int(_lowerCAmelCase )}s'
@property
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =40
UpperCAmelCase_ ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
UpperCAmelCase_ =""
for category, failures in category_failures.items():
if len(_lowerCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
        text = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=text , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =""
for key, value in failures.items():
UpperCAmelCase_ =value[:200] + " [Truncated]" if len(_lowerCAmelCase ) > 250 else value
failures_text += F'*{key}*\n_{value}_\n\n'
UpperCAmelCase_ =job_name
UpperCAmelCase_ ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
UpperCAmelCase_ ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
UpperCAmelCase_ =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        UpperCAmelCase_ =sorted(self.doc_test_results.items() , key=lambda _lowerCAmelCase : _lowerCAmelCase[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
UpperCAmelCase_ =F'*Num failures* :{len(job_result["failed"] )} \n'
UpperCAmelCase_ =job_result["failures"]
UpperCAmelCase_ =self.get_reply_blocks(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text=_lowerCAmelCase )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=_lowerCAmelCase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =os.environ["GITHUB_RUN_ID"]
UpperCAmelCase_ =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
UpperCAmelCase_ =requests.get(lowercase__ ).json()
UpperCAmelCase_ ={}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
UpperCAmelCase_ =math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(lowercase__ ):
UpperCAmelCase_ =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase__ )
return {}
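# Pagination sketch (illustrative numbers, not from a real run): with
# per_page=100 and, say, total_count == 250, the loop above fetches
# math.ceil((250 - 100) / 100) == 2 extra pages, i.e. "&page=2" and "&page=3".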
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
if os.path.exists(lowercase__ ):
UpperCAmelCase_ =os.listdir(lowercase__ )
for file in files:
try:
with open(os.path.join(lowercase__ , lowercase__ ) , encoding="utf-8" ) as f:
UpperCAmelCase_ =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase__ , lowercase__ )}.' ) from e
return _artifact
def a__ ( ):
'''simple docstring'''
class A :
def __init__( self: Tuple , _lowerCAmelCase: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =name
UpperCAmelCase_ =[]
def __str__( self: Optional[int] ) -> Tuple:
'''simple docstring'''
return self.name
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: str ) -> List[Any]:
'''simple docstring'''
self.paths.append({"name": self.name, "path": path} )
UpperCAmelCase_ ={}
UpperCAmelCase_ =filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCAmelCase_ =directory
if artifact_name not in _available_artifacts:
UpperCAmelCase_ =Artifact(lowercase__ )
_available_artifacts[artifact_name].add_path(lowercase__ )
return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 54 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class A :
def __init__( self: int , _lowerCAmelCase: Dict , _lowerCAmelCase: Tuple=13 , _lowerCAmelCase: str=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: Optional[Any]=True , _lowerCAmelCase: Any=False , _lowerCAmelCase: Optional[int]=True , _lowerCAmelCase: Optional[Any]=99 , _lowerCAmelCase: Union[str, Any]=32 , _lowerCAmelCase: List[str]=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Optional[Any]=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: Any=512 , _lowerCAmelCase: str=16 , _lowerCAmelCase: Union[str, Any]=2 , _lowerCAmelCase: Any=0.02 , _lowerCAmelCase: str=3 , _lowerCAmelCase: Any=4 , _lowerCAmelCase: List[Any]=None , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_input_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_labels
UpperCAmelCase_ =num_choices
UpperCAmelCase_ =scope
def lowerCAmelCase__ ( self: List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_input_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =None
UpperCAmelCase_ =None
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Any , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[str] , _lowerCAmelCase: Any , _lowerCAmelCase: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =LlamaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: Any , _lowerCAmelCase: Dict , _lowerCAmelCase: Tuple , _lowerCAmelCase: int , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =True
UpperCAmelCase_ =LlamaModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
UpperCAmelCase_ =model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
UpperCAmelCase_ =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: Tuple , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: str , _lowerCAmelCase: Dict , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: Any , ) -> str:
'''simple docstring'''
UpperCAmelCase_ =True
UpperCAmelCase_ =True
UpperCAmelCase_ =LlamaForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
UpperCAmelCase_ =model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , )
UpperCAmelCase_ =outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
UpperCAmelCase_ =ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ =ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to the input_ids and the attention mask
UpperCAmelCase_ =torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ =torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ =model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["hidden_states"][0]
UpperCAmelCase_ =model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ =ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ =output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_snake_case =(LlamaForCausalLM,) if is_torch_available() else ()
_snake_case =(
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =LlamaModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[int] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ =type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =3
UpperCAmelCase_ =input_dict["input_ids"]
UpperCAmelCase_ =input_ids.ne(1 ).to(_lowerCAmelCase )
UpperCAmelCase_ =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ =LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =3
UpperCAmelCase_ ="single_label_classification"
UpperCAmelCase_ =input_dict["input_ids"]
UpperCAmelCase_ =input_ids.ne(1 ).to(_lowerCAmelCase )
UpperCAmelCase_ =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ =LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =3
UpperCAmelCase_ ="multi_label_classification"
UpperCAmelCase_ =input_dict["input_ids"]
UpperCAmelCase_ =input_ids.ne(1 ).to(_lowerCAmelCase )
UpperCAmelCase_ =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ =LlamaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ =ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_ =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ =LlamaModel(_lowerCAmelCase )
original_model.to(_lowerCAmelCase )
original_model.eval()
UpperCAmelCase_ =original_model(_lowerCAmelCase ).last_hidden_state
UpperCAmelCase_ =original_model(_lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ ={"type": scaling_type, "factor": 10.0}
UpperCAmelCase_ =LlamaModel(_lowerCAmelCase )
scaled_model.to(_lowerCAmelCase )
scaled_model.eval()
UpperCAmelCase_ =scaled_model(_lowerCAmelCase ).last_hidden_state
UpperCAmelCase_ =scaled_model(_lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-5 ) )
@require_torch
class A ( unittest.TestCase ):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =[1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
UpperCAmelCase_ =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCAmelCase_ =torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ =torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =[1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
UpperCAmelCase_ =model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
UpperCAmelCase_ =torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ =torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =[1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
UpperCAmelCase_ =model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
UpperCAmelCase_ =torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ =torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        "Logits are not exactly the same; once we fix the instabilities, we will update! Also this is going to be a `too_slow` test." )
@slow
def lowerCAmelCase__ ( self: Tuple ) -> int:
'''simple docstring'''
UpperCAmelCase_ =[1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCAmelCase_ =LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
UpperCAmelCase_ =model(torch.tensor(_lowerCAmelCase ) )
UpperCAmelCase_ =torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
UpperCAmelCase_ =torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Model is curently gated" )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
UpperCAmelCase_ ="Simply put, the theory of relativity states that "
UpperCAmelCase_ =LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , return_tensors="pt" )
UpperCAmelCase_ =LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=_lowerCAmelCase )
# greedy generation outputs
UpperCAmelCase_ =model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 54 |
def a__ ( lowercase__ = 2_0_0 ):
'''simple docstring'''
UpperCAmelCase_ =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
UpperCAmelCase_ =[0] * (pence + 1)
UpperCAmelCase_ =1 # base case: 1 way to make 0 pence
for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
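
# A minimal, self-contained sketch of the same unbounded coin-change
# recurrence on a tiny input (hypothetical helper, illustration only):
def _ways_demo(target: int, coins=(1, 2, 5)) -> int:
    ways = [1] + [0] * target  # ways[0] = 1: one way to make 0
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]

# _ways_demo(5) == 4, since 5 = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1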
| 54 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] =logging.get_logger(__name__)
__lowercase : Optional[Any] ={
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class A ( __lowercase ):
_snake_case ='''xlm-prophetnet'''
_snake_case =['''past_key_values''']
_snake_case ={
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self: int , _lowerCAmelCase: Optional[float] = 0.1 , _lowerCAmelCase: Optional[Union[str, Callable]] = "gelu" , _lowerCAmelCase: Optional[int] = 3_0522 , _lowerCAmelCase: Optional[int] = 1024 , _lowerCAmelCase: Optional[int] = 4096 , _lowerCAmelCase: Optional[int] = 12 , _lowerCAmelCase: Optional[int] = 16 , _lowerCAmelCase: Optional[int] = 4096 , _lowerCAmelCase: Optional[int] = 12 , _lowerCAmelCase: Optional[int] = 16 , _lowerCAmelCase: Optional[float] = 0.1 , _lowerCAmelCase: Optional[float] = 0.1 , _lowerCAmelCase: Optional[int] = 512 , _lowerCAmelCase: Optional[float] = 0.02 , _lowerCAmelCase: Optional[bool] = True , _lowerCAmelCase: Optional[bool] = True , _lowerCAmelCase: Optional[int] = 0 , _lowerCAmelCase: Optional[int] = 2 , _lowerCAmelCase: Optional[int] = 32 , _lowerCAmelCase: Optional[int] = 128 , _lowerCAmelCase: Optional[bool] = False , _lowerCAmelCase: Optional[float] = 0.0 , _lowerCAmelCase: Optional[bool] = True , _lowerCAmelCase: Optional[int] = 0 , _lowerCAmelCase: Optional[int] = 1 , _lowerCAmelCase: Optional[int] = 2 , **_lowerCAmelCase: str , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =encoder_ffn_dim
UpperCAmelCase_ =num_encoder_layers
UpperCAmelCase_ =num_encoder_attention_heads
UpperCAmelCase_ =decoder_ffn_dim
UpperCAmelCase_ =num_decoder_layers
UpperCAmelCase_ =num_decoder_attention_heads
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =init_std # Normal(0, this parameter)
UpperCAmelCase_ =activation_function
# parameters for xlmprophetnet
UpperCAmelCase_ =ngram
UpperCAmelCase_ =num_buckets
UpperCAmelCase_ =relative_max_distance
UpperCAmelCase_ =disable_ngram_loss
UpperCAmelCase_ =eps
# 3 Types of Dropout
UpperCAmelCase_ =attention_dropout
UpperCAmelCase_ =activation_dropout
UpperCAmelCase_ =dropout
UpperCAmelCase_ =use_cache
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , add_cross_attention=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
@property
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 54 |
import sys
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =[[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
UpperCAmelCase_ =[[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
for chain_length in range(2 , lowercase__ ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ =a + chain_length - 1
UpperCAmelCase_ =sys.maxsize
for c in range(lowercase__ , lowercase__ ):
UpperCAmelCase_ =(
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ =cost
UpperCAmelCase_ =c
return matrix, sol
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if i == j:
print("A" + str(lowercase__ ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(lowercase__ , lowercase__ , optimal_solution[i][j] )
print_optiomal_solution(lowercase__ , optimal_solution[i][j] + 1 , lowercase__ )
print(")" , end=" " )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
UpperCAmelCase_ =len(lowercase__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ =matrix_chain_order(lowercase__ )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(lowercase__ , 1 , n - 1 )
if __name__ == "__main__":
main()
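
# Quick worked check of the recurrence above on a tiny chain (hypothetical
# helper, illustration only): for dimensions [10, 20, 30], the single product
# A1(10x20) @ A2(20x30) costs 10 * 20 * 30 = 6000 scalar multiplications.
def _chain_cost_demo(dims):
    n = len(dims)
    m = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            m[a][b] = sys.maxsize
            for c in range(a, b):
                m[a][b] = min(m[a][b], m[a][c] + m[c + 1][b] + dims[a - 1] * dims[c] * dims[b])
    return m[1][n - 1]

# _chain_cost_demo([10, 20, 30]) == 6000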
| 54 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: Tuple , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: PILImageResampling = PIL.Image.BICUBIC , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[Any] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"height": 256, "width": 256}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase )
UpperCAmelCase_ =crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_center_crop
UpperCAmelCase_ =crop_size
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: PILImageResampling = PIL.Image.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
_lowerCAmelCase , size=(size["height"], size["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: int , ) -> Any:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: List[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: str=None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase )
UpperCAmelCase_ =crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ =[self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
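# Preprocessing order implemented above (a summary, not additional API):
# resize -> center_crop -> rescale (default 1/255) -> normalize (ImageNet
# mean/std by default) -> convert to channels-first and wrap the arrays in a
# BatchFeature under the "pixel_values" key.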
| 54 |
from math import loga
def a__ ( lowercase__ ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(lowercase__ , lowercase__ ):
raise TypeError("Input value must be a 'int' type" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
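# Worked example (illustrative): for a = 12 (0b1100), a & -a isolates the
# lowest set bit, giving 4 (0b0100), and int(log2(4)) == 2 -- the bit index.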
| 54 | 1 |
from __future__ import annotations
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =0.00
UpperCAmelCase_ =0
for resistor in resistors:
if resistor <= 0:
UpperCAmelCase_ =F'Resistor at index {index} has a negative or zero value!'
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =0.00
UpperCAmelCase_ =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCAmelCase_ =F'Resistor at index {index} has a negative value!'
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
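
# A minimal standalone sketch of the two formulas above (hypothetical helper
# names, illustration only):
def _parallel_demo(resistances):
    return 1 / sum(1 / r for r in resistances)  # 1 / (1/R1 + 1/R2 + ...)

def _series_demo(resistances):
    return sum(resistances)  # R1 + R2 + ...

# _parallel_demo([4.0, 4.0]) == 2.0 and _series_demo([4.0, 4.0]) == 8.0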
| 54 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
UpperCAmelCase_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase_ =sd.pop(lowercase__ )
UpperCAmelCase_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase_ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase_ =value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores its QKV weight separated as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =torch.split(lowercase__ , depth // 3 , dim=0 )
UpperCAmelCase_ =q
UpperCAmelCase_ =k
UpperCAmelCase_ =v
del sd[key]
return sd
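
def _split_qkv_demo():
    # Shape sketch for the split above (assumed toy sizes, not checkpoint
    # values): a fused (3*d, d) projection splits into three (d, d) blocks.
    fused = torch.zeros(3 * 8, 8)
    q, k, v = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert q.shape == k.shape == v.shape == (8, 8)
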
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =load_checkpoint(lowercase__ )
if config is not None:
UpperCAmelCase_ =OPTConfig.from_pretrained(lowercase__ )
else:
UpperCAmelCase_ =OPTConfig()
UpperCAmelCase_ =OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowercase : str =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 54 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowercase : Optional[Any] =TypeVar("""T""")
class A ( Generic[T] ):
def __init__( self: List[Any] , _lowerCAmelCase: T ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =data
UpperCAmelCase_ =None
def __str__( self: str ) -> str:
'''simple docstring'''
return F'{self.data}'
class A ( Generic[T] ):
def __init__( self: Optional[int] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =None
def __iter__( self: Optional[Any] ) -> Iterator[T]:
'''simple docstring'''
UpperCAmelCase_ =self.top
while node:
yield node.data
UpperCAmelCase_ =node.next
def __str__( self: Union[str, Any] ) -> str:
'''simple docstring'''
return "->".join([str(_lowerCAmelCase ) for item in self] )
def __len__( self: Union[str, Any] ) -> int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self: List[Any] ) -> bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: T ) -> None:
'''simple docstring'''
UpperCAmelCase_ =Node(_lowerCAmelCase )
if not self.is_empty():
UpperCAmelCase_ =self.top
UpperCAmelCase_ =node
def lowerCAmelCase__ ( self: List[str] ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , _lowerCAmelCase )
UpperCAmelCase_ =self.top
UpperCAmelCase_ =self.top.next
return pop_node.data
def lowerCAmelCase__ ( self: Any ) -> T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self: Tuple ) -> None:
'''simple docstring'''
UpperCAmelCase_ =None
if __name__ == "__main__":
from doctest import testmod
testmod()
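# Behavioural sketch of the LIFO stack above (conceptual; class and method
# names are mangled in this dump):
# push(1); push(2)  -> stack prints as "2->1"
# pop()  -> 2       -> stack prints as "1"
# peek() -> 1       -> stack is unchanged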
| 54 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowercase : str ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =(images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ =numpy_to_pil(lowercase__ )
return images
def a__ ( lowercase__ ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase_ =images[None, ...]
UpperCAmelCase_ =(images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase_ =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
UpperCAmelCase_ =[Image.fromarray(lowercase__ ) for image in images]
return pil_images
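# Illustrative shapes (assumed, not from the source): a float array of shape
# (H, W, 3) with values in [0, 1] becomes one RGB PIL image, while shape
# (H, W, 1) takes the single-channel branch above and becomes a mode "L" image.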
| 54 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
__lowercase : List[str] =tuple[int, int]
class A :
def __init__( self: Any , _lowerCAmelCase: set[int] , _lowerCAmelCase: Mapping[EdgeT, int] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =vertices
UpperCAmelCase_ ={
(min(_lowerCAmelCase ), max(_lowerCAmelCase )): weight for edge, weight in edges.items()
}
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: EdgeT , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
UpperCAmelCase_ =weight
def lowerCAmelCase__ ( self: str ) -> Graph:
'''simple docstring'''
UpperCAmelCase_ =Graph({min(self.vertices )} , {} )
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
while len(subgraph.vertices ) < len(self.vertices ):
UpperCAmelCase_ =max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
UpperCAmelCase_ =edge
UpperCAmelCase_ =weight
subgraph.add_edge(_lowerCAmelCase , _lowerCAmelCase )
return subgraph
def a__ ( lowercase__ = "p107_network.txt" ):
'''simple docstring'''
UpperCAmelCase_ =os.path.abspath(os.path.dirname(lowercase__ ) )
UpperCAmelCase_ =os.path.join(lowercase__ , lowercase__ )
UpperCAmelCase_ ={}
UpperCAmelCase_ =42
UpperCAmelCase_ =42
UpperCAmelCase_ =42
with open(lowercase__ ) as f:
UpperCAmelCase_ =f.read().strip().split("\n" )
UpperCAmelCase_ =[line.split("," ) for line in data]
for edgea in range(1 , len(lowercase__ ) ):
for edgea in range(lowercase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
UpperCAmelCase_ =int(adjaceny_matrix[edgea][edgea] )
UpperCAmelCase_ =Graph(set(range(len(lowercase__ ) ) ) , lowercase__ )
UpperCAmelCase_ =graph.prims_algorithm()
UpperCAmelCase_ =sum(graph.edges.values() )
UpperCAmelCase_ =sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54 |
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =int(lowercase__ )
if n_element < 1:
UpperCAmelCase_ =ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ =[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =(0, 0, 0)
UpperCAmelCase_ =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 1 |
from __future__ import annotations
class A :
def __init__( self: Union[str, Any] , _lowerCAmelCase: int = 0 ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =key
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: int ) -> list[str]:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_lowerCAmelCase ) ^ key ) for ch in content]
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: str , _lowerCAmelCase: int ) -> list[str]:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(_lowerCAmelCase ) ^ key ) for ch in content]
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: int = 0 ) -> str:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
UpperCAmelCase_ =""
for ch in content:
ans += chr(ord(_lowerCAmelCase ) ^ key )
return ans
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: int = 0 ) -> str:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
UpperCAmelCase_ =""
for ch in content:
ans += chr(ord(_lowerCAmelCase ) ^ key )
return ans
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: int = 0 ) -> bool:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
try:
with open(_lowerCAmelCase ) as fin, open("encrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_lowerCAmelCase , _lowerCAmelCase ) )
except OSError:
return False
return True
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: str , _lowerCAmelCase: int ) -> bool:
'''simple docstring'''
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
try:
with open(_lowerCAmelCase ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(_lowerCAmelCase , _lowerCAmelCase ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
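# Note: XOR is an involution (x ^ k ^ k == x), which is why the encrypt and
# decrypt methods above share the same body -- applying the cipher twice with
# the same key restores the original text.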
| 54 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: List[Any] , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 54 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the `barrier` calls hang, you have network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def a__ ( *lowercase__ ):
'''simple docstring'''
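    # Take an exclusive lock on a file so that prints from concurrent ranks
    # do not interleave; the lock is released in the finally block below.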
with open(lowercase__ , "r" ) as fh:
fcntl.flock(lowercase__ , fcntl.LOCK_EX )
try:
print(*lowercase__ )
finally:
fcntl.flock(lowercase__ , fcntl.LOCK_UN )
__lowercase : str =int(os.environ["""LOCAL_RANK"""])
torch.cuda.set_device(local_rank)
__lowercase : Any =torch.device("""cuda""", local_rank)
__lowercase : Optional[Any] =socket.gethostname()
__lowercase : Tuple =f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowercase : Optional[Any] =dist.get_rank()
__lowercase : Any =dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 54 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowercase , unittest.TestCase ):
_snake_case =CanineTokenizer
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: List[Any] ) -> CanineTokenizer:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
UpperCAmelCase_ =1024
return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
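# A minimal sketch of the character-level id scheme the expected ids above rely
# on: CANINE maps each character to its Unicode code point, and uses private-use
# code points for special tokens (5_7344 = 0xE000 for [CLS], 5_7345 = 0xE001 for
# [SEP]) with 0 as padding -- consistent with the hard-coded list in the first
# batch test of this class.
canine_ids = [0xE000] + [ord(ch) for ch in "Life"] + [0xE001]
assert canine_ids == [5_7344, 76, 105, 102, 101, 5_7345]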
| 54 | 1 |
def a__ ( lowercase__ , lowercase__ , lowercase__ = 0 , lowercase__ = 0 ):
'''simple docstring'''
UpperCAmelCase_ =right or len(lowercase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowercase__ , lowercase__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
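# A minimal, renamed sketch of the recursive search above, assuming the def's
# placeholder name `a__` originally read `search`, as the recursive call on its
# last line indicates. It probes both ends of the list and recurses inward, so
# it is a linear-time membership search over an arbitrary list, not a binary
# search. Note that `right or len(list_data) - 1` silently turns an explicit
# `right=0` into the last index.
def search(list_data, key, left=0, right=0):
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)

assert search([1, 2, 4, 8, 16], 8) == 3
assert search([1, 2, 4, 8, 16], 5) == -1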
| 54 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: int ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: List[List[List[str]]] , _lowerCAmelCase: List[List[str]] , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 4 , ) -> Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_lowerCAmelCase , hypotheses=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase )
}
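# A minimal sketch of the GLEU arithmetic described in the docstring above,
# restricted to a single hypothesis/reference pair and unigrams only
# (min_len = max_len = 1). The helper name is illustrative, not part of the
# metric's API; for the general case, defer to nltk's gleu_score used above.
from collections import Counter

def _unigram_gleu(hypothesis, reference):
    hyp_counts, ref_counts = Counter(hypothesis), Counter(reference)
    matches = sum((hyp_counts & ref_counts).values())  # clipped unigram matches
    precision = matches / max(len(hypothesis), 1)  # matches / hypothesis n-grams
    recall = matches / max(len(reference), 1)  # matches / reference n-grams
    return min(precision, recall)  # GLEU = min(recall, precision)

assert abs(_unigram_gleu(["the", "cat", "sat"], ["the", "cat", "ate"]) - 2 / 3) < 1e-9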
| 54 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__lowercase : Optional[int] =random.Random()
def a__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
'''simple docstring'''
if rng is None:
UpperCAmelCase_ =global_rng
UpperCAmelCase_ =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class A ( unittest.TestCase ):
def __init__( self: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[str]=7 , _lowerCAmelCase: Optional[Any]=400 , _lowerCAmelCase: Optional[Any]=2000 , _lowerCAmelCase: List[Any]=1 , _lowerCAmelCase: Optional[int]=0.0 , _lowerCAmelCase: Tuple=1_6000 , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: Union[str, Any]=80 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: List[Any]=64 , _lowerCAmelCase: Union[str, Any]="hann_window" , _lowerCAmelCase: Union[str, Any]=80 , _lowerCAmelCase: int=7600 , _lowerCAmelCase: Dict=1e-10 , _lowerCAmelCase: Optional[Any]=True , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =min_seq_length
UpperCAmelCase_ =max_seq_length
UpperCAmelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ =feature_size
UpperCAmelCase_ =padding_value
UpperCAmelCase_ =sampling_rate
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =num_mel_bins
UpperCAmelCase_ =hop_length
UpperCAmelCase_ =win_length
UpperCAmelCase_ =win_function
UpperCAmelCase_ =fmin
UpperCAmelCase_ =fmax
UpperCAmelCase_ =mel_floor
UpperCAmelCase_ =return_attention_mask
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Any=False , _lowerCAmelCase: Optional[Any]=False ) -> Any:
'''simple docstring'''
def _flatten(_lowerCAmelCase: Union[str, Any] ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
UpperCAmelCase_ =floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCAmelCase_ =[
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: List[str]=False , _lowerCAmelCase: str=False ) -> Optional[int]:
'''simple docstring'''
if equal_length:
UpperCAmelCase_ =[floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase_ =[
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
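# Note: unlike the 1-D waveforms produced by the method above, these target
# inputs are 2-D per example, shaped (sequence_length, num_mel_bins) -- random
# stand-ins for spectrogram frames -- which is why the integration test at the
# bottom of this file expects target input_values of shape (1, 366, 80).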
@require_torch
class A ( __lowercase , unittest.TestCase ):
_snake_case =SpeechTaFeatureExtractor
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
UpperCAmelCase_ =SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Optional[int] ) -> Any:
'''simple docstring'''
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ =feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase_ =feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test batched
UpperCAmelCase_ =feat_extract(_lowerCAmelCase , return_tensors="np" ).input_values
UpperCAmelCase_ =feat_extract(_lowerCAmelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =["longest", "max_length", "do_not_pad"]
UpperCAmelCase_ =[None, 1600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors="np" )
UpperCAmelCase_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCAmelCase__ ( self: str ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ =range(800 , 1400 , 200 )
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase_ =["longest", "max_length", "do_not_pad"]
UpperCAmelCase_ =[None, 1600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
UpperCAmelCase_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1000 , padding="max_length" , return_tensors="np" )
UpperCAmelCase_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1000 , padding="longest" , return_tensors="np" )
UpperCAmelCase_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2000 , padding="longest" , return_tensors="np" )
UpperCAmelCase_ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_ =np.random.rand(100 ).astype(np.floataa )
UpperCAmelCase_ =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase_ =feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCAmelCase_ =feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase_ =feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase_ =feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
UpperCAmelCase_ =feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test batched
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_values
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ =np.asarray(_lowerCAmelCase )
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_values
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ =feat_extract.model_input_names[0]
UpperCAmelCase_ =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
UpperCAmelCase_ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
UpperCAmelCase_ =BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase_ =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase_ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase__ ( self: Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ =feat_extract.model_input_names[0]
UpperCAmelCase_ =BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase_ =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase_ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ =self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ =feat_extract.model_input_names[0]
UpperCAmelCase_ =BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_ =feat_extract.num_mel_bins # hack!
UpperCAmelCase_ =feat_extract.pad(_lowerCAmelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase_ =feat_extract.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.feat_extract_dict
UpperCAmelCase_ =True
UpperCAmelCase_ =self.feature_extraction_class(**_lowerCAmelCase )
UpperCAmelCase_ =self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ =[len(_lowerCAmelCase ) for x in speech_inputs]
UpperCAmelCase_ =feat_extract.model_input_names[0]
UpperCAmelCase_ =BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_ =feat_extract.num_mel_bins # hack!
UpperCAmelCase_ =feat_extract.pad(_lowerCAmelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.feat_extract_dict
UpperCAmelCase_ =True
UpperCAmelCase_ =self.feature_extraction_class(**_lowerCAmelCase )
UpperCAmelCase_ =self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_ =[len(_lowerCAmelCase ) for x in speech_inputs]
UpperCAmelCase_ =feat_extract.model_input_names[0]
UpperCAmelCase_ =BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_ =min(_lowerCAmelCase )
UpperCAmelCase_ =feat_extract.num_mel_bins # hack!
UpperCAmelCase_ =feat_extract.pad(
_lowerCAmelCase , padding="max_length" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="np" )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: Tuple ) -> str:
'''simple docstring'''
from datasets import load_dataset
UpperCAmelCase_ =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase_ =ds.sort("id" ).select(range(_lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
# fmt: off
UpperCAmelCase_ =torch.tensor(
[2.3_804e-03, 2.0_752e-03, 1.9_836e-03, 2.1_057e-03, 1.6_174e-03,
3.0_518e-04, 9.1_553e-05, 3.3_569e-04, 9.7_656e-04, 1.8_311e-03,
2.0_142e-03, 2.1_057e-03, 1.7_395e-03, 4.5_776e-04, -3.9_673e-04,
4.5_776e-04, 1.0_071e-03, 9.1_553e-05, 4.8_828e-04, 1.1_597e-03,
7.3_242e-04, 9.4_604e-04, 1.8_005e-03, 1.8_311e-03, 8.8_501e-04,
4.2_725e-04, 4.8_828e-04, 7.3_242e-04, 1.0_986e-03, 2.1_057e-03] )
# fmt: on
UpperCAmelCase_ =self._load_datasamples(1 )
UpperCAmelCase_ =SpeechTaFeatureExtractor()
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1e-6 ) )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
# fmt: off
UpperCAmelCase_ =torch.tensor(
[-2.68_70, -3.01_04, -3.13_56, -3.53_52, -3.00_44, -3.03_53, -3.47_19, -3.67_77,
-3.15_20, -2.94_35, -2.65_53, -2.87_95, -2.99_44, -2.59_21, -3.02_79, -3.03_86,
-3.08_64, -3.12_91, -3.23_53, -2.74_44, -2.68_31, -2.72_87, -3.17_61, -3.15_71,
-3.27_26, -3.05_82, -3.10_07, -3.45_33, -3.46_95, -3.09_98] )
# fmt: on
UpperCAmelCase_ =self._load_datasamples(1 )
UpperCAmelCase_ =SpeechTaFeatureExtractor()
UpperCAmelCase_ =feature_extractor(audio_target=_lowerCAmelCase , return_tensors="pt" ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1e-4 ) )
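# A minimal sketch of the per-utterance normalization that
# `_check_zero_mean_unit_variance` above asserts: each waveform is shifted to
# zero mean and scaled to unit variance before padding. The epsilon is an
# assumed stabilizer, not necessarily the exact constant the extractor uses.
import numpy as np

def zero_mean_unit_var(x, eps=1e-7):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + eps)

normed = zero_mean_unit_var(np.random.rand(800))
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3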
| 54 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =KandinskyVaaImgaImgPipeline
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''image''']
_snake_case =[
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_snake_case =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case =False
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ ={
"in_channels": 4,
# out_channels is twice in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_unet
UpperCAmelCase_ =self.dummy_movq
UpperCAmelCase_ ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCAmelCase_ =DDIMScheduler(**_lowerCAmelCase )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[Any]=0 ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
# create init_image
UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ =np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ ="A red cartoon frog, 4k"
UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
UpperCAmelCase_ =KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ =pipeline(
image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCAmelCase_ =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
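# A minimal sketch of how `strength` interacts with `num_inference_steps` in
# img2img-style diffusers pipelines: only the tail of the noise schedule is
# run, starting from a partially noised version of the input image. This
# mirrors the common diffusers convention; Kandinsky's exact rounding may
# differ slightly.
def effective_steps(num_inference_steps: int, strength: float) -> int:
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    return init_timestep  # denoising steps actually executed

assert effective_steps(100, 0.2) == 20  # the slow test's settings above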
| 54 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( __lowercase , unittest.TestCase ):
_snake_case =True
_snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ =model_class_name.from_pretrained("roberta-base" , from_pt=_lowerCAmelCase )
UpperCAmelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
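# A minimal usage sketch for the Flax RoBERTa classes exercised above, kept as
# comments since it downloads pretrained weights. The checkpoint name matches
# the slow test; the tokenizer step is an assumption on top of what the test
# itself shows.
#
# from transformers import RobertaTokenizerFast
# tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
# model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
# outputs = model(**tokenizer("Hello world", return_tensors="np"))
# print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)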
| 54 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__lowercase : Tuple =random.Random()
def a__ ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ):
'''simple docstring'''
if rng is None:
UpperCAmelCase_ =global_rng
UpperCAmelCase_ =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
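# The helper above (placeholder name `a__`) is this file's `floats_list`, as the
# calls inside the tester class below show: it fills a (shape[0], shape[1])
# nested list with `rng.random() * scale` values, giving reproducible
# pseudo-audio whenever a seeded random.Random instance is passed as `rng`.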
class A ( unittest.TestCase ):
def __init__( self: str , _lowerCAmelCase: str , _lowerCAmelCase: Tuple=7 , _lowerCAmelCase: Dict=400 , _lowerCAmelCase: List[Any]=2000 , _lowerCAmelCase: Any=2048 , _lowerCAmelCase: str=128 , _lowerCAmelCase: List[str]=1 , _lowerCAmelCase: Any=512 , _lowerCAmelCase: Union[str, Any]=30 , _lowerCAmelCase: Any=4_4100 , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =min_seq_length
UpperCAmelCase_ =max_seq_length
UpperCAmelCase_ =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCAmelCase_ =spectrogram_length
UpperCAmelCase_ =feature_size
UpperCAmelCase_ =num_audio_channels
UpperCAmelCase_ =hop_length
UpperCAmelCase_ =chunk_length
UpperCAmelCase_ =sampling_rate
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: str=False , _lowerCAmelCase: Optional[int]=False ) -> Any:
'''simple docstring'''
def _flatten(_lowerCAmelCase: Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
UpperCAmelCase_ =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCAmelCase_ =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A ( __lowercase , unittest.TestCase ):
_snake_case =TvltFeatureExtractor
def lowerCAmelCase__ ( self: int ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =TvltFeatureExtractionTester(self )
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "spectrogram_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "feature_size" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "num_audio_channels" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "hop_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "chunk_length" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "sampling_rate" ) )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ =feat_extract_first.save_pretrained(_lowerCAmelCase )[0]
check_json_file_has_correct_format(_lowerCAmelCase )
UpperCAmelCase_ =self.feature_extraction_class.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =feat_extract_first.to_dict()
UpperCAmelCase_ =feat_extract_second.to_dict()
UpperCAmelCase_ =dict_first.pop("mel_filters" )
UpperCAmelCase_ =dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ =os.path.join(_lowerCAmelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_lowerCAmelCase )
UpperCAmelCase_ =self.feature_extraction_class.from_json_file(_lowerCAmelCase )
UpperCAmelCase_ =feat_extract_first.to_dict()
UpperCAmelCase_ =feat_extract_second.to_dict()
UpperCAmelCase_ =dict_first.pop("mel_filters" )
UpperCAmelCase_ =dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCAmelCase_ =[np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_ =feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
UpperCAmelCase_ =feature_extractor(
_lowerCAmelCase , return_tensors="np" , sampling_rate=4_4100 , mask_audio=_lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test that 2-D numpy arrays are batched.
UpperCAmelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ =np.asarray(_lowerCAmelCase )
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCAmelCase_ =ds.sort("id" ).select(range(_lowerCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self: str ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self._load_datasamples(1 )
UpperCAmelCase_ =TvltFeatureExtractor()
UpperCAmelCase_ =feature_extractor(_lowerCAmelCase , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
UpperCAmelCase_ =torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _lowerCAmelCase , atol=1e-4 ) )
| 54 |
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
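    # extended Euclidean algorithm: returns (x, y) such that a * x + b * y == gcd(a, b)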
if b == 0:
return (1, 0)
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , a % b )
UpperCAmelCase_ =a // b
return (y, x - k * y)
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
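    # Chinese Remainder Theorem: solve n ≡ r1 (mod n1) and n ≡ r2 (mod n2) using the
    # Bezout coefficients of the two moduli, which are assumed to be coprime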
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , lowercase__ )
UpperCAmelCase_ =na * na
UpperCAmelCase_ =ra * x * na + ra * y * na
return (n % m + m) % m
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
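    # modular inverse of a modulo n via the extended Euclidean algorithm (needs gcd(a, n) == 1)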
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
UpperCAmelCase_ =(b % n + n) % n
return b
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
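    # same CRT construction as above, this time written in terms of modular inverses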
UpperCAmelCase_ , UpperCAmelCase_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
UpperCAmelCase_ =na * na
UpperCAmelCase_ =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 54 | 1 |
class A :
def __init__( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
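        # adjacency list: maps each vertex to the list of vertices it points to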
UpperCAmelCase_ ={}
def lowerCAmelCase__ ( self: str ) -> None:
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , " -> " , " -> ".join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: int , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
UpperCAmelCase_ =[to_vertex]
def lowerCAmelCase__ ( self: Optional[Any] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =[False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: list ) -> None:
'''simple docstring'''
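        # mark the current vertex as visited before printing and exploring its neighbours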
UpperCAmelCase_ =True
print(_lowerCAmelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
__lowercase : Dict =Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 54 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowercase : Tuple =logging.getLogger(__name__)
__lowercase : Optional[int] =tf.data.AUTOTUNE
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase__ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase__ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase__ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase__ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase__ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase__ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase__ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase__ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase__ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase__ , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase__ , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase__ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase__ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase__ , required=lowercase__ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase__ , help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ =parser.parse_args()
return args
def a__ ( lowercase__ ):
'''simple docstring'''
try:
if args.tpu_name:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =0
for file in file_list:
UpperCAmelCase_ =file.split("/" )[-1]
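        # shard filenames are expected to end in "-<shard_index>-<num_samples>.tfrecord"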
UpperCAmelCase_ =re.search(R"-\d+-(\d+)\.tfrecord" , lowercase__ ).group(1 )
UpperCAmelCase_ =int(lowercase__ )
num_samples += sample_count
return num_samples
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
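    # tf.data pipeline: shuffle the shard list, read the TFRecords, decode, shuffle samples,
    # batch, apply the masking function, then prefetch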
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
UpperCAmelCase_ =dataset.shuffle(len(lowercase__ ) )
UpperCAmelCase_ =tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase_ =dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase_ =dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase_ =dataset.batch(lowercase__ , drop_remainder=lowercase__ )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
UpperCAmelCase_ =dataset.prefetch(lowercase__ )
return dataset
def a__ ( lowercase__ ):
'''simple docstring'''
if not args.no_tpu:
UpperCAmelCase_ =initialize_tpu(lowercase__ )
UpperCAmelCase_ =tf.distribute.TPUStrategy(lowercase__ )
else:
UpperCAmelCase_ =tf.distribute.OneDeviceStrategy(device="/gpu:0" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ =AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ =AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ =tokenizer.vocab_size
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ =steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ =TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ =create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["accuracy"] )
def decode_fn(lowercase__ ):
UpperCAmelCase_ ={
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ =DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="tf" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
UpperCAmelCase_ =(
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ =data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
UpperCAmelCase_ =args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
UpperCAmelCase_ =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
| 54 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
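# Lazy import structure: map each submodule to the names it exports so that optional
# backends (vision / torch / TF) are only imported when the objects are first accessed.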
__lowercase : Dict ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
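        # each example should come back as a list of top_k {"score", "answer"} dicts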
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
| 54 | 1 |
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
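    # Newton-Laplace equation: the speed of sound in a fluid is sqrt(bulk_modulus / density)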
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
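    # fractional knapsack: greedily take items in decreasing profit/weight ratio, taking
    # only a fraction of the final item if it does not fit entirely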
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
UpperCAmelCase_ =[p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ =sorted(lowercase__ )
# declaring useful variables
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
UpperCAmelCase_ =sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ =profit_by_weight.index(lowercase__ )
UpperCAmelCase_ =-1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item: the profit gained for this weight is
            # 1 * profit[index], since weight[index] / weight[index] == 1.
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 54 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =KandinskyVaaImgaImgPipeline
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''image''']
_snake_case =[
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_snake_case =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case =False
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ ={
"in_channels": 4,
            # out_channels is double in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_unet
UpperCAmelCase_ =self.dummy_movq
UpperCAmelCase_ ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCAmelCase_ =DDIMScheduler(**_lowerCAmelCase )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[Any]=0 ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
# create init_image
UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ =np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ ="A red cartoon frog, 4k"
UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
UpperCAmelCase_ =KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ =pipeline(
image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCAmelCase_ =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : Dict ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 | 1 |
def a__ ( lowercase__ = 1 , lowercase__ = 1_0_0_0 ):
'''simple docstring'''
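    # find the denominator (at most 'digit') whose fraction numerator / denominator has the
    # longest recurring cycle: simulate long division and stop once a remainder repeats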
UpperCAmelCase_ =1
UpperCAmelCase_ =0
for divide_by_number in range(lowercase__ , digit + 1 ):
UpperCAmelCase_ =[]
UpperCAmelCase_ =numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(lowercase__ ):
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =divide_by_number
else:
has_been_divided.append(lowercase__ )
UpperCAmelCase_ =now_divide * 1_0 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowercase__ , lowercase__ , lowercase__=1_0_2_4 , lowercase__=1_0_2_4 , lowercase__=False , **lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="train" , **lowercase__ )
UpperCAmelCase_ =tok.pad_token_id
def get_lens(lowercase__ ):
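        # a sample's length is its number of non-pad tokens, measured on inputs and labels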
UpperCAmelCase_ =tqdm(
DataLoader(lowercase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowercase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ =[]
for batch in dl:
UpperCAmelCase_ =batch["input_ids"].ne(lowercase__ ).sum(1 ).tolist()
UpperCAmelCase_ =batch["labels"].ne(lowercase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase__ , lowercase__ ):
max_lens.append(max(lowercase__ , lowercase__ ) )
else:
max_lens.extend(lowercase__ )
return max_lens
UpperCAmelCase_ =get_lens(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="val" , **lowercase__ )
UpperCAmelCase_ =get_lens(lowercase__ )
pickle_save(lowercase__ , train_ds.len_file )
pickle_save(lowercase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54 | 1 |
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
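    # compute "n choose r" with Pascal's rule, keeping only a single row of size r + 1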
UpperCAmelCase_ =[0 for i in range(r + 1 )]
# nc0 = 1
UpperCAmelCase_ =1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
UpperCAmelCase_ =min(lowercase__ , lowercase__ )
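        # walk j from right to left so that c[j - 1] still holds the previous row's value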
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 54 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
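        # with interpolated position encodings the sequence length becomes the new patch count + [CLS]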
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =TFViTModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
| 54 | 1 |
def a__ ( ):
'''simple docstring'''
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def a__ ( lowercase__ ):
'''simple docstring'''
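    # count divisors via the prime factorisation: d(n) = product of (exponent + 1)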
UpperCAmelCase_ =1
UpperCAmelCase_ =2
while i * i <= n:
UpperCAmelCase_ =0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def a__ ( ):
'''simple docstring'''
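    # the first triangle number with more than 500 divisors (this is Project Euler problem 12)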
return next(i for i in triangle_number_generator() if count_divisors(lowercase__ ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 54 |
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
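    # recursive binary search on a sorted list; note that each recursive call slices the
    # list, which copies it, so an index-based variant would avoid O(n) extra work per call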
if len(lowercase__ ) == 0:
return False
UpperCAmelCase_ =len(lowercase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowercase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowercase__ )
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
__lowercase : Optional[Any] =[int(item.strip()) for item in user_input.split(""",""")]
__lowercase : List[Any] =int(input("""Enter the number to be found in the list:\n""").strip())
__lowercase : Optional[Any] ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
| 54 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowercase__ , lowercase__ , lowercase__=1_0_2_4 , lowercase__=1_0_2_4 , lowercase__=False , **lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="train" , **lowercase__ )
UpperCAmelCase_ =tok.pad_token_id
def get_lens(lowercase__ ):
UpperCAmelCase_ =tqdm(
DataLoader(lowercase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowercase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ =[]
for batch in dl:
UpperCAmelCase_ =batch["input_ids"].ne(lowercase__ ).sum(1 ).tolist()
UpperCAmelCase_ =batch["labels"].ne(lowercase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase__ , lowercase__ ):
max_lens.append(max(lowercase__ , lowercase__ ) )
else:
max_lens.extend(lowercase__ )
return max_lens
UpperCAmelCase_ =get_lens(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="val" , **lowercase__ )
UpperCAmelCase_ =get_lens(lowercase__ )
pickle_save(lowercase__ , train_ds.len_file )
pickle_save(lowercase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowercase : Any =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowercase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowercase : List[str] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowercase : str =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowercase : Union[str, Any] =(
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowercase : str =(
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowercase : int =(
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =randrange(len(lowercase__ ) ), randrange(len(lowercase__ ) )
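    # (play >= oppo) + (play > oppo) evaluates to 0, 1, or 2, i.e. "Loss", "Tie", "Win"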
UpperCAmelCase_ =["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
UpperCAmelCase_ , UpperCAmelCase_ =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def a__ ( lowercase__ = 1_0_0 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(lowercase__ ))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand(lowercase__ ) for hand in SORTED_HANDS]
UpperCAmelCase_ =poker_hands.copy()
shuffle(lowercase__ )
UpperCAmelCase_ =chain(sorted(lowercase__ ) )
for index, hand in enumerate(lowercase__ ):
assert hand == poker_hands[index]
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=lowercase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand("2C 4S AS 3D 5C" )
UpperCAmelCase_ =True
UpperCAmelCase_ =[5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =0
    UpperCAmelCase_ =os.path.abspath(os.path.dirname(__file__ ) )
    UpperCAmelCase_ =os.path.join(script_dir , "poker_hands.txt" )
    with open(file_path ) as file_hand:
for line in file_hand:
UpperCAmelCase_ =line[:1_4].strip()
UpperCAmelCase_ =line[1_5:].strip()
            UpperCAmelCase_ , UpperCAmelCase_ =PokerHand(player_hand ), PokerHand(opponent_hand )
            UpperCAmelCase_ =player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 3_7_6
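# Illustrative sketch (not part of the test suite): how a line in the
# Project Euler poker_hands.txt format splits into the two hands via the
# fixed-width slices used above (14 characters, a space, then the
# opponent's cards).
sample_line = "8C TS KC 9H 4S 7D 2S 5D 3S AC\n"
player_cards, opponent_cards = sample_line[:14].strip(), sample_line[15:].strip()
assert player_cards == "8C TS KC 9H 4S"
assert opponent_cards == "7D 2S 5D 3S AC"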
| 54 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[Any] =logging.get_logger(__name__)
__lowercase : Dict ={
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class A ( __lowercase ):
_snake_case ='''altclip_text_model'''
def __init__( self: List[Any] , _lowerCAmelCase: Union[str, Any]=25_0002 , _lowerCAmelCase: List[str]=1024 , _lowerCAmelCase: Union[str, Any]=24 , _lowerCAmelCase: Dict=16 , _lowerCAmelCase: Optional[int]=4096 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: Union[str, Any]=0.1 , _lowerCAmelCase: Any=514 , _lowerCAmelCase: Tuple=1 , _lowerCAmelCase: Optional[Any]=0.02 , _lowerCAmelCase: int=0.02 , _lowerCAmelCase: Optional[Any]=1e-05 , _lowerCAmelCase: List[Any]=1 , _lowerCAmelCase: List[Any]=0 , _lowerCAmelCase: int=2 , _lowerCAmelCase: Dict="absolute" , _lowerCAmelCase: Optional[Any]=True , _lowerCAmelCase: Any=768 , **_lowerCAmelCase: List[str] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =initializer_factor
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =position_embedding_type
UpperCAmelCase_ =use_cache
UpperCAmelCase_ =project_dim
class A ( __lowercase ):
_snake_case ='''altclip_vision_model'''
def __init__( self: Union[str, Any] , _lowerCAmelCase: Optional[int]=768 , _lowerCAmelCase: int=3072 , _lowerCAmelCase: int=512 , _lowerCAmelCase: List[Any]=12 , _lowerCAmelCase: Tuple=12 , _lowerCAmelCase: Any=3 , _lowerCAmelCase: Optional[Any]=224 , _lowerCAmelCase: Dict=32 , _lowerCAmelCase: Any="quick_gelu" , _lowerCAmelCase: Optional[Any]=1e-5 , _lowerCAmelCase: Optional[int]=0.0 , _lowerCAmelCase: int=0.02 , _lowerCAmelCase: Any=1.0 , **_lowerCAmelCase: Dict , ) -> List[str]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =projection_dim
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =initializer_factor
UpperCAmelCase_ =attention_dropout
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =hidden_act
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: str ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
UpperCAmelCase_ =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class A ( __lowercase ):
_snake_case ='''altclip'''
_snake_case =True
def __init__( self: Optional[int] , _lowerCAmelCase: str=None , _lowerCAmelCase: Dict=None , _lowerCAmelCase: int=768 , _lowerCAmelCase: Optional[Any]=2.65_92 , **_lowerCAmelCase: List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =kwargs.pop("text_config_dict" , _lowerCAmelCase )
UpperCAmelCase_ =kwargs.pop("vision_config_dict" , _lowerCAmelCase )
super().__init__(**_lowerCAmelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
UpperCAmelCase_ ={}
# This is the complete result when using `text_config_dict`.
UpperCAmelCase_ =AltCLIPTextConfig(**_lowerCAmelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
UpperCAmelCase_ =(
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
UpperCAmelCase_ =(
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(_lowerCAmelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
UpperCAmelCase_ ={}
# This is the complete result when using `vision_config_dict`.
UpperCAmelCase_ =AltCLIPVisionConfig(**_lowerCAmelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
UpperCAmelCase_ ={
str(_lowerCAmelCase ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
UpperCAmelCase_ =(
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
UpperCAmelCase_ =(
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(_lowerCAmelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
UpperCAmelCase_ ={}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase_ ={}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
UpperCAmelCase_ =AltCLIPTextConfig(**_lowerCAmelCase )
UpperCAmelCase_ =AltCLIPVisionConfig(**_lowerCAmelCase )
UpperCAmelCase_ =projection_dim
UpperCAmelCase_ =logit_scale_init_value
UpperCAmelCase_ =1.0
@classmethod
def lowerCAmelCase__ ( cls: str , _lowerCAmelCase: AltCLIPTextConfig , _lowerCAmelCase: AltCLIPVisionConfig , **_lowerCAmelCase: int ) -> Dict:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =copy.deepcopy(self.__dict__ )
UpperCAmelCase_ =self.text_config.to_dict()
UpperCAmelCase_ =self.vision_config.to_dict()
UpperCAmelCase_ =self.__class__.model_type
return output
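# Usage sketch: this composite config mirrors `transformers.AltCLIPConfig`,
# which exposes the same `from_text_vision_configs` classmethod defined
# above (assumes the `transformers` package is installed).
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

composite = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
assert composite.text_config.vocab_size == 250002
assert composite.projection_dim == 768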
| 54 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: List[Any] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ =crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: float , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Any , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase_ =size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ =int(shortest_edge / crop_pct )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
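# Worked example of the `crop_pct` branch in `resize` above: with the
# default crop_pct = 224/256, an eval size below 384 is first resized so
# the shortest edge becomes size / crop_pct, then center-cropped back.
shortest_edge, crop_pct = 224, 224 / 256
assert int(shortest_edge / crop_pct) == 256  # resize to 256, then crop to 224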
| 54 | 1 |
def a__ ( lowercase__ ):
'''simple docstring'''
    UpperCAmelCase_ =[int(i ) for i in ip_va_address.split("." ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 2_5_4 for octet in octets )
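# Quick standalone checks of the rule above (re-stated here so it runs on
# its own): four dot-separated decimal octets, each within the 0..254 cap
# this validator uses.
def _is_valid(addr: str) -> bool:
    parts = [p for p in addr.split(".") if p.isdigit()]
    return len(parts) == 4 and all(0 <= int(p) <= 254 for p in parts)

assert _is_valid("192.168.0.23")
assert not _is_valid("192.256.15.8")  # octet above the cap
assert not _is_valid("172.100.0.8.50")  # too many octets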
if __name__ == "__main__":
__lowercase : str =input().strip()
__lowercase : Union[str, Any] ="""valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 54 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =test_results.split(" " )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCAmelCase_ =expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
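# Illustrative sketch of the parsing above on a hypothetical pytest
# summary line: the token *before* "failed"/"passed" carries the count.
tokens = "== 3 failed, 120 passed in 124.34s ==".split(" ")
failed = success = 0
for i, tok in enumerate(tokens):
    if "failed" in tok:
        failed += int(tokens[i - 1])
    if "passed" in tok:
        success += int(tokens[i - 1])
assert (failed, success) == (3, 120)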
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
UpperCAmelCase_ =None
UpperCAmelCase_ =False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , lowercase__ ):
UpperCAmelCase_ =True
UpperCAmelCase_ =line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
UpperCAmelCase_ =line
UpperCAmelCase_ =False
return failures
class A :
def __init__( self: Optional[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =title
UpperCAmelCase_ =doc_test_results["time_spent"].split("," )[0]
UpperCAmelCase_ =doc_test_results["success"]
UpperCAmelCase_ =doc_test_results["failures"]
UpperCAmelCase_ =self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCAmelCase_ =doc_test_results
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self._time_spent]
UpperCAmelCase_ =0
for time in time_spent:
UpperCAmelCase_ =time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
UpperCAmelCase_ =[0, 0, time_parts[0]]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'{int(_lowerCAmelCase )}h{int(_lowerCAmelCase )}m{int(_lowerCAmelCase )}s'
@property
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =40
UpperCAmelCase_ ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
UpperCAmelCase_ =""
for category, failures in category_failures.items():
            if len(failures ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
UpperCAmelCase_ =F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
UpperCAmelCase_ =client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =""
for key, value in failures.items():
            UpperCAmelCase_ =value[:200] + " [Truncated]" if len(value ) > 250 else value
failures_text += F'*{key}*\n_{value}_\n\n'
UpperCAmelCase_ =job_name
UpperCAmelCase_ ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
UpperCAmelCase_ ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
UpperCAmelCase_ =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        UpperCAmelCase_ =sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
UpperCAmelCase_ =F'*Num failures* :{len(job_result["failed"] )} \n'
UpperCAmelCase_ =job_result["failures"]
                UpperCAmelCase_ =self.get_reply_blocks(job , job_link , failures , text=text )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=_lowerCAmelCase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =os.environ["GITHUB_RUN_ID"]
UpperCAmelCase_ =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
    UpperCAmelCase_ =requests.get(url ).json()
UpperCAmelCase_ ={}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
UpperCAmelCase_ =math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(lowercase__ ):
UpperCAmelCase_ =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase__ )
return {}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
if os.path.exists(lowercase__ ):
UpperCAmelCase_ =os.listdir(lowercase__ )
for file in files:
try:
                with open(os.path.join(lowercase__ , file ) , encoding="utf-8" ) as f:
UpperCAmelCase_ =f.read()
except UnicodeDecodeError as e:
                raise ValueError(F'Could not open {os.path.join(lowercase__ , file )}.' ) from e
return _artifact
def a__ ( ):
'''simple docstring'''
class A :
def __init__( self: Tuple , _lowerCAmelCase: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =name
UpperCAmelCase_ =[]
def __str__( self: Optional[int] ) -> Tuple:
'''simple docstring'''
return self.name
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: str ) -> List[Any]:
'''simple docstring'''
self.paths.append({"name": self.name, "path": path} )
UpperCAmelCase_ ={}
UpperCAmelCase_ =filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCAmelCase_ =directory
if artifact_name not in _available_artifacts:
            UpperCAmelCase_ =Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 54 | 1 |
from math import ceil
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =list(range(0 , lowercase__ ) )
UpperCAmelCase_ =[item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
UpperCAmelCase_ =[]
for i in device_map_blocks:
if device_map_blocks.count(lowercase__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowercase__ )
# Missing blocks
UpperCAmelCase_ =[i for i in blocks if i not in device_map_blocks]
UpperCAmelCase_ =[i for i in device_map_blocks if i not in blocks]
if len(lowercase__ ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(lowercase__ ) )
if len(lowercase__ ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(lowercase__ ) )
if len(lowercase__ ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(lowercase__ ) )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =list(range(lowercase__ ) )
UpperCAmelCase_ =int(ceil(n_layers / len(lowercase__ ) ) )
UpperCAmelCase_ =[layers[i : i + n_blocks] for i in range(0 , lowercase__ , lowercase__ )]
return dict(zip(lowercase__ , lowercase__ ) )
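# Worked example of the splitting helper above (standalone): 8 layers over
# 3 devices gives ceil(8/3) = 3 layers per block, with the last device
# taking the remainder.
from math import ceil as _ceil

layers, devices = list(range(8)), [0, 1, 2]
n_blocks = int(_ceil(len(layers) / len(devices)))
split = [layers[i : i + n_blocks] for i in range(0, len(layers), n_blocks)]
assert dict(zip(devices, split)) == {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]}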
| 54 |
def a__ ( lowercase__ = 2_0_0 ):
'''simple docstring'''
UpperCAmelCase_ =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
UpperCAmelCase_ =[0] * (pence + 1)
UpperCAmelCase_ =1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowercase__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
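# Small worked check of the same DP (standalone): with coins {1, 2, 5}
# there are exactly 4 ways to make 5 pence: 5, 2+2+1, 2+1+1+1, 1+1+1+1+1.
ways = [1] + [0] * 5
for coin in (1, 2, 5):
    for amount in range(coin, 6):
        ways[amount] += ways[amount - coin]
assert ways[5] == 4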
if __name__ == "__main__":
assert solution(200) == 7_3682
| 54 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A ( __lowercase ):
def __init__( self: int , *_lowerCAmelCase: List[Any] , _lowerCAmelCase: Any=None , _lowerCAmelCase: Union[str, Any]=None , **_lowerCAmelCase: Any ) -> List[str]:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ =eval_examples
UpperCAmelCase_ =post_process_function
def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Optional[Dataset] = None , _lowerCAmelCase: Dict=None , _lowerCAmelCase: Optional[List[str]] = None , _lowerCAmelCase: str = "eval" , **_lowerCAmelCase: Any , ) -> Dict[str, float]:
'''simple docstring'''
UpperCAmelCase_ =gen_kwargs.copy()
UpperCAmelCase_ =(
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
UpperCAmelCase_ =(
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
UpperCAmelCase_ =gen_kwargs
UpperCAmelCase_ =self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase_ =self.get_eval_dataloader(_lowerCAmelCase )
UpperCAmelCase_ =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ =self.compute_metrics
UpperCAmelCase_ =None
UpperCAmelCase_ =time.time()
UpperCAmelCase_ =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ =eval_loop(
_lowerCAmelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
UpperCAmelCase_ =compute_metrics
UpperCAmelCase_ =self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCAmelCase_ =self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
UpperCAmelCase_ =metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
else:
UpperCAmelCase_ =output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase_ =self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCAmelCase )
return metrics
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: Tuple=None , _lowerCAmelCase: str = "test" , **_lowerCAmelCase: Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =gen_kwargs.copy()
UpperCAmelCase_ =self.get_test_dataloader(_lowerCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase_ =self.compute_metrics
UpperCAmelCase_ =None
UpperCAmelCase_ =time.time()
UpperCAmelCase_ =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase_ =eval_loop(
_lowerCAmelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
UpperCAmelCase_ =compute_metrics
UpperCAmelCase_ =self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase_ =self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , "predict" )
UpperCAmelCase_ =self.compute_metrics(_lowerCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
UpperCAmelCase_ =metrics.pop(_lowerCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCAmelCase )
| 54 |
import sys
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =[[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
UpperCAmelCase_ =[[0 for x in range(lowercase__ )] for x in range(lowercase__ )]
    for chain_length in range(2 , n ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ =a + chain_length - 1
UpperCAmelCase_ =sys.maxsize
            for c in range(a , b ):
UpperCAmelCase_ =(
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ =cost
UpperCAmelCase_ =c
return matrix, sol
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if i == j:
print("A" + str(lowercase__ ) , end=" " )
else:
print("(" , end=" " )
        print_optiomal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optiomal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
print(")" , end=" " )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    UpperCAmelCase_ =len(array )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
    UpperCAmelCase_ , UpperCAmelCase_ =matrix_chain_order(array )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optiomal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
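# Standalone cross-check of the DP above via memoized recursion: for the
# dimension array in main(), the optimal cost is the classic CLRS result
# 15125, with parenthesization ((A1(A2A3))((A4A5)A6)).
from functools import lru_cache

dims = (30, 35, 15, 5, 10, 20, 25)

@lru_cache(maxsize=None)
def best_cost(i: int, j: int) -> int:
    # minimum scalar multiplications for the chain A_i..A_j (1-indexed)
    if i == j:
        return 0
    return min(
        best_cost(i, k) + best_cost(k + 1, j) + dims[i - 1] * dims[k] * dims[j]
        for k in range(i, j)
    )

assert best_cost(1, 6) == 15125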
| 54 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class A ( nn.Module ):
def __init__( self: Dict , _lowerCAmelCase: int = 16 , _lowerCAmelCase: int = 88 , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: int = 1 , _lowerCAmelCase: float = 0.0 , _lowerCAmelCase: int = 32 , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: bool = False , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: str = "geglu" , _lowerCAmelCase: Optional[int] = None , ) -> str:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_lowerCAmelCase , attention_head_dim=_lowerCAmelCase , in_channels=_lowerCAmelCase , num_layers=_lowerCAmelCase , dropout=_lowerCAmelCase , norm_num_groups=_lowerCAmelCase , cross_attention_dim=_lowerCAmelCase , attention_bias=_lowerCAmelCase , sample_size=_lowerCAmelCase , num_vector_embeds=_lowerCAmelCase , activation_fn=_lowerCAmelCase , num_embeds_ada_norm=_lowerCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCAmelCase_ =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCAmelCase_ =[77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCAmelCase_ =[1, 0]
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[Any]=None , _lowerCAmelCase: Optional[int]=None , _lowerCAmelCase: Dict=None , _lowerCAmelCase: bool = True , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =hidden_states
UpperCAmelCase_ =[]
UpperCAmelCase_ =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCAmelCase_ =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCAmelCase_ =self.transformer_index_for_condition[i]
UpperCAmelCase_ =self.transformers[transformer_index](
_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , timestep=_lowerCAmelCase , cross_attention_kwargs=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCAmelCase_ =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCAmelCase_ =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_lowerCAmelCase )
| 54 |
from math import loga
def a__ ( lowercase__ ):
'''simple docstring'''
if a < 0:
raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowercase__ , int ):
        raise TypeError("Input value must be an 'int' type" )
return 0 if (a == 0) else int(loga(a & -a ) )
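# Examples of the bit trick above (standalone; `loga` is this file's alias
# for math.log2): x & -x isolates the lowest set bit, and log2 of that
# power of two is its index.
from math import log2

assert int(log2(8 & -8)) == 3  # 8  = 0b1000 -> index 3
assert int(log2(12 & -12)) == 2  # 12 = 0b1100 -> index 2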
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 | 1 |
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
UpperCAmelCase_ =[p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ =sorted(lowercase__ )
# declaring useful variables
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
UpperCAmelCase_ =sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ =profit_by_weight.index(lowercase__ )
        profit_by_weight[index] = -1  # mark as used so .index() will not pick it again
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Add the item's full profit, since the fraction taken is
            # weight[index]/weight[index] == 1
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
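# Worked example of the greedy above (standalone arithmetic): with profits
# (1, 2, 3), weights (3, 4, 5) and max_weight 7, the best-ratio item (3/5)
# is taken whole and the next (2/4) fractionally.
gain = 3 + (7 - 5) / 4 * 2
assert gain == 4.0  # expected value of calc_profit([1, 2, 3], [3, 4, 5], 7)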
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 54 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase__ )
UpperCAmelCase_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCAmelCase_ =sd.pop(lowercase__ )
UpperCAmelCase_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ =sd[key]
# We split QKV in separate Q,K,V
UpperCAmelCase_ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase_ =value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =torch.split(lowercase__ , depth // 3 , dim=0 )
UpperCAmelCase_ =q
UpperCAmelCase_ =k
UpperCAmelCase_ =v
del sd[key]
return sd
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =load_checkpoint(lowercase__ )
if config is not None:
UpperCAmelCase_ =OPTConfig.from_pretrained(lowercase__ )
else:
UpperCAmelCase_ =OPTConfig()
UpperCAmelCase_ =OPTModel(lowercase__ ).half().eval()
model.load_state_dict(lowercase__ )
# Check results
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowercase : str =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
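# Example invocation of this conversion script (hypothetical filename and
# paths; the flags match the argparse definitions above):
#   python convert_opt_checkpoint.py \
#       --fairseq_path ./opt-125m/restored.pt \
#       --pytorch_dump_folder_path ./opt-125m-hf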
| 54 | 1 |
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCAmelCase_ =_modexpt(lowercase__ , exponent // 2 , lowercase__ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase__ , exponent - 1 , lowercase__ )) % modulo_value
def a__ ( lowercase__ = 1_7_7_7 , lowercase__ = 1_8_5_5 , lowercase__ = 8 ):
'''simple docstring'''
UpperCAmelCase_ =base
for _ in range(1 , lowercase__ ):
UpperCAmelCase_ =_modexpt(lowercase__ , lowercase__ , 1_0**digits )
return result
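# Quick standalone illustration of modular exponentiation by squaring, the
# technique `_modexpt` above implements recursively: 3^4 mod 5 = 81 mod 5 = 1.
def _modexpt_demo(base: int, exponent: int, modulo: int) -> int:
    result = 1
    while exponent:
        if exponent & 1:  # multiply in the current bit's contribution
            result = result * base % modulo
        base = base * base % modulo
        exponent >>= 1
    return result

assert _modexpt_demo(3, 4, 5) == 1
assert _modexpt_demo(1777, 1855, 10**8) == pow(1777, 1855, 10**8)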
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowercase : str ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =(images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    UpperCAmelCase_ =numpy_to_pil(images )
return images
def a__ ( lowercase__ ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase_ =images[None, ...]
UpperCAmelCase_ =(images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase_ =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
        UpperCAmelCase_ =[Image.fromarray(image ) for image in images]
return pil_images
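# Usage sketch of the uint8 conversion above (standalone; needs numpy and
# Pillow): a batch of HxWx3 floats in [0, 1] becomes a list of PIL images.
import numpy as np
from PIL import Image as _Image

batch = (np.random.rand(2, 8, 8, 3) * 255).round().astype("uint8")
pils = [_Image.fromarray(img) for img in batch]
assert len(pils) == 2 and pils[0].size == (8, 8)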
| 54 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
__lowercase : List[Any] =torch.device("""cpu""")
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ ="http://images.cocodataset.org/val2017/000000039769.jpg"
    UpperCAmelCase_ =Image.open(requests.get(url , stream=True ).raw )
return im
def a__ ( lowercase__ ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =dct.pop(lowercase__ )
UpperCAmelCase_ =val
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[]
for k in state_dict.keys():
UpperCAmelCase_ =k
if ".pwconv" in k:
UpperCAmelCase_ =k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
UpperCAmelCase_ =k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
UpperCAmelCase_ =k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
UpperCAmelCase_ =k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
UpperCAmelCase_ =k_new.split("." )
if ls[2].isdigit():
UpperCAmelCase_ ="swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
UpperCAmelCase_ =k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =SwiftFormerConfig()
    # load the ImageNet-1k label mapping used for the classification head
UpperCAmelCase_ =1_0_0_0
UpperCAmelCase_ ="huggingface/label-files"
UpperCAmelCase_ ="imagenet-1k-id2label.json"
UpperCAmelCase_ =json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
    UpperCAmelCase_ ={int(k ): v for k, v in idalabel.items()}
UpperCAmelCase_ =idalabel
UpperCAmelCase_ ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase_ =[3, 3, 6, 4]
UpperCAmelCase_ =[4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase_ =[3, 3, 9, 6]
UpperCAmelCase_ =[4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase_ =[4, 3, 1_0, 5]
UpperCAmelCase_ =[4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase_ =[4, 4, 1_2, 6]
UpperCAmelCase_ =[6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
UpperCAmelCase_ =torch.hub.load_state_dict_from_url(lowercase__ , map_location="cpu" , check_hash=lowercase__ )
else:
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
UpperCAmelCase_ =checkpoint
UpperCAmelCase_ =create_rename_keys(lowercase__ )
for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
# load HuggingFace model
UpperCAmelCase_ =SwiftFormerForImageClassification(lowercase__ ).eval()
hf_model.load_state_dict(lowercase__ )
# prepare test inputs
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =ViTImageProcessor.from_pretrained("preprocessor_config" )
UpperCAmelCase_ =processor(images=lowercase__ , return_tensors="pt" )
# compare outputs from both models
UpperCAmelCase_ =get_expected_output(lowercase__ )
UpperCAmelCase_ =hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , lowercase__ , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__lowercase : List[Any] =parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 54 |
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =int(lowercase__ )
if n_element < 1:
UpperCAmelCase_ =ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ =[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =(0, 0, 0)
UpperCAmelCase_ =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
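# Sanity check for the three-pointer merge above (standalone brute force):
# the first ten Hamming numbers, i.e. products 2^i * 3^j * 5^k.
brute = sorted(
    2**i * 3**j * 5**k
    for i in range(5)
    for j in range(4)
    for k in range(3)
    if 2**i * 3**j * 5**k <= 12
)
assert brute == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]  # == hamming(10)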
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =model.config
UpperCAmelCase_ =DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 1_6, 3_2] , window_size=original_config.window_size , embed_dim=1_2_8 , )
UpperCAmelCase_ =MBartConfig(
is_decoder=lowercase__ , is_encoder_decoder=lowercase__ , add_cross_attention=lowercase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase__ , add_final_layer_norm=lowercase__ , )
return encoder_config, decoder_config
def a__ ( lowercase__ ):
'''simple docstring'''
if "encoder.model" in name:
UpperCAmelCase_ =name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
UpperCAmelCase_ =name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
UpperCAmelCase_ =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
UpperCAmelCase_ =name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
UpperCAmelCase_ ="encoder." + name
if "attn.proj" in name:
UpperCAmelCase_ =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
UpperCAmelCase_ =name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase_ =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ =name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
UpperCAmelCase_ ="encoder.layernorm.weight"
if name == "encoder.norm.bias":
UpperCAmelCase_ ="encoder.layernorm.bias"
return name
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        UpperCAmelCase_ =orig_state_dict.pop(key )
if "qkv" in key:
UpperCAmelCase_ =key.split("." )
UpperCAmelCase_ =int(key_split[3] )
UpperCAmelCase_ =int(key_split[5] )
UpperCAmelCase_ =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ =val[:dim, :]
UpperCAmelCase_ =val[dim : dim * 2, :]
UpperCAmelCase_ =val[-dim:, :]
else:
UpperCAmelCase_ =val[:dim]
UpperCAmelCase_ =val[dim : dim * 2]
UpperCAmelCase_ =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
UpperCAmelCase_ =val
return orig_state_dict
def a__ ( lowercase__ , lowercase__=None , lowercase__=False ):
'''simple docstring'''
UpperCAmelCase_ =DonutModel.from_pretrained(lowercase__ ).eval()
# load HuggingFace model
UpperCAmelCase_ , UpperCAmelCase_ =get_configs(lowercase__ )
UpperCAmelCase_ =DonutSwinModel(lowercase__ )
UpperCAmelCase_ =MBartForCausalLM(lowercase__ )
UpperCAmelCase_ =VisionEncoderDecoderModel(encoder=lowercase__ , decoder=lowercase__ )
model.eval()
UpperCAmelCase_ =original_model.state_dict()
UpperCAmelCase_ =convert_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ )
# verify results on scanned document
UpperCAmelCase_ =load_dataset("hf-internal-testing/example-documents" )
UpperCAmelCase_ =dataset["test"][0]["image"].convert("RGB" )
UpperCAmelCase_ =XLMRobertaTokenizerFast.from_pretrained(lowercase__ , from_slow=lowercase__ )
UpperCAmelCase_ =DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
UpperCAmelCase_ =DonutProcessor(lowercase__ , lowercase__ )
UpperCAmelCase_ =processor(lowercase__ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
UpperCAmelCase_ ="<s_docvqa><s_question>{user_input}</s_question><s_answer>"
UpperCAmelCase_ ="When is the coffee break?"
UpperCAmelCase_ =task_prompt.replace("{user_input}" , lowercase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
UpperCAmelCase_ ="<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
UpperCAmelCase_ ="<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
UpperCAmelCase_ ="s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
UpperCAmelCase_ ="<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
UpperCAmelCase_ ="hello world"
else:
raise ValueError("Model name not supported" )
UpperCAmelCase_ =original_model.decoder.tokenizer(lowercase__ , add_special_tokens=lowercase__ , return_tensors="pt" )[
"input_ids"
]
UpperCAmelCase_ =original_model.encoder.model.patch_embed(lowercase__ )
UpperCAmelCase_ , UpperCAmelCase_ =model.encoder.embeddings(lowercase__ )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
# verify encoder hidden states
UpperCAmelCase_ =original_model.encoder(lowercase__ )
UpperCAmelCase_ =model.encoder(lowercase__ ).last_hidden_state
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-2 )
# verify decoder hidden states
UpperCAmelCase_ =original_model(lowercase__ , lowercase__ , lowercase__ ).logits
UpperCAmelCase_ =model(lowercase__ , decoder_input_ids=lowercase__ ).logits
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
__lowercase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
__lowercase : str =parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 54 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class GLPNFeatureExtractor( GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 54 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Tuple =logging.get_logger(__name__)
__lowercase : str ={
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class Swinv2Config( PretrainedConfig ):
    model_type ='''swinv2'''
    attribute_map ={
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size =image_size
        self.patch_size =patch_size
        self.num_channels =num_channels
        self.embed_dim =embed_dim
        self.depths =depths
        self.num_layers =len(depths )
        self.num_heads =num_heads
        self.window_size =window_size
        self.mlp_ratio =mlp_ratio
        self.qkv_bias =qkv_bias
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.drop_path_rate =drop_path_rate
        self.hidden_act =hidden_act
        self.use_absolute_embeddings =use_absolute_embeddings
        self.layer_norm_eps =layer_norm_eps
        self.initializer_range =initializer_range
        self.encoder_stride =encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        # (with the defaults above: 96 * 2 ** (4 - 1) = 768)
        self.hidden_size =int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes =(0, 0, 0, 0)
| 54 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class =CanineTokenizer
    test_rust_tokenizer =False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer =CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def canine_tokenizer( self ):
        '''simple docstring'''
        return CanineTokenizer.from_pretrained("google/canine-s" )
    def get_tokenizer( self , **kwargs ) -> CanineTokenizer:
        '''simple docstring'''
        tokenizer =self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer.model_max_length =1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration( self ):
        '''simple docstring'''
        tokenizer =self.canine_tokenizer
        src_text =["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
        # fmt: on
        batch =tokenizer(src_text , padding=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )
        result =list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
    def test_encoding_keys( self ):
        '''simple docstring'''
        tokenizer =self.canine_tokenizer
        src_text =["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch =tokenizer(src_text , padding=True , return_tensors="pt" )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids" , batch )
        self.assertIn("attention_mask" , batch )
        self.assertIn("token_type_ids" , batch )
@require_torch
    def test_max_length_integration( self ):
        '''simple docstring'''
        tokenizer =self.canine_tokenizer
        tgt_text =[
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets =tokenizer(
            text_target=tgt_text , max_length=32 , padding="max_length" , truncation=True , return_tensors="pt" )
        self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int =logging.get_logger(__name__)
def get_mobilenet_va_config( model_name ):
    '''simple docstring'''
    config =MobileNetV1Config(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported." )
    matches =re.match(R"^mobilenet_v1_([^_]*)_([^_]*)$" , model_name )
    if matches:
        config.depth_multiplier =float(matches[1] )
        config.image_size =int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels =1_0_0_1
    filename ="imagenet-1k-id2label.json"
    repo_id ="huggingface/label-files"
    idalabel =json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel ={int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] ="background"
    config.id2label =idalabel
    config.label2id ={v: k for k, v in idalabel.items()}
    return config
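# For example, ImageNet-1k label 0 ("tench") becomes id 1 in this mapping, with id 0
# reserved for "background", matching the 1001-way TensorFlow classification head.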
def prepare_img( ):
    '''simple docstring'''
    url ="http://images.cocodataset.org/val2017/000000039769.jpg"
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config =get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model =MobileNetV1ForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor =MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 3_2} , )
    encoding =image_processor(images=prepare_img() , return_tensors="pt" )
    outputs =model(**encoding )
    logits =outputs.logits
    assert logits.shape == (1, 1_0_0_1)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits =torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits =torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits =None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        repo_id ="google/" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__lowercase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase : Tuple =parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 54 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu( datasets.Metric ):
    def _info( self ) -> MetricInfo:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
                } ) , )
    def _compute( self , predictions: List[List[str]] , references: List[List[List[str]]] , min_len: int = 1 , max_len: int = 4 , ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
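# The description above boils down to a few lines of counting. The helper below is a
# minimal, illustrative re-implementation of the sentence-level GLEU score; it is not
# part of the metric (which delegates to nltk's corpus_gleu), and the name
# `_sentence_gleu_sketch` is introduced here purely for illustration.
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens):
        # all sub-sequences of min_len..max_len tokens, with multiplicity
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngrams(hypothesis), ngrams(reference)
    # matching n-grams are counted with clipped multiplicity (Counter intersection)
    matches = sum((hyp_counts & ref_counts).values())
    hyp_total, ref_total = sum(hyp_counts.values()), sum(ref_counts.values())
    if hyp_total == 0 or ref_total == 0:
        return 0.0
    # GLEU is the minimum of n-gram precision and n-gram recall
    return min(matches / hyp_total, matches / ref_total)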
| 54 | 1 |
from math import sqrt
def sum_of_divisors( n ):
    '''simple docstring'''
    total =0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( limit = 1_0_0_0_0 ):
    '''simple docstring'''
    total =sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
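# Example of the condition above: sum_of_divisors(220) == 284 and
# sum_of_divisors(284) == 220, an amicable pair, so both 220 and 284 are counted.
# solution(10_000) sums all amicable numbers below the limit, giving 31626.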
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 54 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class =KandinskyV22Img2ImgPipeline
    params =['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params =[
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params =[
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention =False
@property
    def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
    def time_input_dim( self ):
'''simple docstring'''
return 32
@property
    def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
    def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
'''simple docstring'''
return 100
@property
    def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs ={
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model =UNet2DConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model =VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        '''simple docstring'''
        unet =self.dummy_unet
        movq =self.dummy_movq
        scheduler_kwargs ={
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler =DDIMScheduler(**scheduler_kwargs )
        components ={
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image =floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image =Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        if str(device ).startswith("mps" ):
            generator =torch.manual_seed(seed )
        else:
            generator =torch.Generator(device=device ).manual_seed(seed )
        inputs ={
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        device ="cpu"
        components =self.get_dummy_components()
        pipe =self.pipeline_class(**components )
        pipe =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output =pipe(**self.get_dummy_inputs(device ) )
        image =output.images
        image_from_tuple =pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice =image[0, -3:, -3:, -1]
        image_from_tuple_slice =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice =np.array(
            [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        '''simple docstring'''
        expected_image =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy" )
        init_image =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        prompt ="A red cartoon frog, 4k"
        pipe_prior =KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline =KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
        pipeline =pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator =torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb =pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output =pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
        image =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 54 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
class Wav2Vec2Processor( ProcessorMixin ):
    feature_extractor_class ='''Wav2Vec2FeatureExtractor'''
    tokenizer_class ='''AutoTokenizer'''
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
        self.current_processor =self.feature_extractor
        self._in_target_context_manager =False
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F'Loading a tokenizer inside {cls.__name__} from a config that does not'
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , FutureWarning , )
            feature_extractor =Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer =Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio =kwargs.pop("raw_speech" )
        else:
            audio =kwargs.pop("audio" , None )
        sampling_rate =kwargs.pop("sampling_rate" , None )
        text =kwargs.pop("text" , None )
        if len(args ) > 0:
            audio =args[0]
            args =args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs =self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings =self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] =encodings["input_ids"]
            return inputs
    def pad( self , *args , **kwargs ):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features =kwargs.pop("input_features" , None )
        labels =kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features =args[0]
            args =args[1:]
        if input_features is not None:
            input_features =self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels =self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] =labels["input_ids"]
            return input_features
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self ):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager =True
        self.current_processor =self.tokenizer
        yield
        self.current_processor =self.feature_extractor
        self._in_target_context_manager =False
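# A minimal usage sketch of the (deprecated) context manager above; `processor` and
# `text` are hypothetical, shown only to illustrate the temporary swap to the tokenizer:
#
#     with processor.as_target_processor():
#         labels = processor(text, padding=True).input_ids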
| 54 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent =parent
        self.batch_size =batch_size
        self.seq_length =seq_length
        self.is_training =is_training
        self.use_attention_mask =use_attention_mask
        self.use_token_type_ids =use_token_type_ids
        self.use_labels =use_labels
        self.vocab_size =vocab_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.intermediate_size =intermediate_size
        self.hidden_act =hidden_act
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.type_vocab_size =type_vocab_size
        self.type_sequence_label_size =type_sequence_label_size
        self.initializer_range =initializer_range
        self.num_choices =num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask =None
        if self.use_attention_mask:
            attention_mask =random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config =RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        inputs_dict ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask =config_and_inputs
        config.is_decoder =True
        encoder_hidden_states =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking =True
    all_model_classes =(
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        '''simple docstring'''
        self.model_tester =FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model =model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs =model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 54 | 1 |
def print_max_activities( start , finish ):
    '''simple docstring'''
    n =len(finish )
    print("The following activities are selected:" )
    # The first activity is always selected
    i =0
    print(i , end="," )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end="," )
            i =j
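# With the sample data below, the greedy pass prints "0,1,3,4,": activity 2
# (start 0) begins before activity 1 finishes, and activity 5 (start 5) begins
# before activity 4 finishes, so both are skipped.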
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase : Dict =[1, 3, 0, 5, 8, 5]
__lowercase : List[str] =[2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 54 |
from __future__ import annotations
def extended_euclid( a , b ):
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x , y) =extended_euclid(b , a % b )
    k =a // b
    return (y, x - k * y)
def chinese_remainder_theorem( n1 , r1 , n2 , r2 ):
    '''simple docstring'''
    (x , y) =extended_euclid(n1 , n2 )
    m =n1 * n2
    n =r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo( a , n ):
    '''simple docstring'''
    (b , x) =extended_euclid(a , n )
    if b < 0:
        b =(b % n + n) % n
    return b
def chinese_remainder_theorem2( n1 , r1 , n2 , r2 ):
    '''simple docstring'''
    x , y =invert_modulo(n1 , n2 ), invert_modulo(n2 , n1 )
    m =n1 * n2
    n =r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
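# A quick sanity check, worked by hand: for x ≡ 1 (mod 5) and x ≡ 3 (mod 7),
# extended_euclid(5, 7) gives (3, -2) since 5*3 + 7*(-2) = 1, so
# chinese_remainder_theorem(5, 1, 7, 3) returns 3*3*5 + 1*(-2)*7 = 31 (mod 35),
# and indeed 31 % 5 == 1 and 31 % 7 == 3. Likewise invert_modulo(2, 5) == 3.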
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 54 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor( ProcessorMixin ):
    attributes =['''image_processor''', '''tokenizer''']
    image_processor_class ='''AutoImageProcessor'''
    tokenizer_class ='''AutoTokenizer'''
    def __init__( self , image_processor , tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )
        self.current_processor =self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding =self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features =self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
| 54 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger =logging.getLogger(__name__)
AUTO =tf.data.AUTOTUNE
def parse_args( ):
    '''simple docstring'''
    parser =argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=str , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=str , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=int , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=str , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=str , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=str , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=str , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=int , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=str , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=float , default=1E-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=float , default=1E-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=int , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=float , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=str , required=True , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=str , help="Model ID to upload to on the Hugging Face Hub." )
    args =parser.parse_args()
    return args
def initialize_tpu( args ):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu =tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu =tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples( file_list ):
    '''simple docstring'''
    num_samples =0
    for file in file_list:
        filename =file.split("/" )[-1]
        sample_count =re.search(R"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        sample_count =int(sample_count )
        num_samples += sample_count
    return num_samples
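# For instance, a shard named "dataset-5-2048.tfrecord" contributes 2048 samples,
# assuming shard filenames follow the "-<shard>-<count>.tfrecord" convention of the
# companion prepare_tfrecord_shards.py script (the filename itself is illustrative).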
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    '''simple docstring'''
    num_samples =count_samples(records )
    dataset =tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset =dataset.shuffle(len(dataset ) )
    dataset =tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset =dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset =dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset =dataset.shuffle(shuffle_buffer_size )
    dataset =dataset.batch(batch_size , drop_remainder=True )
    dataset =dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset =dataset.prefetch(AUTO )
    return dataset
def a__ ( lowercase__ ):
'''simple docstring'''
if not args.no_tpu:
UpperCAmelCase_ =initialize_tpu(lowercase__ )
UpperCAmelCase_ =tf.distribute.TPUStrategy(lowercase__ )
else:
UpperCAmelCase_ =tf.distribute.OneDeviceStrategy(device="/gpu:0" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ =AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ =AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ =tokenizer.vocab_size
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ =steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ =TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ =create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["accuracy"] )
def decode_fn(lowercase__ ):
UpperCAmelCase_ ={
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ =DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="tf" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
UpperCAmelCase_ =(
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ =data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
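# A minimal sketch of the same special-token mask via broadcasting, since TF has no
# built-in isin(); the names below are illustrative, not part of this script, and the
# result would still be OR-ed with the padding mask as above:
# special_ids = tf.constant([tokenizer.cls_token_id, tokenizer.sep_token_id])
# is_special = tf.reduce_any(batch["input_ids"][..., None] == special_ids, axis=-1)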
UpperCAmelCase_ =args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
UpperCAmelCase_ =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
| 54 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands, it doesn't print a newline after the results
import re
import subprocess
import sys
__lowercase : Optional[Any] =subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__lowercase : List[str] =subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
__lowercase : Optional[Any] ="""|""".join(sys.argv[1:])
__lowercase : str =re.compile(Rf"""^({joined_dirs}).*?\.py$""")
__lowercase : Dict =[x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 54 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
| 54 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =os.path.join(args.tf_model_dir , "parameters.json" )
UpperCAmelCase_ =json.loads(open(lowercase__ ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(".pt" ):
UpperCAmelCase_ =args.output + ".pt"
UpperCAmelCase_ =OrderedDict()
with tf.device("/CPU:0" ):
UpperCAmelCase_ =tf.train.load_checkpoint(args.tf_model_dir )
UpperCAmelCase_ =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
UpperCAmelCase_ =reader.get_tensor(lowercase__ ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
UpperCAmelCase_ =int(key_name[9] )
elif key_name.startswith("pasts/out" ):
UpperCAmelCase_ =8
UpperCAmelCase_ ="model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
UpperCAmelCase_ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.startswith("model/moe" ):
UpperCAmelCase_ =int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
UpperCAmelCase_ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/softmlp/kernel" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
UpperCAmelCase_ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
UpperCAmelCase_ =key_name[-9:-7]
for i in range(1_6 ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
UpperCAmelCase_ =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow this is a single array, so it is split per expert
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.startswith("model/mlp" ):
UpperCAmelCase_ =int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.mlp.wi.weight" % player
UpperCAmelCase_ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/p1/bias" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.mlp.wi.bias" % player
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/p2/kernel" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.mlp.wo.weight" % player
UpperCAmelCase_ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/p2/bias" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.mlp.wo.bias" % player
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.startswith("model/ln" ):
UpperCAmelCase_ =int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.norm.bias" % player
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/g" ):
UpperCAmelCase_ ="model.blocks.%d.feed_forward.norm.weight" % player
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.startswith("model/att" ):
UpperCAmelCase_ =int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
UpperCAmelCase_ =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
UpperCAmelCase_ =state[:, 0, :, :]
UpperCAmelCase_ =state[:, 1, :, :]
UpperCAmelCase_ =state[:, 2, :, :]
UpperCAmelCase_ =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ ="model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
UpperCAmelCase_ =torch.tensor(lowercase__ )
UpperCAmelCase_ ="model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
UpperCAmelCase_ =torch.tensor(lowercase__ )
UpperCAmelCase_ ="model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/o/kernel" ):
UpperCAmelCase_ ="model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
UpperCAmelCase_ =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.startswith("model/an" ):
UpperCAmelCase_ =int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
UpperCAmelCase_ ="model.blocks.%d.self_attn.norm.bias" % player
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.endswith("/g" ):
UpperCAmelCase_ ="model.blocks.%d.self_attn.norm.weight" % player
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
UpperCAmelCase_ ={"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
UpperCAmelCase_ ="model.%s.weight" % nlayer
UpperCAmelCase_ =vnp.copy() # same in embedded
UpperCAmelCase_ =torch.tensor(lowercase__ )
if key_name.startswith("model/wte" ):
UpperCAmelCase_ ="lm_head.weight"
UpperCAmelCase_ =vnp.copy() # same in embedded
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name.startswith("model/wob" ):
UpperCAmelCase_ ="final_logits_bias"
UpperCAmelCase_ =vnp.copy() # same in embedded
UpperCAmelCase_ =state.reshape((1, -1) )
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name == "model/dense/kernel":
UpperCAmelCase_ ="model.last_project.weight"
UpperCAmelCase_ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
UpperCAmelCase_ =torch.tensor(lowercase__ )
elif key_name == "model/dense_1/bias":
UpperCAmelCase_ ="model.last_project.bias"
UpperCAmelCase_ =vnp.copy() # same because it is one dimensional
UpperCAmelCase_ =torch.tensor(lowercase__ )
torch.save(lowercase__ , args.output )
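# Illustrative key mapping (read off the branches above, layer index <N> is hypothetical):
# a TF variable named "model/mlp<N>/p1/kernel" ends up as the PyTorch tensor
# "model.blocks.<N>.feed_forward.mlp.wi.weight", transposed with [1, 0].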
if __name__ == "__main__":
__lowercase : Optional[int] =argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
__lowercase : List[str] =parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 54 |
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("The length of profit and weight must be the same." )
if max_weight <= 0:
raise ValueError("max_weight must be greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit cannot be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight cannot be negative." )
# List storing the profit gained per 1 kg for each item,
# i.e. calculate and append profit/weight for each element.
UpperCAmelCase_ =[p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ =sorted(lowercase__ )
# declaring useful variables
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# loop while the total weight has not exceeded the max limit (e.g. 15 kg) and i < length
while limit <= max_weight and i < length:
# pick the largest remaining profit/weight ratio from sorted_profit_by_weight
UpperCAmelCase_ =sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ =profit_by_weight.index(lowercase__ )
UpperCAmelCase_ =-1
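# mark this ratio as used so .index() will not find it again on later iterations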
# check if the whole item fits within the remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Add the full profit for this item, since the whole weight is taken:
# 1 == weight[index] / weight[index]
gain += 1 * profit[index]
else:
# Since the item does not fit entirely, take only the remaining capacity
# and add the proportional profit:
# (max_weight - limit) / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
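# Worked example (assuming the assignments above behave as named): with
# profit=[10, 9], weight=[5, 3], max_weight=6, the ratio-3.0 item is taken whole (+9),
# then 3/5 of the ratio-2.0 item (+6.0), giving a total gain of 15.0.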
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 54 | 1 |
from scipy.stats import spearmanr
import datasets
__lowercase : str ="""
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
__lowercase : Optional[Any] ="""
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
__lowercase : Dict =R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: int=False ) -> str:
'''simple docstring'''
UpperCAmelCase_ =spearmanr(_lowerCAmelCase , _lowerCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : Dict ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =[]
for line in lines:
UpperCAmelCase_ =re.sub(R"#.*" , "" , lowercase__ ) # remove comments
if line:
filtered_lines.append(lowercase__ )
UpperCAmelCase_ ="\n".join(lowercase__ )
# Make a hash from all this code
UpperCAmelCase_ =full_str.encode("utf-8" )
return shaaaa(lowercase__ ).hexdigest()
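# Illustrative property: comment-only and blank lines are dropped before hashing, so
# _hash_python_lines(["# header", "", "x = 1"]) == _hash_python_lines(["x = 1"]).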
# get importable module names and hash for caching
__lowercase : Tuple ={
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
__lowercase : Optional[int] ={
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowercase : Dict ={"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
__lowercase : Dict[str, List[str]] ={}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 54 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowercase__ , lowercase__ , lowercase__=1_0_2_4 , lowercase__=1_0_2_4 , lowercase__=False , **lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="train" , **lowercase__ )
UpperCAmelCase_ =tok.pad_token_id
def get_lens(lowercase__ ):
UpperCAmelCase_ =tqdm(
DataLoader(lowercase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowercase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ =[]
for batch in dl:
UpperCAmelCase_ =batch["input_ids"].ne(lowercase__ ).sum(1 ).tolist()
UpperCAmelCase_ =batch["labels"].ne(lowercase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase__ , lowercase__ ):
max_lens.append(max(lowercase__ , lowercase__ ) )
else:
max_lens.extend(lowercase__ )
return max_lens
UpperCAmelCase_ =get_lens(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="val" , **lowercase__ )
UpperCAmelCase_ =get_lens(lowercase__ )
pickle_save(lowercase__ , train_ds.len_file )
pickle_save(lowercase__ , val_ds.len_file )
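# Net effect (a sketch): each .len_file receives a pickled list of per-example max token
# lengths (e.g. [128, 96, ...]) that later runs can use for length-based batch sampling.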
if __name__ == "__main__":
fire.Fire(save_len_file)
| 54 | 1 |
import argparse
import os
import re
import packaging.version
__lowercase : Tuple ="""examples/"""
__lowercase : Optional[Any] ={
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
__lowercase : Union[str, Any] ={
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
__lowercase : Any ="""README.md"""
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
with open(lowercase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ =f.read()
UpperCAmelCase_ , UpperCAmelCase_ =REPLACE_PATTERNS[pattern]
UpperCAmelCase_ =replace.replace("VERSION" , lowercase__ )
UpperCAmelCase_ =re_pattern.sub(lowercase__ , lowercase__ )
with open(lowercase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(lowercase__ )
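# Illustrative substitution for pattern "init": a line `__version__ = "4.27.0"` in the
# target file is rewritten to `__version__ = "<version>"` (version strings hypothetical).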
def a__ ( lowercase__ ):
'''simple docstring'''
for folder, directories, fnames in os.walk(lowercase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern="examples" )
def a__ ( lowercase__ , lowercase__=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowercase__ , lowercase__ , lowercase__ )
if not patch:
update_version_in_examples(lowercase__ )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ ="🤗 Transformers currently provides the following architectures"
UpperCAmelCase_ ="1. Want to contribute a new model?"
with open(lowercase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ =f.readlines()
# Find the start of the list.
UpperCAmelCase_ =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase_ =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase_ =lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(lowercase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowercase__ )
def a__ ( ):
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
UpperCAmelCase_ =f.read()
UpperCAmelCase_ =REPLACE_PATTERNS["init"][0].search(lowercase__ ).groups()[0]
return packaging.version.parse(lowercase__ )
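# e.g. with `__version__ = "4.28.0.dev0"` in the init file, this returns
# packaging.version.Version("4.28.0.dev0"), whose .base_version is "4.28.0" (illustrative).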
def a__ ( lowercase__=False ):
'''simple docstring'''
UpperCAmelCase_ =get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase_ =default_version.base_version
elif patch:
UpperCAmelCase_ =F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
UpperCAmelCase_ =F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
UpperCAmelCase_ =input(F'Which version are you releasing? [{default_version}]' )
if len(lowercase__ ) == 0:
UpperCAmelCase_ =default_version
print(F'Updating version to {version}.' )
global_version_update(lowercase__ , patch=lowercase__ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =get_version()
UpperCAmelCase_ =F'{current_version.major}.{current_version.minor + 1}.0.dev0'
UpperCAmelCase_ =current_version.base_version
# Check with the user we got that right.
UpperCAmelCase_ =input(F'Which version are we developing now? [{dev_version}]' )
if len(lowercase__ ) == 0:
UpperCAmelCase_ =dev_version
print(F'Updating version to {version}.' )
global_version_update(lowercase__ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowercase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
__lowercase : List[Any] =parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 54 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =TFViTModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
| 54 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__lowercase : Dict =2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__lowercase : str ={
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
__lowercase : List[Any] ={}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowercase : Dict ="""facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__lowercase : Any ="""allenai"""
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =dict((re.sub(R"@@$" , "" , lowercase__ ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , lowercase__ ), v) for k, v in d.items() )
UpperCAmelCase_ ="<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
UpperCAmelCase_ =d[k] # restore
return da
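# Illustrative mapping (hypothetical vocab): {"lo@@": 5, "wer": 6} becomes
# {"lo": 5, "wer</w>": 6}, while "<s> <pad> </s> <unk>" keep their original keys.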
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert os.path.exists(lowercase__ )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
UpperCAmelCase_ =basename(lowercase__ )
UpperCAmelCase_ =dirname(lowercase__ )
UpperCAmelCase_ =fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
UpperCAmelCase_ =cls.hub_models()
UpperCAmelCase_ ={"bpe": "fastbpe", "tokenizer": "moses"}
UpperCAmelCase_ ="."
# note: since this model dump is old and fairseq has since upgraded its model format,
# performing a whole lot of rewrites and splits on the saved weights,
# we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
UpperCAmelCase_ =hub_utils.from_pretrained(
lowercase__ , lowercase__ , lowercase__ , archive_map=lowercase__ , **lowercase__ )
UpperCAmelCase_ =vars(chkpt["args"]["model"] )
UpperCAmelCase_ =args["source_lang"]
UpperCAmelCase_ =args["target_lang"]
UpperCAmelCase_ =dirname(lowercase__ )
UpperCAmelCase_ =basename(lowercase__ )
# dicts
UpperCAmelCase_ =os.path.join(lowercase__ , F'dict.{src_lang}.txt' )
UpperCAmelCase_ =os.path.join(lowercase__ , F'dict.{tgt_lang}.txt' )
UpperCAmelCase_ =Dictionary.load(lowercase__ )
UpperCAmelCase_ =rewrite_dict_keys(src_dict.indices )
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =os.path.join(lowercase__ , "vocab-src.json" )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(lowercase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
UpperCAmelCase_ =True
for k in src_vocab.keys():
if not k.islower():
UpperCAmelCase_ =False
break
UpperCAmelCase_ =Dictionary.load(lowercase__ )
UpperCAmelCase_ =rewrite_dict_keys(tgt_dict.indices )
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =os.path.join(lowercase__ , "vocab-tgt.json" )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(lowercase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )
# merges_file (bpecodes)
UpperCAmelCase_ =os.path.join(lowercase__ , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
UpperCAmelCase_ =os.path.join(lowercase__ , lowercase__ )
if os.path.exists(lowercase__ ):
break
with open(lowercase__ , encoding="utf-8" ) as fin:
UpperCAmelCase_ =fin.read()
UpperCAmelCase_ =re.sub(R" \d+$" , "" , lowercase__ , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(lowercase__ , "w" , encoding="utf-8" ) as fout:
fout.write(lowercase__ )
# model config
UpperCAmelCase_ =os.path.join(lowercase__ , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
UpperCAmelCase_ ={
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
UpperCAmelCase_ =5
UpperCAmelCase_ =False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
UpperCAmelCase_ =best_score_hparams[model_dir]["length_penalty"]
else:
UpperCAmelCase_ =1.0
print(F'Generating {fsmt_model_config_file}' )
with open(lowercase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )
# tokenizer config
UpperCAmelCase_ =os.path.join(lowercase__ , lowercase__ )
UpperCAmelCase_ ={
"langs": [src_lang, tgt_lang],
"model_max_length": 1_0_2_4,
"do_lower_case": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(lowercase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowercase__ , ensure_ascii=lowercase__ , indent=lowercase__ ) )
# model
UpperCAmelCase_ =chkpt["models"][0]
UpperCAmelCase_ =model.state_dict()
# rename keys to start with 'model.'
UpperCAmelCase_ =OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
UpperCAmelCase_ =[
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(lowercase__ , lowercase__ )
UpperCAmelCase_ =FSMTConfig.from_pretrained(lowercase__ )
UpperCAmelCase_ =FSMTForConditionalGeneration(lowercase__ )
# check that it loads ok
model_new.load_state_dict(lowercase__ , strict=lowercase__ )
# save
UpperCAmelCase_ =os.path.join(lowercase__ , lowercase__ )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase__ , lowercase__ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
__lowercase : Tuple =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowercase : Dict =parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 54 |
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) == 0:
return False
UpperCAmelCase_ =len(lowercase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowercase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowercase__ )
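# Quick illustrative checks (the input list must already be sorted; note the slicing
# makes each call copy O(n) elements rather than the usual O(log n) in-place variant):
# binary_search([1, 3, 5, 7], 5) -> True
# binary_search([1, 3, 5, 7], 4) -> False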
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
__lowercase : Optional[Any] =[int(item.strip()) for item in user_input.split(""",""")]
__lowercase : List[Any] =int(input("""Enter the number to be found in the list:\n""").strip())
__lowercase : Optional[Any] ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
| 54 | 1 |
def a__ ( lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) <= 1:
return [tuple(lowercase__ )]
UpperCAmelCase_ =[]
def generate(lowercase__ , lowercase__ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowercase__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
UpperCAmelCase_ , UpperCAmelCase_ =arr[k - 1], arr[i]
else: # k is odd
UpperCAmelCase_ , UpperCAmelCase_ =arr[k - 1], arr[0]
generate(k - 1 , lowercase__ )
generate(len(lowercase__ ) , lowercase__ )
return res
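# Illustrative output (assuming the paired assignments above perform the swaps):
# heaps([1, 2, 3]) -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]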
if __name__ == "__main__":
__lowercase : Dict =input("""Enter numbers separated by a comma:\n""").strip()
__lowercase : List[str] =[int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 54 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowercase : Any =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowercase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowercase : List[str] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowercase : str =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowercase : Union[str, Any] =(
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowercase : str =(
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowercase : int =(
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =randrange(len(lowercase__ ) ), randrange(len(lowercase__ ) )
UpperCAmelCase_ =["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
UpperCAmelCase_ , UpperCAmelCase_ =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def a__ ( lowercase__ = 1_0_0 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(lowercase__ ))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Five-high straights must sort below six-high straights.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must keep returning True and must not mutate the cached card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 of Project Euler: how many of the 1000 hands does Player 1 win?
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
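# Hedged sketch of the outcome-indexing trick used in generate_random_hand():
# (a >= b) + (a > b) evaluates to 0, 1 or 2, selecting Loss/Tie/Win.
assert ["Loss", "Tie", "Win"][(0 >= 1) + (0 > 1)] == "Loss"
assert ["Loss", "Tie", "Win"][(1 >= 1) + (1 > 1)] == "Tie"
assert ["Loss", "Tie", "Win"][(2 >= 1) + (2 > 1)] == "Win"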
| 54 | 1 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """Sort ``arr`` by repeatedly stripping an increasing strand from it and
    merging that strand into the (already sorted) solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
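    # A couple of extra sketch checks: trivially sorted inputs pass straight through.
    assert strand_sort([1, 2, 3]) == [1, 2, 3]
    assert strand_sort([]) == []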
| 54 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # note: parenthesized to fix an operator-precedence bug in the original check
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
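# Hedged usage sketch: exercises the default path (shortest_edge=384, so the
# image is warped directly, no crop). Requires PIL and the transformers image
# utilities imported above.
if __name__ == "__main__":
    processor = ConvNextImageProcessor()
    demo_image = PIL.Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor.preprocess(demo_image)
    print(np.asarray(batch["pixel_values"][0]).shape)  # (3, 384, 384)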
| 54 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
    AutoProcessor,
    BertTokenizerFast,
    BlipImageProcessor,
    GPT2Tokenizer,
    InstructBlipProcessor,
    PreTrainedTokenizerFast,
)


@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 54 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
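# Minimal self-check sketch for handle_test_results() above
# (a made-up pytest summary string, not real CI output).
assert handle_test_results("= 2 failed, 98 passed in 3600.00s =")[:2] == (2, 98)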
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        self.thread_ts = None  # populated by post(); post_reply() requires it

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # uses the module-level doc_test_results built in the __main__ section below
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 54 | 1 |
import numpy as np
class Cell:
    """A cell in the grid world, tracking its parent for path reconstruction
    and its g (cost so far), h (heuristic) and f (total) scores."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the (up to 8) neighbours of a cell that lie inside the world."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal on the given Gridworld."""
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))

        if current == goal:
            break

        for n in world.get_neigbours(current):
            # (mirrors the source: these membership loops are effectively no-ops)
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g

            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)

    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
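    # Sketch: the heuristic in astar() is the squared Euclidean distance; an
    # admissible alternative on an 8-connected grid is Chebyshev distance.
    chebyshev = lambda p, q: max(abs(q[0] - p[0]), abs(q[1] - p[1]))
    assert chebyshev((0, 0), (4, 4)) == 4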
| 54 |
def solution(pence: int = 200) -> int:
    """Count the ways to make ``pence`` using standard British coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
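    # Small worked example: 5 pence can be made 4 ways (5; 2+2+1; 2+1+1+1; 1+1+1+1+1).
    assert solution(5) == 4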
| 54 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split the text into sentences with NLTK, one sentence per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char (the result was discarded in the original; fixed here)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 54 |
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution to the matrix-chain multiplication problem."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
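    # Quick sanity sketch: for the classic CLRS example used in main(), the
    # optimal multiplication cost is 15125.
    m, s = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert m[1][6] == 15125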
| 54 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks whether ``key`` or ``(model_prefix,) + key`` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
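if __name__ == "__main__":
    # Layout sketch for the conv-kernel swap applied above: Flax stores conv
    # kernels as (H, W, in, out); PyTorch expects (out, in, H, W).
    demo_kernel = np.zeros((3, 3, 8, 16))
    assert demo_kernel.transpose(3, 2, 0, 1).shape == (16, 8, 3, 3)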
| 54 |
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """Return the 0-based index of the rightmost set bit (0 for input 0).
    The name is descriptive; the original identifier was not recoverable."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
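    # Quick demo: 36 = 0b100100 has its rightmost set bit at index 2.
    assert get_index_of_rightmost_set_bit(36) == 2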
| 54 | 1 |
from manim import *
class A ( __lowercase ):
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ =Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase_ =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ =[mem.copy() for i in range(6 )]
UpperCAmelCase_ =[mem.copy() for i in range(6 )]
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =Text("CPU" , font_size=24 )
UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
UpperCAmelCase_ =[mem.copy() for i in range(4 )]
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =Text("GPU" , font_size=24 )
UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
UpperCAmelCase_ =[mem.copy() for i in range(6 )]
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =Text("Model" , font_size=24 )
UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
UpperCAmelCase_ =[]
UpperCAmelCase_ =[]
UpperCAmelCase_ =[]
for i, rect in enumerate(_lowerCAmelCase ):
rect.set_stroke(_lowerCAmelCase )
UpperCAmelCase_ =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_lowerCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowerCAmelCase , buff=0.0 )
self.add(_lowerCAmelCase )
model_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase , *_lowerCAmelCase )
UpperCAmelCase_ =[mem.copy() for i in range(6 )]
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(_lowerCAmelCase )
UpperCAmelCase_ =[]
UpperCAmelCase_ =[]
for i, rect in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =fill.copy().set_fill(_lowerCAmelCase , opacity=0.7 )
target.move_to(_lowerCAmelCase )
ckpt_arr.append(_lowerCAmelCase )
UpperCAmelCase_ =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase )
UpperCAmelCase_ =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ =MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCAmelCase )
UpperCAmelCase_ =MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCAmelCase_ =[meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ =[meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
UpperCAmelCase_ =Text("Disk" , font_size=24 )
UpperCAmelCase_ =Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) , Write(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) )
UpperCAmelCase_ =[]
for i, rect in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(FadeOut(_lowerCAmelCase ) )
UpperCAmelCase_ =MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) )
self.play(
FadeOut(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , *_lowerCAmelCase ) , )
self.wait()
| 54 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """checkpoint_path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
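    # Hedged CLI sketch (placeholder paths, not verified against a real checkpoint):
    #   python convert_opt_checkpoint.py \
    #       --fairseq_path ./model.pt --pytorch_dump_folder_path ./opt-hf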
| 54 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 54 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( lowercase__ ):
'''simple docstring'''
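    # map model outputs from [-1, 1] back to [0, 1], then NCHW -> NHWC for PIL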
UpperCAmelCase_ =(images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ =numpy_to_pil(lowercase__ )
return images
def a__ ( lowercase__ ):
'''simple docstring'''
if images.ndim == 3:
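        # promote a single HWC image to a batch of one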
UpperCAmelCase_ =images[None, ...]
UpperCAmelCase_ =(images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase_ =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
UpperCAmelCase_ =[Image.fromarray(lowercase__ ) for image in images]
return pil_images
| 54 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self: List[str] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Dict=13 , _lowerCAmelCase: Optional[Any]=32 , _lowerCAmelCase: Optional[int]=3 , _lowerCAmelCase: List[str]=4 , _lowerCAmelCase: Any=[10, 20, 30, 40] , _lowerCAmelCase: Tuple=[2, 2, 3, 2] , _lowerCAmelCase: Tuple=True , _lowerCAmelCase: Any=True , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: Optional[Any]=10 , _lowerCAmelCase: Optional[Any]=0.02 , _lowerCAmelCase: List[str]=["stage2", "stage3", "stage4"] , _lowerCAmelCase: List[Any]=[2, 3, 4] , _lowerCAmelCase: Any=None , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =num_stages
UpperCAmelCase_ =hidden_sizes
UpperCAmelCase_ =depths
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =num_labels
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =out_features
UpperCAmelCase_ =out_indices
UpperCAmelCase_ =scope
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =ConvNextForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase_ =None
UpperCAmelCase_ =ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
UpperCAmelCase_ =model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case =True
_snake_case =False
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: int ):
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ =self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ =ConvNextModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class A ( unittest.TestCase , __lowercase ):
_snake_case =(ConvNextBackbone,) if is_torch_available() else ()
_snake_case =ConvNextConfig
_snake_case =False
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =ConvNextModelTester(self )
| 54 |
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =int(lowercase__ )
if n_element < 1:
        UpperCAmelCase_ =ValueError("n_element should be a positive number" )
raise my_error
UpperCAmelCase_ =[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =(0, 0, 0)
UpperCAmelCase_ =1
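    # i, j and k track the smallest elements whose multiples by 2, 3 and 5
    # have not been used yet; the minimum of those three candidates is always
    # the next Hamming number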
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( lowercase__ , lowercase__=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
UpperCAmelCase_ =n - 1
UpperCAmelCase_ =0
while d % 2 == 0:
        d //= 2
exp += 1
    # now n - 1 = d * 2**exp with d odd
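    # test `prec` random witnesses; a composite n passes for fewer than 1/4 of
    # all bases, so surviving every round makes primality overwhelmingly likely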
UpperCAmelCase_ =0
while count < prec:
UpperCAmelCase_ =random.randint(2 , n - 1 )
UpperCAmelCase_ =bin_exp_mod(lowercase__ , lowercase__ , lowercase__ )
if b != 1:
UpperCAmelCase_ =True
for _ in range(lowercase__ ):
if b == n - 1:
UpperCAmelCase_ =False
break
UpperCAmelCase_ =b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
__lowercase : str =abs(int(input("""Enter bound : """).strip()))
print("""Here's the list of primes:""")
print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 54 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: List[Any] , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 54 | 1 |
class A :
def __init__( self: Optional[Any] , _lowerCAmelCase: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =val
UpperCAmelCase_ =None
UpperCAmelCase_ =None
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Tuple ) -> List[str]:
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
UpperCAmelCase_ =Node(_lowerCAmelCase )
else:
self.left.insert(_lowerCAmelCase )
elif val > self.val:
if self.right is None:
UpperCAmelCase_ =Node(_lowerCAmelCase )
else:
self.right.insert(_lowerCAmelCase )
else:
UpperCAmelCase_ =val
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
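    # left subtree -> node -> right subtree visits the BST keys in ascending order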
if root:
inorder(root.left , lowercase__ )
res.append(root.val )
inorder(root.right , lowercase__ )
def a__ ( lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) == 0:
return arr
UpperCAmelCase_ =Node(arr[0] )
for i in range(1 , len(lowercase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
UpperCAmelCase_ =[]
inorder(lowercase__ , lowercase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 54 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowercase , unittest.TestCase ):
_snake_case =CanineTokenizer
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: List[Any] ) -> CanineTokenizer:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
UpperCAmelCase_ =1024
return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54 | 1 |
import csv
import tweepy
# Twitter API credentials
__lowercase : Any =""""""
__lowercase : int =""""""
__lowercase : Dict =""""""
__lowercase : List[Any] =""""""
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =tweepy.OAuthHandler(lowercase__ , lowercase__ )
auth.set_access_token(lowercase__ , lowercase__ )
UpperCAmelCase_ =tweepy.API(lowercase__ )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase_ =[]
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase_ =api.user_timeline(screen_name=lowercase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowercase__ )
    # save the id of the oldest tweet, minus one
UpperCAmelCase_ =alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase__ ) > 0:
print(F'getting tweets before {oldest}' )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase_ =api.user_timeline(
screen_name=lowercase__ , count=2_0_0 , max_id=lowercase__ )
# save most recent tweets
alltweets.extend(lowercase__ )
        # update the id of the oldest tweet, minus one
UpperCAmelCase_ =alltweets[-1].id - 1
print(F'...{len(lowercase__ )} tweets downloaded so far' )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase_ =[[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'new_{screen_name}_tweets.csv' , "w" ) as f:
UpperCAmelCase_ =csv.writer(lowercase__ )
writer.writerow(["id", "created_at", "text"] )
writer.writerows(lowercase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 54 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: int ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: List[List[List[str]]] , _lowerCAmelCase: List[List[str]] , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 4 , ) -> Dict[str, float]:
'''simple docstring'''
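        # corpus_gleu pools the n-gram matches (orders min_len..max_len) over the
        # whole corpus before taking min(precision, recall), following Wu et al. (2016)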
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_lowerCAmelCase , hypotheses=_lowerCAmelCase , min_len=_lowerCAmelCase , max_len=_lowerCAmelCase )
}
| 54 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def a__ ( lowercase__ ):
'''simple docstring'''
return np.dot(lowercase__ , lowercase__ )
class A :
def __init__( self: List[Any] , *,
_lowerCAmelCase: float = np.inf , _lowerCAmelCase: str = "linear" , _lowerCAmelCase: float = 0.0 , ) -> None:
'''simple docstring'''
UpperCAmelCase_ =regularization
UpperCAmelCase_ =gamma
if kernel == "linear":
UpperCAmelCase_ =self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("rbf kernel requires gamma" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("gamma must be float or int" )
if not self.gamma > 0:
raise ValueError("gamma must be > 0" )
UpperCAmelCase_ =self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
UpperCAmelCase_ =F'Unknown kernel: {kernel}'
raise ValueError(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: ndarray , _lowerCAmelCase: ndarray ) -> float:
'''simple docstring'''
return np.dot(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: ndarray , _lowerCAmelCase: ndarray ) -> float:
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: list[ndarray] , _lowerCAmelCase: ndarray ) -> None:
'''simple docstring'''
UpperCAmelCase_ =observations
UpperCAmelCase_ =classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((UpperCAmelCase_) , ) =np.shape(_lowerCAmelCase )
def to_minimize(_lowerCAmelCase: ndarray ) -> float:
UpperCAmelCase_ =0
((UpperCAmelCase_) , ) =np.shape(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowerCAmelCase )
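        # the dual equality constraint sum_n(ln*yn) = 0 is encoded as a linear
        # constraint whose lower and upper bounds are both 0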
UpperCAmelCase_ =LinearConstraint(_lowerCAmelCase , 0 , 0 )
UpperCAmelCase_ =Bounds(0 , self.regularization )
UpperCAmelCase_ =minimize(
_lowerCAmelCase , np.ones(_lowerCAmelCase ) , bounds=_lowerCAmelCase , constraints=[ly_contraint] ).x
UpperCAmelCase_ =l_star
# calculating mean offset of separation plane to points
UpperCAmelCase_ =0
for i in range(_lowerCAmelCase ):
for j in range(_lowerCAmelCase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
UpperCAmelCase_ =s / n
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: ndarray ) -> int:
'''simple docstring'''
UpperCAmelCase_ =sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _lowerCAmelCase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __lowercase , unittest.TestCase ):
_snake_case =KandinskyVaaImgaImgPipeline
_snake_case =['''image_embeds''', '''negative_image_embeds''', '''image''']
_snake_case =[
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_snake_case =[
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case =False
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ ={
"in_channels": 4,
            # out_channels is twice in_channels because the model predicts both the mean and the variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def lowerCAmelCase__ ( self: Any ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.dummy_unet
UpperCAmelCase_ =self.dummy_movq
UpperCAmelCase_ ={
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_00_85,
"beta_end": 0.0_12,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCAmelCase_ =DDIMScheduler(**_lowerCAmelCase )
UpperCAmelCase_ ={
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[Any]=0 ) -> Dict:
'''simple docstring'''
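        # random vectors stand in for the image embeddings a prior pipeline would produce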
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
# create init_image
UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
UpperCAmelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ =Image.fromarray(np.uinta(_lowerCAmelCase ) ).convert("RGB" ).resize((256, 256) )
if str(_lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase )
else:
UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
UpperCAmelCase_ ={
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ ="cpu"
UpperCAmelCase_ =self.get_dummy_components()
UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase )
UpperCAmelCase_ =pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
UpperCAmelCase_ =output.images
UpperCAmelCase_ =pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
UpperCAmelCase_ =image[0, -3:, -3:, -1]
UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ =np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: List[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_img2img_frog.npy" )
UpperCAmelCase_ =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ ="A red cartoon frog, 4k"
UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
UpperCAmelCase_ =KandinskyVaaImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCAmelCase_ =pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
UpperCAmelCase_ =torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ =pipeline(
image=_lowerCAmelCase , image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCAmelCase_ =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 54 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : Optional[int] =get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class A ( __lowercase , unittest.TestCase ):
_snake_case =PegasusTokenizer
_snake_case =PegasusTokenizerFast
_snake_case =True
_snake_case =True
def lowerCAmelCase__ ( self: Tuple ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ =PegasusTokenizer(_lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowerCAmelCase__ ( self: Dict , **_lowerCAmelCase: int ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
return ("This is a test", "This is a test")
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ ="</s>"
UpperCAmelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(_lowerCAmelCase ) , 1103 )
def lowerCAmelCase__ ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ =(
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
UpperCAmelCase_ =rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
UpperCAmelCase_ =py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase_ ="<mask_1> To ensure a <mask_2> flow of bank resolutions."
UpperCAmelCase_ =[2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
UpperCAmelCase_ =tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
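        # the first `offset` ids (0..102) are reserved for pad/eos and the <mask_*>/<unk_*>
        # specials, so every underlying sentencepiece id is shifted up by 103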
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase_ ="To ensure a smooth flow of bank resolutions."
UpperCAmelCase_ =[413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
UpperCAmelCase_ =tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =["This is going to be way too long." * 150, "short example"]
UpperCAmelCase_ =["not super long but more than 5 tokens", "tiny"]
UpperCAmelCase_ =self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
UpperCAmelCase_ =self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ ={"input_ids": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class A ( __lowercase , unittest.TestCase ):
_snake_case =PegasusTokenizer
_snake_case =PegasusTokenizerFast
_snake_case =True
_snake_case =True
def lowerCAmelCase__ ( self: List[str] ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ =PegasusTokenizer(_lowerCAmelCase , offset=0 , mask_token_sent=_lowerCAmelCase , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowerCAmelCase__ ( self: List[str] , **_lowerCAmelCase: Tuple ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Any ) -> List[str]:
'''simple docstring'''
return ("This is a test", "This is a test")
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase_ =(
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
UpperCAmelCase_ =rust_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
UpperCAmelCase_ =py_tokenizer([raw_input_str] , return_tensors=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =["This is going to be way too long." * 1000, "short example"]
UpperCAmelCase_ =["not super long but more than 5 tokens", "tiny"]
UpperCAmelCase_ =self._large_tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
UpperCAmelCase_ =self._large_tokenizer(
text_target=_lowerCAmelCase , max_length=5 , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCAmelCase ) == 2 # input_ids, attention_mask.
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =(
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
UpperCAmelCase_ =self._large_tokenizer(_lowerCAmelCase ).input_ids
self.assertListEqual(
_lowerCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 54 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( __lowercase , unittest.TestCase ):
_snake_case =True
_snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ =model_class_name.from_pretrained("roberta-base" , from_pt=_lowerCAmelCase )
UpperCAmelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
| 54 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 54 |
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if b == 0:
return (1, 0)
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , a % b )
UpperCAmelCase_ =a // b
return (y, x - k * y)
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , lowercase__ )
UpperCAmelCase_ =na * na
UpperCAmelCase_ =ra * x * na + ra * y * na
return (n % m + m) % m
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
((UpperCAmelCase_) , (UpperCAmelCase_)) =extended_euclid(lowercase__ , lowercase__ )
if b < 0:
UpperCAmelCase_ =(b % n + n) % n
return b
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =invert_modulo(lowercase__ , lowercase__ ), invert_modulo(lowercase__ , lowercase__ )
UpperCAmelCase_ =na * na
UpperCAmelCase_ =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
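    # Worked example, using the upstream names these `a__` definitions correspond
    # to (not part of the original doctests): chinese_remainder_theorem(5, 1, 7, 3)
    # solves x = 1 (mod 5) and x = 3 (mod 7); the smallest non-negative solution
    # is 31, since 31 % 5 == 1 and 31 % 7 == 3.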
| 54 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowercase : Tuple =logging.getLogger(__name__)
__lowercase : Optional[int] =tf.data.AUTOTUNE
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase__ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase__ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase__ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase__ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase__ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase__ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase__ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase__ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase__ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase__ , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase__ , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase__ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase__ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase__ , required=lowercase__ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase__ , help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ =parser.parse_args()
return args
def a__ ( lowercase__ ):
'''simple docstring'''
try:
if args.tpu_name:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =0
for file in file_list:
UpperCAmelCase_ =file.split("/" )[-1]
UpperCAmelCase_ =re.search(R"-\d+-(\d+)\.tfrecord" , lowercase__ ).group(1 )
UpperCAmelCase_ =int(lowercase__ )
num_samples += sample_count
return num_samples
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
UpperCAmelCase_ =dataset.shuffle(len(lowercase__ ) )
UpperCAmelCase_ =tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase_ =dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase_ =dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase_ =dataset.batch(lowercase__ , drop_remainder=lowercase__ )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
UpperCAmelCase_ =dataset.prefetch(lowercase__ )
return dataset
def a__ ( lowercase__ ):
'''simple docstring'''
if not args.no_tpu:
UpperCAmelCase_ =initialize_tpu(lowercase__ )
UpperCAmelCase_ =tf.distribute.TPUStrategy(lowercase__ )
else:
UpperCAmelCase_ =tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ =AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ =AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ =tokenizer.vocab_size
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ =steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ =TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ =create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["accuracy"] )
def decode_fn(lowercase__ ):
UpperCAmelCase_ ={
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ =DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="tf" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
UpperCAmelCase_ =(
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ =data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
UpperCAmelCase_ =args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
UpperCAmelCase_ =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
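    # Example invocation (the script file name and bucket paths are hypothetical;
    # the flags follow the argparse definitions above):
    #   python train_mlm_tpu.py --tokenizer unigram-tokenizer-wikitext \
    #       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
    #       --output_dir gs://my-bucket/checkpoints --bfloat16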
| 54 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowercase : Tuple =logging.getLogger(__name__)
__lowercase : Optional[int] =tf.data.AUTOTUNE
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=lowercase__ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=lowercase__ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=lowercase__ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=lowercase__ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=lowercase__ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=lowercase__ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=lowercase__ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=lowercase__ , default=2**1_8 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=lowercase__ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=lowercase__ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=lowercase__ , default=1E-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=lowercase__ , default=1E-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=lowercase__ , default=5_1_2 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=lowercase__ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=lowercase__ , required=lowercase__ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=lowercase__ , help="Model ID to upload to on the Hugging Face Hub." )
UpperCAmelCase_ =parser.parse_args()
return args
def a__ ( lowercase__ ):
'''simple docstring'''
try:
if args.tpu_name:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCAmelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowercase__ )
tf.tpu.experimental.initialize_tpu_system(lowercase__ )
return tpu
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =0
for file in file_list:
UpperCAmelCase_ =file.split("/" )[-1]
UpperCAmelCase_ =re.search(R"-\d+-(\d+)\.tfrecord" , lowercase__ ).group(1 )
UpperCAmelCase_ =int(lowercase__ )
num_samples += sample_count
return num_samples
def a__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
'''simple docstring'''
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =tf.data.Dataset.from_tensor_slices(lowercase__ )
if shuffle:
UpperCAmelCase_ =dataset.shuffle(len(lowercase__ ) )
UpperCAmelCase_ =tf.data.TFRecordDataset(lowercase__ , num_parallel_reads=lowercase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase_ =dataset.apply(tf.data.experimental.assert_cardinality(lowercase__ ) )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase_ =dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase_ =dataset.batch(lowercase__ , drop_remainder=lowercase__ )
UpperCAmelCase_ =dataset.map(lowercase__ , num_parallel_calls=lowercase__ )
UpperCAmelCase_ =dataset.prefetch(lowercase__ )
return dataset
def a__ ( lowercase__ ):
'''simple docstring'''
if not args.no_tpu:
UpperCAmelCase_ =initialize_tpu(lowercase__ )
UpperCAmelCase_ =tf.distribute.TPUStrategy(lowercase__ )
else:
UpperCAmelCase_ =tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCAmelCase_ =AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase_ =AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase_ =tokenizer.vocab_size
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
UpperCAmelCase_ =tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
UpperCAmelCase_ =count_samples(lowercase__ )
UpperCAmelCase_ =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase_ =steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase_ =TFAutoModelForMaskedLM.from_config(lowercase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase_ , UpperCAmelCase_ =create_optimizer(
num_train_steps=lowercase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowercase__ , metrics=["accuracy"] )
def decode_fn(lowercase__ ):
UpperCAmelCase_ ={
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowercase__ , lowercase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase_ =DataCollatorForLanguageModeling(
tokenizer=lowercase__ , mlm_probability=args.mlm_probability , mlm=lowercase__ , return_tensors="tf" )
def mask_with_collator(lowercase__ ):
# TF really needs an isin() function
UpperCAmelCase_ =(
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCAmelCase_ , UpperCAmelCase_ =data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(lowercase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowercase__ , )
return batch
UpperCAmelCase_ =args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCAmelCase_ =prepare_dataset(
lowercase__ , decode_fn=lowercase__ , mask_fn=lowercase__ , batch_size=lowercase__ , shuffle=lowercase__ , )
UpperCAmelCase_ =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowercase__ ) )
model.fit(
lowercase__ , validation_data=lowercase__ , epochs=args.num_epochs , callbacks=lowercase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowercase : Union[str, Any] =parse_args()
main(args)
| 54 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a__ ( lowercase__ ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class A ( __lowercase ):
@staticmethod
def lowerCAmelCase__ ( _lowerCAmelCase: ArgumentParser ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =parser.add_parser("download" )
download_parser.add_argument(
"--cache-dir" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="Path to location to store the models" )
download_parser.add_argument(
"--force" , action="store_true" , help="Force the model to be download even if already in cache-dir" )
download_parser.add_argument(
"--trust-remote-code" , action="store_true" , help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine" , )
download_parser.add_argument("model" , type=_lowerCAmelCase , help="Name of the model to download" )
download_parser.set_defaults(func=_lowerCAmelCase )
def __init__( self: Tuple , _lowerCAmelCase: str , _lowerCAmelCase: str , _lowerCAmelCase: bool , _lowerCAmelCase: bool ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =model
UpperCAmelCase_ =cache
UpperCAmelCase_ =force
UpperCAmelCase_ =trust_remote_code
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
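# Usage sketch, assuming this command is wired into the `transformers-cli`
# entry point as in the upstream package:
#   transformers-cli download bert-base-uncased --cache-dir ./models --force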
| 54 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A :
@staticmethod
def lowerCAmelCase__ ( *_lowerCAmelCase: List[Any] , **_lowerCAmelCase: List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class A ( unittest.TestCase ):
_snake_case =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ =[
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =vqa_pipeline(_lowerCAmelCase , top_k=1 )
self.assertEqual(
_lowerCAmelCase , [
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
[{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}],
] , )
@require_torch
def lowerCAmelCase__ ( self: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
_lowerCAmelCase , [{"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}, {"score": ANY(_lowerCAmelCase ), "answer": ANY(_lowerCAmelCase )}] )
@slow
@require_torch
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
UpperCAmelCase_ ="./tests/fixtures/tests_samples/COCO/000000039769.png"
UpperCAmelCase_ ="How many cats are there?"
UpperCAmelCase_ =vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}] )
UpperCAmelCase_ =vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{"score": 0.87_99, "answer": "2"}, {"score": 0.2_96, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
pass
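# Standalone usage sketch mirroring the slow test above (same checkpoint and
# fixture image as in this file; scores elided):
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]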
| 54 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[Any] =logging.get_logger(__name__)
__lowercase : Optional[Any] ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A ( __lowercase ):
_snake_case ='''bridgetower_vision_model'''
def __init__( self: Union[str, Any] , _lowerCAmelCase: Optional[int]=768 , _lowerCAmelCase: Tuple=12 , _lowerCAmelCase: Tuple=3 , _lowerCAmelCase: Optional[int]=16 , _lowerCAmelCase: List[Any]=288 , _lowerCAmelCase: int=1 , _lowerCAmelCase: List[Any]=1e-05 , _lowerCAmelCase: int=False , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: List[str]=False , **_lowerCAmelCase: Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =initializer_factor
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =stop_gradient
UpperCAmelCase_ =share_layernorm
UpperCAmelCase_ =remove_last_layer
@classmethod
def lowerCAmelCase__ ( cls: str , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: Dict ) -> "PretrainedConfig":
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
if config_dict.get("model_type" ) == "bridgetower":
UpperCAmelCase_ =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class A ( __lowercase ):
_snake_case ='''bridgetower_text_model'''
def __init__( self: List[str] , _lowerCAmelCase: List[str]=5_0265 , _lowerCAmelCase: List[Any]=768 , _lowerCAmelCase: Optional[int]=12 , _lowerCAmelCase: Tuple=12 , _lowerCAmelCase: List[Any]=1 , _lowerCAmelCase: List[str]=3072 , _lowerCAmelCase: int="gelu" , _lowerCAmelCase: Optional[int]=0.1 , _lowerCAmelCase: str=0.1 , _lowerCAmelCase: int=514 , _lowerCAmelCase: Tuple=1 , _lowerCAmelCase: Optional[Any]=1e-05 , _lowerCAmelCase: str=1 , _lowerCAmelCase: Optional[int]=0 , _lowerCAmelCase: Dict=2 , _lowerCAmelCase: int="absolute" , _lowerCAmelCase: Tuple=True , **_lowerCAmelCase: Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =initializer_factor
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =position_embedding_type
UpperCAmelCase_ =use_cache
UpperCAmelCase_ =pad_token_id
UpperCAmelCase_ =bos_token_id
UpperCAmelCase_ =eos_token_id
@classmethod
def lowerCAmelCase__ ( cls: Dict , _lowerCAmelCase: Union[str, os.PathLike] , **_lowerCAmelCase: str ) -> "PretrainedConfig":
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
if config_dict.get("model_type" ) == "bridgetower":
UpperCAmelCase_ =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class A ( __lowercase ):
_snake_case ='''bridgetower'''
def __init__( self: List[Any] , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: Tuple=768 , _lowerCAmelCase: Optional[Any]=1 , _lowerCAmelCase: Optional[int]=1e-05 , _lowerCAmelCase: List[Any]=False , _lowerCAmelCase: Union[str, Any]="add" , _lowerCAmelCase: int=12 , _lowerCAmelCase: Optional[Any]=6 , _lowerCAmelCase: List[Any]=False , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: Optional[Any]=None , _lowerCAmelCase: Union[str, Any]=None , **_lowerCAmelCase: Dict , ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =kwargs.pop("text_config_dict" , _lowerCAmelCase )
UpperCAmelCase_ =kwargs.pop("vision_config_dict" , _lowerCAmelCase )
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =share_cross_modal_transformer_layers
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =initializer_factor
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =share_link_tower_layers
UpperCAmelCase_ =link_tower_type
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =tie_word_embeddings
UpperCAmelCase_ =init_layernorm_from_vision_encoder
if text_config is None:
UpperCAmelCase_ ={}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
UpperCAmelCase_ ={}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
UpperCAmelCase_ =BridgeTowerTextConfig(**_lowerCAmelCase )
UpperCAmelCase_ =BridgeTowerVisionConfig(**_lowerCAmelCase )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , _lowerCAmelCase: BridgeTowerTextConfig , _lowerCAmelCase: BridgeTowerVisionConfig , **_lowerCAmelCase: int ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =copy.deepcopy(self.__dict__ )
UpperCAmelCase_ =self.text_config.to_dict()
UpperCAmelCase_ =self.vision_config.to_dict()
UpperCAmelCase_ =self.__class__.model_type
return output
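# Composition sketch, using the upstream class and method names these obfuscated
# definitions correspond to (the classmethod above is assumed to be
# `from_text_vision_configs`; values are illustrative):
#   text_cfg = BridgeTowerTextConfig(vocab_size=50265)
#   vision_cfg = BridgeTowerVisionConfig(image_size=288)
#   cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   cfg.to_dict()  # nests the sub-configs under "text_config" / "vision_config"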
| 54 |
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
    # Compute the profit per kilogram (profit/weight ratio) for each item.
UpperCAmelCase_ =[p / w for p, w in zip(lowercase__ , lowercase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ =sorted(lowercase__ )
# declaring useful variables
UpperCAmelCase_ =len(lowercase__ )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
UpperCAmelCase_ =0
    # Greedily take items, best profit/weight ratio first, until the capacity is filled or all items are used.
while limit <= max_weight and i < length:
        # Pick the largest remaining profit/weight ratio and locate its item.
UpperCAmelCase_ =sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ =profit_by_weight.index(lowercase__ )
UpperCAmelCase_ =-1
        # Check whether the whole item still fits in the remaining capacity.
if max_weight - limit >= weight[index]:
limit += weight[index]
            # Take the whole item; its fraction is weight[index] / weight[index] == 1.
gain += 1 * profit[index]
else:
            # The item does not fit entirely: take only the remaining capacity
            # and add the proportional profit, (max_weight - limit) / weight[index].
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
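# Worked example (hand-checked): profits [10, 9], weights [5, 6] kg, capacity 8 kg.
# Item 0 (ratio 2.0) is taken whole (gain 10, 3 kg of capacity left), then 3/6 of
# item 1 (ratio 1.5) is taken fractionally, adding 3 / 6 * 9 = 4.5:
#   calc_profit([10, 9], [5, 6], 8) == 14.5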
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
__lowercase : List[str] =[int(x) for x in input("""Input profits separated by spaces: """).split()]
__lowercase : Union[str, Any] =[int(x) for x in input("""Input weights separated by spaces: """).split()]
__lowercase : Tuple =int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 54 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__lowercase : str =[
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
__lowercase : Any =[
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
__lowercase : Tuple =(
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowercase : Dict =(
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
__lowercase : Tuple =[
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
for tf_name, hf_name in patterns:
UpperCAmelCase_ =k.replace(lowercase__ , lowercase__ )
return k
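# Example trace (patterns are applied top to bottom, each as a plain replace):
#   rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel",
#                         DECODER_PATTERNS)
#   -> "model.decoder.layers.0.self_attn.q_proj.weight"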
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =BigBirdPegasusConfig(**lowercase__ )
UpperCAmelCase_ =BigBirdPegasusForConditionalGeneration(lowercase__ )
UpperCAmelCase_ =torch_model.state_dict()
UpperCAmelCase_ ={}
# separating decoder weights
UpperCAmelCase_ ={k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
UpperCAmelCase_ ={k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
UpperCAmelCase_ =[k.endswith(lowercase__ ) for ending in KEYS_TO_IGNORE]
if any(lowercase__ ):
continue
UpperCAmelCase_ =DECODER_PATTERNS
UpperCAmelCase_ =rename_state_dict_key(lowercase__ , lowercase__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
UpperCAmelCase_ =v.T
UpperCAmelCase_ =torch.from_numpy(lowercase__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
UpperCAmelCase_ =[k.endswith(lowercase__ ) for ending in KEYS_TO_IGNORE]
if any(lowercase__ ):
continue
UpperCAmelCase_ =REMAINING_PATTERNS
UpperCAmelCase_ =rename_state_dict_key(lowercase__ , lowercase__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(i in k for i in ["dense", "query", "key", "value"] ):
UpperCAmelCase_ =v.T
UpperCAmelCase_ =torch.from_numpy(lowercase__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
UpperCAmelCase_ =mapping["model.embed_positions.weight"]
UpperCAmelCase_ =mapping.pop("model.embed_positions.weight" )
UpperCAmelCase_ , UpperCAmelCase_ =torch_model.load_state_dict(lowercase__ , strict=lowercase__ )
UpperCAmelCase_ =[
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =tf.train.list_variables(lowercase__ )
UpperCAmelCase_ ={}
UpperCAmelCase_ =["global_step"]
for name, shape in tqdm(lowercase__ , desc="converting tf checkpoint to dict" ):
UpperCAmelCase_ =any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase_ =tf.train.load_variable(lowercase__ , lowercase__ )
UpperCAmelCase_ =array
return tf_weights
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =get_tf_weights_as_numpy(lowercase__ )
UpperCAmelCase_ =convert_bigbird_pegasus(lowercase__ , lowercase__ )
torch_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__lowercase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
__lowercase : Dict =parser.parse_args()
__lowercase : List[str] ={}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 54 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase : Dict ={
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any =["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] =[
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__lowercase : Union[str, Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 54 | 1 |
__lowercase : Tuple =8.314_4598
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
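    # Root-mean-square speed of an ideal-gas molecule: v_rms = sqrt(3 * R * T / M),
    # with R in J/(mol*K), temperature T in kelvin and molar mass M in kg/mol.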
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
__lowercase : Any =300
__lowercase : Optional[int] =0.028 # molar mass of N2 in kg/mol
__lowercase : str =rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 54 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a__ ( lowercase__ , lowercase__ , lowercase__=1_0_2_4 , lowercase__=1_0_2_4 , lowercase__=False , **lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="train" , **lowercase__ )
UpperCAmelCase_ =tok.pad_token_id
def get_lens(lowercase__ ):
UpperCAmelCase_ =tqdm(
DataLoader(lowercase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowercase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCAmelCase_ =[]
for batch in dl:
UpperCAmelCase_ =batch["input_ids"].ne(lowercase__ ).sum(1 ).tolist()
UpperCAmelCase_ =batch["labels"].ne(lowercase__ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase__ , lowercase__ ):
max_lens.append(max(lowercase__ , lowercase__ ) )
else:
max_lens.extend(lowercase__ )
return max_lens
UpperCAmelCase_ =get_lens(lowercase__ )
UpperCAmelCase_ =SeqaSeqDataset(lowercase__ , lowercase__ , lowercase__ , lowercase__ , type_path="val" , **lowercase__ )
UpperCAmelCase_ =get_lens(lowercase__ )
pickle_save(lowercase__ , train_ds.len_file )
pickle_save(lowercase__ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
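    # Example (tokenizer name and data dir are placeholders; the flag names follow
    # the upstream `save_len_file` signature exposed through fire):
    #   python save_len_file.py t5-small data/wmt_en_ro --max_source_length 1024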
| 54 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[int] =logging.get_logger(__name__)
__lowercase : Tuple ={
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class A ( __lowercase ):
_snake_case ='''data2vec-audio'''
def __init__( self: str , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=768 , _lowerCAmelCase: Tuple=12 , _lowerCAmelCase: List[str]=12 , _lowerCAmelCase: Optional[int]=3072 , _lowerCAmelCase: List[Any]="gelu" , _lowerCAmelCase: Union[str, Any]=0.1 , _lowerCAmelCase: Optional[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: int=0.0 , _lowerCAmelCase: List[str]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.02 , _lowerCAmelCase: Optional[int]=1e-5 , _lowerCAmelCase: Union[str, Any]="gelu" , _lowerCAmelCase: Dict=(512, 512, 512, 512, 512, 512, 512) , _lowerCAmelCase: int=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase: Any=(10, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase: List[Any]=False , _lowerCAmelCase: List[Any]=16 , _lowerCAmelCase: Tuple=19 , _lowerCAmelCase: int=5 , _lowerCAmelCase: List[str]=0.05 , _lowerCAmelCase: List[str]=10 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Any=0.0 , _lowerCAmelCase: Optional[Any]=10 , _lowerCAmelCase: List[str]=0 , _lowerCAmelCase: Optional[int]="sum" , _lowerCAmelCase: List[Any]=False , _lowerCAmelCase: List[str]=False , _lowerCAmelCase: Dict=256 , _lowerCAmelCase: Union[str, Any]=(512, 512, 512, 512, 1500) , _lowerCAmelCase: List[Any]=(5, 3, 3, 1, 1) , _lowerCAmelCase: Any=(1, 2, 3, 1, 1) , _lowerCAmelCase: Optional[int]=512 , _lowerCAmelCase: List[Any]=0 , _lowerCAmelCase: int=1 , _lowerCAmelCase: Optional[Any]=2 , _lowerCAmelCase: Union[str, Any]=False , _lowerCAmelCase: Union[str, Any]=3 , _lowerCAmelCase: Optional[int]=2 , _lowerCAmelCase: str=3 , _lowerCAmelCase: List[str]=None , **_lowerCAmelCase: List[str] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =feat_extract_activation
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =conv_bias
UpperCAmelCase_ =num_conv_pos_embeddings
UpperCAmelCase_ =num_conv_pos_embedding_groups
UpperCAmelCase_ =conv_pos_kernel_size
UpperCAmelCase_ =len(self.conv_dim )
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =hidden_dropout
UpperCAmelCase_ =attention_dropout
UpperCAmelCase_ =activation_dropout
UpperCAmelCase_ =feat_proj_dropout
UpperCAmelCase_ =final_dropout
UpperCAmelCase_ =layerdrop
UpperCAmelCase_ =layer_norm_eps
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ =mask_time_prob
UpperCAmelCase_ =mask_time_length
UpperCAmelCase_ =mask_time_min_masks
UpperCAmelCase_ =mask_feature_prob
UpperCAmelCase_ =mask_feature_length
UpperCAmelCase_ =mask_feature_min_masks
# ctc loss
UpperCAmelCase_ =ctc_loss_reduction
UpperCAmelCase_ =ctc_zero_infinity
# adapter
UpperCAmelCase_ =add_adapter
UpperCAmelCase_ =adapter_kernel_size
UpperCAmelCase_ =adapter_stride
UpperCAmelCase_ =num_adapter_layers
UpperCAmelCase_ =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =list(_lowerCAmelCase )
UpperCAmelCase_ =xvector_output_dim
@property
def lowerCAmelCase__ ( self: List[Any] ) -> int:
'''simple docstring'''
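        # Total stride of the convolutional feature extractor. With the default
        # conv_stride (5, 2, 2, 2, 2, 2, 2) this is 5 * 2**6 = 320 input samples
        # per output frame, i.e. 50 frames per second for 16 kHz audio.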
return math.prod(self.conv_stride )
| 54 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A :
def __init__( self: Any , _lowerCAmelCase: str , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: List[str]=30 , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: Dict=True , _lowerCAmelCase: int=True , _lowerCAmelCase: Tuple=32 , _lowerCAmelCase: str=2 , _lowerCAmelCase: Dict=4 , _lowerCAmelCase: Dict=37 , _lowerCAmelCase: Optional[Any]="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: Optional[int]=None , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =image_size
UpperCAmelCase_ =patch_size
UpperCAmelCase_ =num_channels
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ =(image_size // patch_size) ** 2
UpperCAmelCase_ =num_patches + 1
def lowerCAmelCase__ ( self: Any ) -> int:
'''simple docstring'''
UpperCAmelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ =None
if self.use_labels:
UpperCAmelCase_ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self: List[Any] ) -> Dict:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Any , _lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel(config=_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
UpperCAmelCase_ =(image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.type_sequence_label_size
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
UpperCAmelCase_ =self.image_size // 2
UpperCAmelCase_ =pixel_values[:, :, :image_size, :image_size]
UpperCAmelCase_ =model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ =1
UpperCAmelCase_ =TFViTForImageClassification(_lowerCAmelCase )
UpperCAmelCase_ =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( __lowercase , __lowercase , unittest.TestCase ):
_snake_case =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_snake_case =(
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
_snake_case =False
_snake_case =False
_snake_case =False
def lowerCAmelCase__ ( self: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =TFViTModelTester(self )
UpperCAmelCase_ =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Dict ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCAmelCase_ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def lowerCAmelCase__ ( self: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self: Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 )
| 54 | 1 |
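# Self-contained check of the patch-count arithmetic the ViT tests above rely
# on, using the tester's defaults (image_size=30, patch_size=2):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225 patches
seq_length = num_patches + 1                   # +1 for the [CLS] token
assert seq_length == 226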
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A ( __lowercase ):
_snake_case =['''image_processor''', '''tokenizer''']
_snake_case ='''Pix2StructImageProcessor'''
_snake_case =('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self: Optional[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[str] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self: Tuple , _lowerCAmelCase: str=None , _lowerCAmelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase: Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[int] = 2048 , _lowerCAmelCase: int = 0 , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[bool] = None , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = False , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , **_lowerCAmelCase: int , ) -> BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase_ =self.tokenizer
UpperCAmelCase_ =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase_ =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , **_lowerCAmelCase )
else:
# add pixel_values and bbox
UpperCAmelCase_ =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , max_patches=_lowerCAmelCase , header_text=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase_ =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
if "attention_mask" in text_encoding:
UpperCAmelCase_ =text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
UpperCAmelCase_ =text_encoding.pop("input_ids" )
else:
UpperCAmelCase_ =None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def lowerCAmelCase__ ( self: Optional[int] , *_lowerCAmelCase: Any , **_lowerCAmelCase: int ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: str , *_lowerCAmelCase: List[str] , **_lowerCAmelCase: List[str] ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer.model_input_names
UpperCAmelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 54 |
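# Hedged usage sketch of the processor above; the checkpoint name is
# illustrative and network access plus a loaded `image` are assumed:
# from transformers import Pix2StructProcessor
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
# inputs = processor(images=image, text="Describe the figure", return_tensors="pt")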
from __future__ import annotations
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
if len(lowercase__ ) == 0:
return False
UpperCAmelCase_ =len(lowercase__ ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , lowercase__ )
else:
return binary_search(a_list[midpoint + 1 :] , lowercase__ )
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter numbers separated by comma:\n""").strip()
__lowercase : Optional[Any] =[int(item.strip()) for item in user_input.split(""",""")]
__lowercase : List[Any] =int(input("""Enter the number to be found in the list:\n""").strip())
__lowercase : Optional[Any] ="""""" if binary_search(sequence, target) else """not """
print(f"""{target} was {not_str}found in {sequence}""")
| 54 | 1 |
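# Self-contained spot-check of the halving logic above, reimplemented locally
# so the check runs standalone:
def _bsearch(a, item):
    if not a:
        return False
    mid = len(a) // 2
    if a[mid] == item:
        return True
    if item < a[mid]:
        return _bsearch(a[:mid], item)
    return _bsearch(a[mid + 1 :], item)

assert _bsearch([1, 3, 5, 7, 9], 7) and not _bsearch([1, 3, 5, 7, 9], 4)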
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( __lowercase ):
_snake_case =42
_snake_case =42
def __init__( self: int , _lowerCAmelCase: UNetaDModel , _lowerCAmelCase: KarrasVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self: int , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 50 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , **_lowerCAmelCase: Union[str, Any] , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
UpperCAmelCase_ =self.unet.config.sample_size
UpperCAmelCase_ =(batch_size, 3, img_size, img_size)
UpperCAmelCase_ =self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase_ =randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase_ =self.scheduler.schedule[t]
UpperCAmelCase_ =self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase_ , UpperCAmelCase_ =self.scheduler.add_noise_to_input(_lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase_ =(sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase_ =self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase_ =(sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase_ =self.scheduler.step_correct(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , step_output.prev_sample , step_output["derivative"] , )
UpperCAmelCase_ =step_output.prev_sample
UpperCAmelCase_ =(sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ =self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 54 |
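# Toy scalar sketch of the Euler step plus 2nd-order (Heun-style) correction
# used in the sampling loop above; _f() stands in for the denoiser-derived
# derivative and all numbers are illustrative:
def _f(x):
    return -x  # toy ODE dx/dsigma = -x

sigma, sigma_prev, x = 1.0, 0.5, 2.0
d = _f(x)
x_euler = x + (sigma_prev - sigma) * d                  # plain Euler step
d_prev = _f(x_euler)
x_heun = x + (sigma_prev - sigma) * 0.5 * (d + d_prev)  # averaged-slope correction
assert abs(x_heun - 3.25) < 1e-12  # exact solution is 2 * e**0.5 ~ 3.2974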
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
__lowercase : Any =(
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
__lowercase : Union[str, Any] =(
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
__lowercase : List[str] =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
__lowercase : str =(
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
__lowercase : Union[str, Any] =(
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
__lowercase : str =(
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
__lowercase : int =(
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def a__ ( ):
'''simple docstring'''
    UpperCAmelCase_ , UpperCAmelCase_ =randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
UpperCAmelCase_ =["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
UpperCAmelCase_ , UpperCAmelCase_ =SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def a__ ( lowercase__ = 1_0_0 ):
'''simple docstring'''
return (generate_random_hand() for _ in range(lowercase__ ))
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand(lowercase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowercase__ )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand(lowercase__ ) for hand in SORTED_HANDS]
UpperCAmelCase_ =poker_hands.copy()
shuffle(lowercase__ )
UpperCAmelCase_ =chain(sorted(lowercase__ ) )
for index, hand in enumerate(lowercase__ ):
assert hand == poker_hands[index]
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=lowercase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =PokerHand("2C 4S AS 3D 5C" )
UpperCAmelCase_ =True
UpperCAmelCase_ =[5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =0
    UpperCAmelCase_ =os.path.abspath(os.path.dirname(__file__ ) )
UpperCAmelCase_ =os.path.join(lowercase__ , "poker_hands.txt" )
with open(lowercase__ ) as file_hand:
for line in file_hand:
UpperCAmelCase_ =line[:1_4].strip()
UpperCAmelCase_ =line[1_5:].strip()
UpperCAmelCase_ , UpperCAmelCase_ =PokerHand(lowercase__ ), PokerHand(lowercase__ )
UpperCAmelCase_ =player.compare_with(lowercase__ )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 54 | 1 |
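# The expected-result indexing trick used in generate_random_hand() above:
# (play >= oppo) + (play > oppo) evaluates to 0, 1 or 2 and selects
# "Loss", "Tie" or "Win" respectively.
for play, oppo, expected in [(1, 5, "Loss"), (3, 3, "Tie"), (7, 2, "Win")]:
    assert ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] == expected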
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: List[Any] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ =crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: float , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Any , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase_ =size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ =int(shortest_edge / crop_pct )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 54 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase : int =logging.get_logger(__name__)
class A ( __lowercase ):
_snake_case =['''pixel_values''']
def __init__( self: List[Any] , _lowerCAmelCase: bool = True , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase: bool = True , _lowerCAmelCase: Union[int, float] = 1 / 255 , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , **_lowerCAmelCase: Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
UpperCAmelCase_ =size if size is not None else {"shortest_edge": 384}
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =do_resize
UpperCAmelCase_ =size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase_ =crop_pct if crop_pct is not None else 224 / 256
UpperCAmelCase_ =resample
UpperCAmelCase_ =do_rescale
UpperCAmelCase_ =rescale_factor
UpperCAmelCase_ =do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Dict[str, int] , _lowerCAmelCase: float , _lowerCAmelCase: PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Any , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase_ =size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase_ =int(shortest_edge / crop_pct )
UpperCAmelCase_ =get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_lowerCAmelCase , size=(shortest_edge, shortest_edge) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_lowerCAmelCase , size=(shortest_edge, shortest_edge) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[int, float] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: str , ) -> Optional[Any]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: np.ndarray , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Union[float, List[float]] , _lowerCAmelCase: Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase: Dict , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: ImageInput , _lowerCAmelCase: bool = None , _lowerCAmelCase: Dict[str, int] = None , _lowerCAmelCase: float = None , _lowerCAmelCase: PILImageResampling = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: float = None , _lowerCAmelCase: bool = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[float, List[float]]] = None , _lowerCAmelCase: Optional[Union[str, TensorType]] = None , _lowerCAmelCase: ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase: Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase_ =do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ =crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase_ =resample if resample is not None else self.resample
UpperCAmelCase_ =do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ =do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ =image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ =image_std if image_std is not None else self.image_std
UpperCAmelCase_ =size if size is not None else self.size
UpperCAmelCase_ =get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
UpperCAmelCase_ =make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ =[to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ =[self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , crop_pct=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ =[self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ =[self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
UpperCAmelCase_ =[to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
UpperCAmelCase_ ={"pixel_values": images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 54 | 1 |
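# Self-contained check of the crop_pct arithmetic in the resize() method above:
# for a requested shortest edge below 384, the image is first resized to
# shortest_edge / crop_pct and then center-cropped back down.
shortest_edge, crop_pct = 224, 224 / 256
resize_target = int(shortest_edge / crop_pct)
assert resize_target == 256  # resize to 256, then center-crop to 224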
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase : List[Any] =datasets.utils.logging.get_logger(__name__)
@dataclass
class A ( datasets.BuilderConfig ):
_snake_case =10000
_snake_case =None
_snake_case =None
class A ( datasets.ArrowBasedBuilder ):
_snake_case =ParquetConfig
def lowerCAmelCase__ ( self: Optional[int] ) -> Dict:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
UpperCAmelCase_ =dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
UpperCAmelCase_ =data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ =[dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase_ =[]
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase_ =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase_ =[dl_manager.iter_files(_lowerCAmelCase ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_lowerCAmelCase ):
with open(_lowerCAmelCase , "rb" ) as f:
UpperCAmelCase_ =datasets.Features.from_arrow_schema(pq.read_schema(_lowerCAmelCase ) )
break
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: pa.Table ) -> pa.Table:
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ =table_cast(_lowerCAmelCase , self.info.features.arrow_schema )
return pa_table
def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
with open(_lowerCAmelCase , "rb" ) as f:
UpperCAmelCase_ =pq.ParquetFile(_lowerCAmelCase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
UpperCAmelCase_ =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(_lowerCAmelCase )
except ValueError as e:
                    logger.error(F'Failed to read file \'{file}\' with error {type(e )}: {e}' )
raise
| 54 |
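# Minimal standalone sketch of the batched read pattern _generate_tables uses
# above; the file name is illustrative:
import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({"a": [1, 2, 3]}), "tiny.parquet")
parquet_file = pq.ParquetFile("tiny.parquet")
for record_batch in parquet_file.iter_batches(batch_size=2):
    pa_table = pa.Table.from_batches([record_batch])  # same conversion point as above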
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowercase : List[Any] =WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =test_results.split(" " )
UpperCAmelCase_ =0
UpperCAmelCase_ =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCAmelCase_ =expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
UpperCAmelCase_ =None
UpperCAmelCase_ =False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , lowercase__ ):
UpperCAmelCase_ =True
UpperCAmelCase_ =line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
UpperCAmelCase_ =line
UpperCAmelCase_ =False
return failures
class A :
def __init__( self: Optional[Any] , _lowerCAmelCase: str , _lowerCAmelCase: Dict ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =title
UpperCAmelCase_ =doc_test_results["time_spent"].split("," )[0]
UpperCAmelCase_ =doc_test_results["success"]
UpperCAmelCase_ =doc_test_results["failures"]
UpperCAmelCase_ =self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCAmelCase_ =doc_test_results
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self._time_spent]
UpperCAmelCase_ =0
for time in time_spent:
UpperCAmelCase_ =time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowerCAmelCase ) == 1:
UpperCAmelCase_ =[0, 0, time_parts[0]]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F'{int(_lowerCAmelCase )}h{int(_lowerCAmelCase )}m{int(_lowerCAmelCase )}s'
@property
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
F' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def lowerCAmelCase__ ( self: Tuple ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =40
UpperCAmelCase_ ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(_lowerCAmelCase , _lowerCAmelCase )}
UpperCAmelCase_ =""
for category, failures in category_failures.items():
if len(_lowerCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowerCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(_lowerCAmelCase )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
UpperCAmelCase_ =F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
UpperCAmelCase_ =client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =""
for key, value in failures.items():
UpperCAmelCase_ =value[:200] + " [Truncated]" if len(_lowerCAmelCase ) > 250 else value
failures_text += F'*{key}*\n_{value}_\n\n'
UpperCAmelCase_ =job_name
UpperCAmelCase_ ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
UpperCAmelCase_ ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
UpperCAmelCase_ =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
        UpperCAmelCase_ =sorted(self.doc_test_results.items() , key=lambda _lowerCAmelCase : _lowerCAmelCase[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
UpperCAmelCase_ =F'*Num failures* :{len(job_result["failed"] )} \n'
UpperCAmelCase_ =job_result["failures"]
UpperCAmelCase_ =self.get_reply_blocks(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text=_lowerCAmelCase )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F'Results for {job}' , blocks=_lowerCAmelCase , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =os.environ["GITHUB_RUN_ID"]
UpperCAmelCase_ =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
UpperCAmelCase_ =requests.get(lowercase__ ).json()
UpperCAmelCase_ ={}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
UpperCAmelCase_ =math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(lowercase__ ):
UpperCAmelCase_ =requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase__ )
return {}
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ ={}
if os.path.exists(lowercase__ ):
UpperCAmelCase_ =os.listdir(lowercase__ )
for file in files:
try:
with open(os.path.join(lowercase__ , lowercase__ ) , encoding="utf-8" ) as f:
UpperCAmelCase_ =f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase__ , lowercase__ )}.' ) from e
return _artifact
def a__ ( ):
'''simple docstring'''
class A :
def __init__( self: Tuple , _lowerCAmelCase: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =name
UpperCAmelCase_ =[]
def __str__( self: Optional[int] ) -> Tuple:
'''simple docstring'''
return self.name
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: str ) -> List[Any]:
'''simple docstring'''
self.paths.append({"name": self.name, "path": path} )
UpperCAmelCase_ ={}
UpperCAmelCase_ =filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCAmelCase_ =directory
if artifact_name not in _available_artifacts:
UpperCAmelCase_ =Artifact(lowercase__ )
_available_artifacts[artifact_name].add_path(lowercase__ )
return _available_artifacts
if __name__ == "__main__":
__lowercase : str =get_job_links()
__lowercase : Dict =retrieve_available_artifacts()
__lowercase : Optional[int] =collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowercase : Any ={
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowercase : Tuple =github_actions_job_links.get("""run_doctests""")
__lowercase : int =available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__lowercase : str =retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__lowercase , __lowercase , __lowercase : Tuple =handle_test_results(artifact["""stats"""])
__lowercase : int =failed
__lowercase : int =success
__lowercase : str =time_spent[1:-1] + """, """
__lowercase : str =extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__lowercase : int =line.replace("""FAILED """, """""")
__lowercase : List[Any] =line.split()[0].replace("""\n""", """""")
if "::" in line:
__lowercase , __lowercase : Any =line.split("""::""")
else:
__lowercase , __lowercase : Dict =line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowercase : Optional[int] =docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowercase : Tuple =all_failures[test] if test in all_failures else """N/A"""
__lowercase : Optional[int] =failure
break
__lowercase : Optional[int] =Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 54 | 1 |
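# Standalone re-run of the token walk implemented in handle_test_results above,
# on an example pytest summary line:
expressions = "1 failed, 122 passed in 315.02s ==".split(" ")
failed = sum(int(expressions[i - 1]) for i, e in enumerate(expressions) if "failed" in e)
success = sum(int(expressions[i - 1]) for i, e in enumerate(expressions) if "passed" in e)
assert (failed, success) == (1, 122)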
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class A ( __lowercase ):
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =8
# DPR tok
UpperCAmelCase_ =[
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ =os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase_ =os.path.join(_lowerCAmelCase , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase_ =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCAmelCase_ =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
UpperCAmelCase_ =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase_ ={"unk_token": "<unk>"}
UpperCAmelCase_ =os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
UpperCAmelCase_ =os.path.join(_lowerCAmelCase , BART_VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ =os.path.join(_lowerCAmelCase , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def lowerCAmelCase__ ( self: Optional[Any] ) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowerCAmelCase__ ( self: List[str] ) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowerCAmelCase__ ( self: str ) -> int:
'''simple docstring'''
UpperCAmelCase_ =os.path.join(self.tmpdirname , "rag_tokenizer" )
UpperCAmelCase_ =RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
UpperCAmelCase_ =RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_lowerCAmelCase )
rag_tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =RagTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _lowerCAmelCase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _lowerCAmelCase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowerCAmelCase__ ( self: Any ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =RagTokenizer.from_pretrained("facebook/rag-token-nq" )
UpperCAmelCase_ =[
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: Tuple ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
UpperCAmelCase_ =[
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 54 |
def a__ ( pence = 2_0_0 ):
    '''simple docstring'''
    UpperCAmelCase_ =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
    UpperCAmelCase_ =[0] * (pence + 1)
    number_of_ways[0] =1 # base case: one way to make 0 pence (use no coins)
    for coin in coins:
        for i in range(coin , pence + 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
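# A self-contained restatement of the DP above (count_ways is a hypothetical
# helper added for illustration): iterating coins in the outer loop counts
# combinations rather than permutations, so each multiset of coins is counted
# exactly once.
def count_ways(pence, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    ways = [0] * (pence + 1)
    ways[0] = 1 # one way to make 0 pence: use no coins
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]
if __name__ == "__main__":
    assert count_ways(5) == 4 # 5, 2+2+1, 2+1+1+1, 1+1+1+1+1
    assert count_ways(200) == 7_3682 # Project Euler 31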
| 54 | 1 |
def a__ ( head ):
'''simple docstring'''
if not head:
return True
# split the list to two parts
UpperCAmelCase_ , UpperCAmelCase_ =head.next, head
while fast and fast.next:
UpperCAmelCase_ =fast.next.next
UpperCAmelCase_ =slow.next
UpperCAmelCase_ =slow.next
UpperCAmelCase_ =None # Don't forget here! But forget still works!
# reverse the second part
UpperCAmelCase_ =None
while second:
UpperCAmelCase_ =second.next
UpperCAmelCase_ =node
UpperCAmelCase_ =second
UpperCAmelCase_ =nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ =node.next
UpperCAmelCase_ =head.next
return True
def a__ ( head ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ =UpperCAmelCase_ =UpperCAmelCase_ =head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ =fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ =[slow.val]
while slow.next:
UpperCAmelCase_ =slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ =cur.next
return True
def a__ ( head ):
'''simple docstring'''
if not head or not head.next:
return True
UpperCAmelCase_ ={}
UpperCAmelCase_ =0
while head:
if head.val in d:
            d[head.val].append(pos )
else:
UpperCAmelCase_ =[pos]
UpperCAmelCase_ =head.next
pos += 1
UpperCAmelCase_ =pos - 1
UpperCAmelCase_ =0
for v in d.values():
        if len(v ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ =0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
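# Self-contained sketch of the node interface the three variants above rely on
# (ListNode, build_list and check_palindrome are hypothetical names added only
# for illustration; the originals expect nodes exposing `.val` and `.next`).
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt
def build_list(values):
    # build a singly linked list from an iterable and return its head
    head = None
    for v in reversed(list(values)):
        head = ListNode(v, head)
    return head
def check_palindrome(head):
    # reference check: collect the values once, compare with their reverse
    vals = []
    while head:
        vals.append(head.val)
        head = head.next
    return vals == vals[::-1]
if __name__ == "__main__":
    assert check_palindrome(build_list([1, 2, 2, 1]))
    assert not check_palindrome(build_list([1, 2, 3]))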
| 54 |
import sys
def a__ ( array ):
    '''simple docstring'''
    UpperCAmelCase_ =len(array )
    UpperCAmelCase_ =[[0 for x in range(n )] for x in range(n )]
    UpperCAmelCase_ =[[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            UpperCAmelCase_ =a + chain_length - 1
            UpperCAmelCase_ =sys.maxsize
            for c in range(a , b ):
UpperCAmelCase_ =(
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ =cost
UpperCAmelCase_ =c
return matrix, sol
def a__ ( optimal_solution , i , j ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
else:
print("(" , end=" " )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
print(")" , end=" " )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =[3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
    UpperCAmelCase_ =len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    UpperCAmelCase_ , UpperCAmelCase_ =matrix_chain_order(array )
    print("No. of Operations required: " + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
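# Reference point: for the dimension array above (matrices 30x35, 35x15, 15x5,
# 5x10, 10x20, 20x25) the classic CLRS result is 15125 scalar multiplications,
# achieved by the parenthesisation ((A1(A2A3))((A4A5)A6)), so main() should
# report 15125 operations.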
| 54 | 1 |
import math
def a__ ( number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( lowercase__ = 1_0_0_0_1 ):
'''simple docstring'''
try:
UpperCAmelCase_ =int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase_ =[]
UpperCAmelCase_ =2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 54 |
from math import log2
def a__ ( lowercase__ ):
    '''simple docstring'''
    if not isinstance(lowercase__ , int ):
        raise TypeError("Input value must be a 'int' type" )
    elif lowercase__ < 0:
        raise ValueError("Input value must be a positive integer" )
    return 0 if (lowercase__ == 0) else int(log2(lowercase__ & -lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
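# Worked example of the bit trick above: 12 is 0b1100, so 12 & -12 == 0b100 == 4
# (two's complement negation isolates the lowest set bit) and int(log2(4)) == 2
# is that bit's zero-based index.
if __name__ == "__main__":
    assert a__(12) == 2
    assert a__(1) == 0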
| 54 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Union[str, Any] ={
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] =["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] =[
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict =[
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
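# Typical consumption of the lazy module above (a sketch, not from this file;
# the checkpoint name is the public allenai/longformer-base-4096, and actual
# availability depends on the optional torch/tokenizers installs guarded above):
#
# from transformers import LongformerModel, LongformerTokenizerFast
# tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
# model = LongformerModel.from_pretrained("allenai/longformer-base-4096")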
| 54 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Union[str, Any] =logging.get_logger(__name__)
def a__ ( lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )
if "model" in sd.keys():
UpperCAmelCase_ =torch.load(lowercase__ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCAmelCase_ =[
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
            sd.pop(key )
UpperCAmelCase_ ={
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
            sd[new_key] =sd.pop(old_key )
UpperCAmelCase_ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCAmelCase_ =sd[key]
            # We split the fused QKV projection into separate Q, K and V tensors
UpperCAmelCase_ =key.replace(".qkv_proj." , ".q_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".k_proj." )
UpperCAmelCase_ =key.replace(".qkv_proj." , ".v_proj." )
UpperCAmelCase_ =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =torch.split(value , depth // 3 , dim=0 )
            sd[q_name] =q
            sd[k_name] =k
            sd[v_name] =v
del sd[key]
return sd
@torch.no_grad()
def a__ ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    '''simple docstring'''
    UpperCAmelCase_ =load_checkpoint(checkpoint_path )
    if config is not None:
        UpperCAmelCase_ =OPTConfig.from_pretrained(config )
    else:
        UpperCAmelCase_ =OPTConfig()
    UpperCAmelCase_ =OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowercase : str =parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
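# Self-contained sketch of the fused-QKV split performed in load_checkpoint
# above (the tensor and names here are illustrative only): a (3*d, hidden)
# weight is cut into three equal (d, hidden) blocks along dim 0.
def _demo_qkv_split():
    qkv_weight = torch.randn(12, 4) # hypothetical fused weight, depth = 12
    depth = qkv_weight.shape[0]
    assert depth % 3 == 0
    q, k, v = torch.split(qkv_weight, depth // 3, dim=0)
    assert q.shape == k.shape == v.shape == (4, 4)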
| 54 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowercase , unittest.TestCase ):
_snake_case =CanineTokenizer
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: List[Any] ) -> CanineTokenizer:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
UpperCAmelCase_ =1024
return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
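        # Note on the ids above: CANINE tokenizes at the character level, so
        # ordinary characters map to their Unicode code points (ord("L") == 76,
        # ord("i") == 105, ...), while 5_7344 (0xE000) and 5_7345 (0xE001) are
        # the [CLS] and [SEP] codepoints from Unicode's private-use area.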
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
__lowercase : str ={
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
__lowercase : Any ={
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def a__ ( images ):
    '''simple docstring'''
    UpperCAmelCase_ =(images / 2 + 0.5).clamp(0 , 1 )
    UpperCAmelCase_ =images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    UpperCAmelCase_ =numpy_to_pil(images )
    return images
def numpy_to_pil ( images ):
'''simple docstring'''
if images.ndim == 3:
UpperCAmelCase_ =images[None, ...]
UpperCAmelCase_ =(images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
UpperCAmelCase_ =[Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
UpperCAmelCase_ =[Image.fromarray(lowercase__ ) for image in images]
return pil_images
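# Usage sketch for numpy_to_pil above (hypothetical random data; NHWC floats
# in [0, 1], as the scaling by 255 implies):
#
# import numpy as np
# batch = np.random.rand(2, 8, 8, 3).astype("float32")
# pil_images = numpy_to_pil(batch)
# pil_images[0].save("sample.png")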
| 54 | 1 |
from __future__ import annotations
def a__ ( target , word_bank = None ):
    '''simple docstring'''
    UpperCAmelCase_ =word_bank or []
    # create a table
    UpperCAmelCase_ =len(target ) + 1
    UpperCAmelCase_ =[]
    for _ in range(table_size ):
table.append([] )
# seed value
UpperCAmelCase_ =[[]] # because empty string has empty combination
# iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    UpperCAmelCase_ =[
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word )] += new_combinations
# combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 54 |
def a__ ( n_element ):
    '''simple docstring'''
    UpperCAmelCase_ =int(n_element )
if n_element < 1:
UpperCAmelCase_ =ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ =[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =(0, 0, 0)
UpperCAmelCase_ =1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
__lowercase : Tuple =input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
__lowercase : Union[str, Any] =hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54 | 1 |
def a__ ( number ):
    '''simple docstring'''
    if not isinstance(number , int ):
        raise TypeError("Input value must be an 'int' type" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
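# The loop above reproduces Python's built-in int.bit_length(), e.g.
# a__(8) == (8).bit_length() == 4 and a__(0) == 0.
if __name__ == "__main__":
    for n in (0, 1, 8, 255):
        assert a__(n) == n.bit_length()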
| 54 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__lowercase : List[Any] =logging.get_logger(__name__)
class A ( __lowercase ):
def __init__( self: List[Any] , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use GLPNImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 54 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : str =logging.get_logger(__name__)
def a__ ( lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase_ =os.path.abspath(lowercase__ )
logger.info(F'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
UpperCAmelCase_ =tf.train.list_variables(lowercase__ )
UpperCAmelCase_ =[]
UpperCAmelCase_ =[]
UpperCAmelCase_ =[]
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
UpperCAmelCase_ =full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(F'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
UpperCAmelCase_ =name[1:]
# figure out how many levels deep the name is
UpperCAmelCase_ =0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(lowercase__ )
# read data
UpperCAmelCase_ =tf.train.load_variable(lowercase__ , lowercase__ )
names.append("/".join(lowercase__ ) )
arrays.append(lowercase__ )
logger.info(F'Read a total of {len(lowercase__ ):,} layers' )
# Sanity check
if len(set(lowercase__ ) ) != 1:
raise ValueError(F'Found layer names with different depths (layer depth {list(set(lowercase__ ) )})' )
UpperCAmelCase_ =list(set(lowercase__ ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(lowercase__ , lowercase__ ):
UpperCAmelCase_ =full_name.split("/" )
UpperCAmelCase_ =model
UpperCAmelCase_ =[]
for i, m_name in enumerate(lowercase__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
UpperCAmelCase_ =int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
UpperCAmelCase_ =getattr(lowercase__ , "embeddings" )
UpperCAmelCase_ =getattr(lowercase__ , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
UpperCAmelCase_ =getattr(lowercase__ , "encoder" )
UpperCAmelCase_ =getattr(lowercase__ , "layer" )
UpperCAmelCase_ =pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
UpperCAmelCase_ =getattr(lowercase__ , "pooler" )
UpperCAmelCase_ =getattr(lowercase__ , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
UpperCAmelCase_ =getattr(lowercase__ , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
UpperCAmelCase_ =getattr(lowercase__ , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
UpperCAmelCase_ =getattr(lowercase__ , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
UpperCAmelCase_ =getattr(lowercase__ , "token_type_embeddings" )
else:
raise ValueError(F'Unknown embedding layer with name {full_name}' )
trace.append("weight" )
UpperCAmelCase_ =getattr(lowercase__ , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
UpperCAmelCase_ =getattr(lowercase__ , "attention" )
UpperCAmelCase_ =getattr(lowercase__ , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
UpperCAmelCase_ =getattr(lowercase__ , "attention" )
UpperCAmelCase_ =getattr(lowercase__ , "output" )
UpperCAmelCase_ =getattr(lowercase__ , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
UpperCAmelCase_ =getattr(lowercase__ , "attention" )
UpperCAmelCase_ =getattr(lowercase__ , "output" )
UpperCAmelCase_ =getattr(lowercase__ , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
UpperCAmelCase_ =getattr(lowercase__ , "output" )
UpperCAmelCase_ =getattr(lowercase__ , "dense" )
elif m_name == "_output_layer_norm":
                # output layer norm
trace.extend(["output", "LayerNorm"] )
UpperCAmelCase_ =getattr(lowercase__ , "output" )
UpperCAmelCase_ =getattr(lowercase__ , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
UpperCAmelCase_ =getattr(lowercase__ , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
UpperCAmelCase_ =getattr(lowercase__ , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
UpperCAmelCase_ =getattr(lowercase__ , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
UpperCAmelCase_ =getattr(lowercase__ , "intermediate" )
UpperCAmelCase_ =getattr(lowercase__ , "dense" )
elif m_name == "_output_layer_norm":
                # output layer norm (note: this branch is unreachable, "_output_layer_norm" is already matched above)
trace.append("output" )
UpperCAmelCase_ =getattr(lowercase__ , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
UpperCAmelCase_ =getattr(lowercase__ , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
UpperCAmelCase_ =getattr(lowercase__ , "weight" )
else:
logger.warning(F'Ignored {m_name}' )
# for certain layers reshape is necessary
UpperCAmelCase_ =".".join(lowercase__ )
if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , lowercase__ ) or re.match(
R"(\S+)\.attention\.output\.dense\.weight" , lowercase__ ):
UpperCAmelCase_ =array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCAmelCase_ =array.transpose()
if pointer.shape == array.shape:
UpperCAmelCase_ =torch.from_numpy(lowercase__ )
else:
raise ValueError(
F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
F' {array.shape}' )
logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def a__ ( tf_checkpoint_path , config_path , pytorch_dump_path ):
    '''simple docstring'''
    logger.info(F'Loading model based on config from {config_path}...' )
    UpperCAmelCase_ =BertConfig.from_json_file(config_path )
    UpperCAmelCase_ =BertModel(config )
    # Load weights from checkpoint
    logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...' )
    load_tfa_weights_in_bert(model , tf_checkpoint_path , config )
    # Save pytorch-model
    logger.info(F'Saving PyTorch model to {pytorch_dump_path}...' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowercase : Dict =argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
__lowercase : int =parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
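# Hypothetical invocation (script and file names are placeholders, not taken
# from this file):
#
# python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
#     --bert_config_file ./tf2_model/bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin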
| 54 |
| 54 | 1 |
from datetime import datetime as dt
import os
from github import Github
__lowercase : Optional[int] =[
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ =g.get_repo("huggingface/transformers" )
UpperCAmelCase_ =repo.get_issues(state="open" )
for issue in open_issues:
        UpperCAmelCase_ =sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase_ =comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
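# Hypothetical invocation (the environment variable matches os.environ above;
# the script path is a placeholder):
#
# GITHUB_TOKEN=<personal-access-token> python scripts/stale.py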
| 54 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase : Optional[int] ="""\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__lowercase : Dict ="""\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__lowercase : List[str] ="""\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: int ) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def lowerCAmelCase__ ( self: List[str] , predictions: List[List[str]] , references: List[List[List[str]]] , min_len: int = 1 , max_len: int = 4 , ) -> Dict[str, float]:
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 54 | 1 |