code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
def _A ( UpperCamelCase_ : list[int]) -> list[int]:
'''simple docstring'''
if len(UpperCamelCase_) == 0:
return array
__lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_)
# Compute the variables
__lowercase = _max - _min + 1
__lowercase ,__lowercase = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
__lowercase = i - _min
__lowercase = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
__lowercase = 0
for i in range(UpperCamelCase_):
while holes_repeat[i] > 0:
__lowercase = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = input('Enter numbers separated by comma:\n')
_a = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 17 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowerCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : List[Any] ):
pass
def _A ( UpperCamelCase_ : Union[str, Any]) -> Any:
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_a = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[Any] ):
__lowercase = pipeline(
"document-question-answering", model=UpperCAmelCase__, tokenizer=UpperCAmelCase__, image_processor=UpperCAmelCase__ )
__lowercase = INVOICE_URL
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
__lowercase = "What is the placebo?"
__lowercase = [
{
"image": load_image(UpperCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ):
__lowercase = dqa_pipeline(UpperCAmelCase__, top_k=2 )
self.assertEqual(
UpperCAmelCase__, [
[
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
{"score": ANY(UpperCAmelCase__ ), "answer": ANY(UpperCAmelCase__ ), "start": ANY(UpperCAmelCase__ ), "end": ANY(UpperCAmelCase__ )},
]
]
* 3, )
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" )
__lowercase = INVOICE_URL
__lowercase = "How many cats are there?"
__lowercase = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0},
]
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
# We can optionnally pass directly the words and bounding boxes
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = []
__lowercase = []
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : List[str] ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2, )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Union[str, Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self : List[Any] ):
pass
| 17 | 1 |
def A__ ( __lowerCamelCase, __lowerCamelCase ):
return int((input_a, input_a).count(0 ) != 0 )
def A__ ( ):
assert nand_gate(0, 0 ) == 1
assert nand_gate(0, 1 ) == 1
assert nand_gate(1, 0 ) == 1
assert nand_gate(1, 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 257 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =(16, 32, 96, 256)
UpperCAmelCase_ =jnp.floataa
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i]
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
SCREAMING_SNAKE_CASE_ = blocks
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
SCREAMING_SNAKE_CASE_ = nn.silu(_A )
for block in self.blocks:
SCREAMING_SNAKE_CASE_ = block(_A )
SCREAMING_SNAKE_CASE_ = nn.silu(_A )
SCREAMING_SNAKE_CASE_ = self.conv_out(_A )
return embedding
@flax_register_to_config
class UpperCamelCase__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =32
UpperCAmelCase_ =4
UpperCAmelCase_ =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ =False
UpperCAmelCase_ =(320, 640, 1_280, 1_280)
UpperCAmelCase_ =2
UpperCAmelCase_ =8
UpperCAmelCase_ =None
UpperCAmelCase_ =1_280
UpperCAmelCase_ =0.0
UpperCAmelCase_ =False
UpperCAmelCase_ =jnp.floataa
UpperCAmelCase_ =True
UpperCAmelCase_ =0
UpperCAmelCase_ ="rgb"
UpperCAmelCase_ =(16, 32, 96, 256)
def _UpperCamelCase ( self , _A ) -> FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE_ = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = jax.random.split(_A )
SCREAMING_SNAKE_CASE_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.block_out_channels
SCREAMING_SNAKE_CASE_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE_ = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE_ = FlaxTimestepEmbedding(_A , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE_ = self.only_cross_attention
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = block_out_channels[0]
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE_ = output_channel
SCREAMING_SNAKE_CASE_ = block_out_channels[i]
SCREAMING_SNAKE_CASE_ = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE_ = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE_ = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
SCREAMING_SNAKE_CASE_ = down_blocks
SCREAMING_SNAKE_CASE_ = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE_ = block_out_channels[-1]
SCREAMING_SNAKE_CASE_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
SCREAMING_SNAKE_CASE_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE_ = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
SCREAMING_SNAKE_CASE_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(_A , 0 )
SCREAMING_SNAKE_CASE_ = self.time_proj(_A )
SCREAMING_SNAKE_CASE_ = self.time_embedding(_A )
# 2. pre-process
SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , _A , deterministic=not train )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE_ = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. contronet blocks
SCREAMING_SNAKE_CASE_ = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE_ = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE_ = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE_ = self.controlnet_mid_block(_A )
# 6. scaling
SCREAMING_SNAKE_CASE_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
| 257 | 1 |
"""simple docstring"""
from ... import PretrainedConfig
a = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase : str = '''nezha'''
def __init__( self : str , _UpperCAmelCase : List[Any]=21_128 , _UpperCAmelCase : Tuple=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[Any]=3_072 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : int=64 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Union[str, Any]=1E-1_2 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Tuple=True , **_UpperCAmelCase : List[Any] , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = max_relative_position
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = classifier_dropout
_A = use_cache
| 315 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]:
'''simple docstring'''
_A = []
create_all_state(1 , _snake_case , _snake_case , [] , _snake_case )
return result
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None:
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_snake_case , total_number - level + 2 ):
current_list.append(_snake_case )
create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case )
current_list.pop()
def _snake_case ( _snake_case : list[list[int]] ) -> None:
'''simple docstring'''
for i in total_list:
print(*_snake_case )
if __name__ == "__main__":
a = 4
a = 2
a = generate_all_combinations(n, k)
print_all_state(total_list)
| 315 | 1 |
from __future__ import annotations
from fractions import Fraction
def snake_case_ ( snake_case , snake_case ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def snake_case_ ( snake_case ):
lowercase__: str = []
lowercase__: Tuple = 11
lowercase__: Optional[int] = int('1' + '0' * digit_len )
for num in range(__a , __a ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__a , __a ):
solutions.append(f'{num}/{den}' )
den += 1
num += 1
lowercase__: Any = 10
return solutions
def snake_case_ ( snake_case = 2 ):
lowercase__: Tuple = 1.0
for fraction in fraction_list(__a ):
lowercase__: List[str] = Fraction(__a )
result *= frac.denominator / frac.numerator
return int(__a )
if __name__ == "__main__":
print(solution())
| 365 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__lowerCAmelCase = '''base_with_context'''
def snake_case_ ( snake_case , snake_case ) -> int:
lowercase__: Tuple = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
lowercase__: Optional[int] = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase__: List[str] = weights[f'layers_{lyr_num}']
lowercase__: List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowercase__: Any = ly_weight['attention']
lowercase__: int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: int = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowercase__: Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowercase__: Any = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def snake_case_ ( snake_case , snake_case ) -> List[str]:
lowercase__: str = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase__: str = weights[f'layers_{lyr_num}']
lowercase__: Optional[Any] = ly_weight['attention']
lowercase__: List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: int = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowercase__: Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowercase__: Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def snake_case_ ( snake_case , snake_case ) -> Any:
lowercase__: int = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
lowercase__: Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
lowercase__: int = nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case )
lowercase__: Dict = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowercase__: Optional[Any] = weights[f'layers_{lyr_num}']
lowercase__: Any = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
lowercase__: int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
lowercase__: List[str] = ly_weight['self_attention']
lowercase__: Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: int = ly_weight['MultiHeadDotProductAttention_0']
lowercase__: List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowercase__: Dict = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowercase__: str = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowercase__: Any = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowercase__: int = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowercase__: Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowercase__: List[str] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowercase__: int = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowercase__: str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowercase__: Optional[Any] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowercase__: Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def snake_case_ ( snake_case ) -> Any:
lowercase__: int = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowercase__: Tuple = jnp.tree_util.tree_map(onp.array , snake_case )
lowercase__: List[str] = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
lowercase__: List[Any] = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
lowercase__: Optional[Any] = inference.parse_training_gin_file(snake_case , snake_case )
lowercase__: str = inference.InferenceModel(args.checkpoint_path , snake_case )
lowercase__: Dict = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
lowercase__: List[Any] = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
lowercase__: Dict = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
lowercase__: Optional[Any] = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowercase__: Dict = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , snake_case )
lowercase__: int = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , snake_case )
lowercase__: Optional[int] = load_decoder(ta_checkpoint['target']['decoder'] , snake_case )
lowercase__: int = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
lowercase__: List[Any] = SpectrogramDiffusionPipeline(
notes_encoder=snake_case , continuous_encoder=snake_case , decoder=snake_case , scheduler=snake_case , melgan=snake_case , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
__lowerCAmelCase = parser.parse_args()
main(args)
| 288 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = """deformable_detr"""
lowercase_ : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=3, lowerCamelCase=3_00, lowerCamelCase=10_24, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase="resnet50", lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=False, lowerCamelCase=3_00, lowerCamelCase=False, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowerCamelCase, lowerCamelCase):
_lowercase : List[str] = backbone_config.get('model_type')
_lowercase : str = CONFIG_MAPPING[backbone_model_type]
_lowercase : Optional[int] = config_class.from_dict(lowerCamelCase)
_lowercase : Tuple = use_timm_backbone
_lowercase : List[str] = backbone_config
_lowercase : Tuple = num_channels
_lowercase : Optional[Any] = num_queries
_lowercase : Optional[Any] = max_position_embeddings
_lowercase : Optional[int] = d_model
_lowercase : int = encoder_ffn_dim
_lowercase : List[Any] = encoder_layers
_lowercase : str = encoder_attention_heads
_lowercase : str = decoder_ffn_dim
_lowercase : Optional[Any] = decoder_layers
_lowercase : List[str] = decoder_attention_heads
_lowercase : Optional[int] = dropout
_lowercase : Optional[Any] = attention_dropout
_lowercase : int = activation_dropout
_lowercase : Any = activation_function
_lowercase : Optional[int] = init_std
_lowercase : int = init_xavier_std
_lowercase : Union[str, Any] = encoder_layerdrop
_lowercase : Tuple = auxiliary_loss
_lowercase : Union[str, Any] = position_embedding_type
_lowercase : str = backbone
_lowercase : List[Any] = use_pretrained_backbone
_lowercase : Any = dilation
# deformable attributes
_lowercase : Any = num_feature_levels
_lowercase : Dict = encoder_n_points
_lowercase : Dict = decoder_n_points
_lowercase : Dict = two_stage
_lowercase : Union[str, Any] = two_stage_num_proposals
_lowercase : str = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : Tuple = class_cost
_lowercase : int = bbox_cost
_lowercase : Optional[int] = giou_cost
# Loss coefficients
_lowercase : Optional[Any] = mask_loss_coefficient
_lowercase : Dict = dice_loss_coefficient
_lowercase : Tuple = bbox_loss_coefficient
_lowercase : Optional[int] = giou_loss_coefficient
_lowercase : Union[str, Any] = eos_coefficient
_lowercase : Union[str, Any] = focal_alpha
_lowercase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
_lowercase : Union[str, Any] = self.backbone_config.to_dict()
_lowercase : Tuple = self.__class__.model_type
return output
| 21 |
from collections import deque
from .hash_table import HashTable
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : int , *_UpperCamelCase : int , **_UpperCamelCase : Tuple ) ->Tuple:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) ->Tuple:
snake_case_ = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_UpperCamelCase )
snake_case_ = self.values[key]
def snake_case__( self : List[Any] ) ->str:
return (
sum(self.charge_factor - len(_UpperCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def snake_case__( self : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int]=None ) ->str:
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_UpperCamelCase ) == 0
):
return key
return super()._collision_resolution(_UpperCamelCase , _UpperCamelCase ) | 8 | 0 |
from __future__ import annotations
import math
__UpperCamelCase = '''2020.9.26'''
__UpperCamelCase = '''xcodz-dot, cclaus, dhruvmanila'''
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float]:
if not all(isinstance(UpperCAmelCase , (float, int) ) for val in locals().values() ):
snake_case_ = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(UpperCAmelCase )
snake_case_ = ((x * distance) / (z + distance)) * scale
snake_case_ = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float, float]:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError('Axis must be a str' )
snake_case_ = locals()
del input_variables["axis"]
if not all(isinstance(UpperCAmelCase , (float, int) ) for val in input_variables.values() ):
snake_case_ = (
'Input values except axis must either be float or int: '
f'{list(input_variables.values() )}'
)
raise TypeError(UpperCAmelCase )
snake_case_ = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
snake_case_ = x * math.cos(UpperCAmelCase ) - y * math.sin(UpperCAmelCase )
snake_case_ = y * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
snake_case_ = z
elif axis == "x":
snake_case_ = y * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
snake_case_ = z * math.cos(UpperCAmelCase ) + y * math.sin(UpperCAmelCase )
snake_case_ = x
elif axis == "y":
snake_case_ = x * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
snake_case_ = z * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
snake_case_ = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 362 | """simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Optional[Any] = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ['MobileNetV2FeatureExtractor']
_lowercase : Union[str, Any] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(snake_case_ )
UpperCAmelCase_ = 5_12
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(snake_case_ )
else:
UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ )
UpperCAmelCase_ = AutoencoderKL(**snake_case_ )
vae.load_state_dict(snake_case_ )
vae.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 1 | 0 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _A :
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError()
def A__ ( self ):
"""simple docstring"""
raise NotImplementedError()
class _A ( lowerCamelCase__ ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = tokenizer
lowercase = skip_prompt
lowercase = decode_kwargs
# variables used in the streaming process
lowercase = []
lowercase = 0
lowercase = True
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
lowercase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowercase = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
lowercase = text[self.print_len :]
lowercase = []
lowercase = 0
# If the last token is a CJK character, we print the characters.
elif len(__A ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowercase = text[self.print_len :]
self.print_len += len(__A )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowercase = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(__A )
self.on_finalized_text(__A )
def A__ ( self ):
"""simple docstring"""
if len(self.token_cache ) > 0:
lowercase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowercase = text[self.print_len :]
lowercase = []
lowercase = 0
else:
lowercase = ''''''
lowercase = True
self.on_finalized_text(__A , stream_end=__A )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
"""simple docstring"""
print(__A , flush=__A , end="""""" if not stream_end else None )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
class _A ( lowerCamelCase__ ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(__A , __A , **__A )
lowercase = Queue()
lowercase = None
lowercase = timeout
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
"""simple docstring"""
self.text_queue.put(__A , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
"""simple docstring"""
return self
def A__ ( self ):
"""simple docstring"""
lowercase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 360 | """simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if sources is int:
lowercase = [sources]
if sinks is int:
lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : int = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ['CLIPFeatureExtractor']
UpperCAmelCase__ : Optional[int] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase_ ) for s in shape] )}.npy"""
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=(4, 4, 6_4, 6_4) , lowerCAmelCase_ : List[str]=False ):
"""simple docstring"""
_A: List[str] = jnp.bfloataa if fpaa else jnp.floataa
_A: Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase_ , lowerCAmelCase_ ) ) , dtype=lowerCAmelCase_ )
return image
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Optional[Any]="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
_A: Tuple = jnp.bfloataa if fpaa else jnp.floataa
_A: str = '''bf16''' if fpaa else None
_A , _A: Union[str, Any] = FlaxUNetaDConditionModel.from_pretrained(
lowerCAmelCase_ , subfolder='''unet''' , dtype=lowerCAmelCase_ , revision=lowerCAmelCase_ )
return model, params
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : str=(4, 7_7, 7_6_8) , lowerCAmelCase_ : Dict=False ):
"""simple docstring"""
_A: Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
_A: Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase_ , lowerCAmelCase_ ) ) , dtype=lowerCAmelCase_ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any ):
"""simple docstring"""
_A , _A: Optional[Any] = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCAmelCase_ )
_A: List[str] = self.get_latents(lowerCAmelCase_ , fpaa=lowerCAmelCase_ )
_A: Optional[int] = self.get_encoder_hidden_states(lowerCAmelCase_ , fpaa=lowerCAmelCase_ )
_A: List[str] = model.apply(
{'''params''': params} , lowerCAmelCase_ , jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase_ , ).sample
assert sample.shape == latents.shape
_A: Any = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_A: Tuple = jnp.array(lowerCAmelCase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __magic_name__ ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A , _A: Union[str, Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCAmelCase_ )
_A: Dict = self.get_latents(lowerCAmelCase_ , shape=(4, 4, 9_6, 9_6) , fpaa=lowerCAmelCase_ )
_A: Dict = self.get_encoder_hidden_states(lowerCAmelCase_ , shape=(4, 7_7, 1_0_2_4) , fpaa=lowerCAmelCase_ )
_A: Optional[int] = model.apply(
{'''params''': params} , lowerCAmelCase_ , jnp.array(lowerCAmelCase_ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase_ , ).sample
assert sample.shape == latents.shape
_A: List[str] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_A: List[str] = jnp.array(lowerCAmelCase_ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-2 )
| 121 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = GPTSanJapaneseTokenizer
lowerCamelCase_ = False
lowerCamelCase_ = {'''do_clean_text''': False, '''add_prefix_space''': False}
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# fmt: off
_SCREAMING_SNAKE_CASE = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_SCREAMING_SNAKE_CASE = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCamelCase ) )
def lowerCAmelCase_ ( self : Dict , **__lowerCamelCase : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def lowerCAmelCase_ ( self : str , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_SCREAMING_SNAKE_CASE = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def lowerCAmelCase_ ( self : int , __lowerCamelCase : Any ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.get_input_output_texts(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
return text, ids
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
# Testing tokenization
_SCREAMING_SNAKE_CASE = "こんにちは、世界。 こんばんは、㔺界。"
_SCREAMING_SNAKE_CASE = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# Testing conversion to ids without special tokens
_SCREAMING_SNAKE_CASE = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# Testing conversion to ids with special tokens
_SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
_SCREAMING_SNAKE_CASE = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
# Testing tokenization
_SCREAMING_SNAKE_CASE = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_SCREAMING_SNAKE_CASE = "こんにちは、、、、世界。こんばんは、、、、世界。"
_SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_SCREAMING_SNAKE_CASE = "こんにちは、世界。"
_SCREAMING_SNAKE_CASE = "こんばんは、㔺界。😀"
_SCREAMING_SNAKE_CASE = "こんにちは、世界。こんばんは、世界。😀"
_SCREAMING_SNAKE_CASE = tokenizer.encode(prefix_text + input_text )
_SCREAMING_SNAKE_CASE = tokenizer.encode("" , prefix_text=prefix_text + input_text )
_SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , prefix_text=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_SCREAMING_SNAKE_CASE = "こんにちは、世界。"
_SCREAMING_SNAKE_CASE = "こんばんは、㔺界。😀"
_SCREAMING_SNAKE_CASE = len(tokenizer.encode(__lowerCamelCase ) ) - 2
_SCREAMING_SNAKE_CASE = len(tokenizer.encode(__lowerCamelCase ) ) - 2
_SCREAMING_SNAKE_CASE = [1] + [0] * (len_prefix + len_text + 1)
_SCREAMING_SNAKE_CASE = [1] * (len_prefix + len_text + 1) + [0]
_SCREAMING_SNAKE_CASE = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_SCREAMING_SNAKE_CASE = tokenizer(prefix_text + input_text ).token_type_ids
_SCREAMING_SNAKE_CASE = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
_SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , prefix_text=__lowerCamelCase ).token_type_ids
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("あンいワ" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("" , prefix_text="あンいワ" )
_SCREAMING_SNAKE_CASE = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(__lowerCamelCase ) , tokenizer.decode(__lowerCamelCase ) )
self.assertEqual(tokenizer.decode(__lowerCamelCase ) , tokenizer.decode(__lowerCamelCase ) )
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_SCREAMING_SNAKE_CASE = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = tokenizer.batch_encode_plus(__lowerCamelCase , padding=__lowerCamelCase )
# fmt: off
_SCREAMING_SNAKE_CASE = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
_SCREAMING_SNAKE_CASE = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_SCREAMING_SNAKE_CASE = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCamelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCamelCase )
self.assertListEqual(x_token.attention_mask , __lowerCamelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCamelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCamelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
# tokenizer has no padding token
pass
| 111 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( __A : list ) -> list:
if len(__A ) <= 1:
return lst
_SCREAMING_SNAKE_CASE = 1
while i < len(__A ):
if lst[i - 1] <= lst[i]:
i += 1
else:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = lst[i], lst[i - 1]
i -= 1
if i == 0:
_SCREAMING_SNAKE_CASE = 1
return lst
if __name__ == "__main__":
lowerCamelCase_ = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase_ = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
| 111 | 1 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> float:
'''simple docstring'''
lowerCamelCase_ : Tuple = x
lowerCamelCase_ : Any = y
for step in range(_lowercase ): # noqa: B007
lowerCamelCase_ : str = a * a - b * b + x
lowerCamelCase_ : Optional[int] = 2 * a * b + y
lowerCamelCase_ : Optional[int] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def lowercase_ ( _lowercase ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowercase_ ( _lowercase ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowercase , 1 , 1 ) )
def lowercase_ ( _lowercase = 800 , _lowercase = 600 , _lowercase = -0.6 , _lowercase = 0 , _lowercase = 3.2 , _lowercase = 50 , _lowercase = True , ) -> Image.Image:
'''simple docstring'''
lowerCamelCase_ : Dict = Image.new('''RGB''' , (image_width, image_height) )
lowerCamelCase_ : str = img.load()
# loop through the image-coordinates
for image_x in range(_lowercase ):
for image_y in range(_lowercase ):
# determine the figure-coordinates based on the image-coordinates
lowerCamelCase_ : Tuple = figure_width / image_width * image_height
lowerCamelCase_ : Optional[Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCamelCase_ : Any = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCamelCase_ : List[Any] = get_distance(_lowercase , _lowercase , _lowercase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCamelCase_ : Dict = get_color_coded_rgb(_lowercase )
else:
lowerCamelCase_ : List[str] = get_black_and_white_rgb(_lowercase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
__lowercase : Optional[int] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 318 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 318 | 1 |
'''simple docstring'''
import numpy as np
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1e-1_2 , lowerCAmelCase_ = 100 , )-> tuple[float, np.ndarray]:
'''simple docstring'''
assert np.shape(lowerCAmelCase_ )[0] == np.shape(lowerCAmelCase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowerCAmelCase_ )[0] == np.shape(lowerCAmelCase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCAmelCase_ ) == np.iscomplexobj(lowerCAmelCase_ )
_UpperCAmelCase : Any = np.iscomplexobj(lowerCAmelCase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCAmelCase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : Dict = 0
_UpperCAmelCase : List[str] = 1e1_2
while not convergence:
# Multiple matrix by the vector.
_UpperCAmelCase : Optional[int] = np.dot(lowerCAmelCase_ , lowerCAmelCase_ )
# Normalize the resulting output vector.
_UpperCAmelCase : Dict = w / np.linalg.norm(lowerCAmelCase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_UpperCAmelCase : Any = vector.conj().T if is_complex else vector.T
_UpperCAmelCase : Optional[Any] = np.dot(lowerCAmelCase_ , np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) )
# Check convergence.
_UpperCAmelCase : Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Any = lambda_
if is_complex:
_UpperCAmelCase : Optional[int] = np.real(lambda_ )
return lambda_, vector
def snake_case_ ( )-> None:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_UpperCAmelCase : Union[str, Any] = np.array([41, 4, 20] )
_UpperCAmelCase : Tuple = real_input_matrix.astype(np.complexaaa )
_UpperCAmelCase : Optional[Any] = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_UpperCAmelCase : Optional[Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_UpperCAmelCase : List[str] = real_input_matrix
_UpperCAmelCase : Any = real_vector
elif problem_type == "complex":
_UpperCAmelCase : Dict = complex_input_matrix
_UpperCAmelCase : List[str] = complex_vector
# Our implementation.
_UpperCAmelCase ,_UpperCAmelCase : List[Any] = power_iteration(lowerCAmelCase_ , lowerCAmelCase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = np.linalg.eigh(lowerCAmelCase_ )
# Last eigenvalue is the maximum one.
_UpperCAmelCase : List[Any] = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_UpperCAmelCase : Any = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowerCAmelCase_ ) - np.abs(lowerCAmelCase_ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 349 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
A_ : Dict = logging.getLogger(__name__)
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = """sequence-classification"""
def __init__( self ,a_ ) -> Dict:
if type(a_ ) == dict:
_UpperCAmelCase : Tuple = Namespace(**a_ )
_UpperCAmelCase : Optional[int] = glue_output_modes[hparams.task]
_UpperCAmelCase : Union[str, Any] = glue_tasks_num_labels[hparams.task]
super().__init__(a_ ,a_ ,self.mode )
def _snake_case ( self ,**a_ ) -> Optional[Any]:
return self.model(**a_ )
def _snake_case ( self ,a_ ,a_ ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : Any = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : Any = self(**a_ )
_UpperCAmelCase : int = outputs[0]
_UpperCAmelCase : Any = self.trainer.lr_schedulers[0]["""scheduler"""]
_UpperCAmelCase : Any = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _snake_case ( self ) -> int:
_UpperCAmelCase : Optional[int] = self.hparams
_UpperCAmelCase : int = processors[args.task]()
_UpperCAmelCase : str = processor.get_labels()
for mode in ["train", "dev"]:
_UpperCAmelCase : Tuple = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" ,a_ )
else:
logger.info("""Creating features from dataset file at %s""" ,args.data_dir )
_UpperCAmelCase : List[Any] = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
_UpperCAmelCase : Union[str, Any] = convert_examples_to_features(
a_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info("""Saving features into cached file %s""" ,a_ )
torch.save(a_ ,a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = False ) -> DataLoader:
_UpperCAmelCase : Union[str, Any] = """dev""" if mode == """test""" else mode
_UpperCAmelCase : Tuple = self._feature_file(a_ )
logger.info("""Loading features from cached file %s""" ,a_ )
_UpperCAmelCase : Union[str, Any] = torch.load(a_ )
_UpperCAmelCase : List[str] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
_UpperCAmelCase : Tuple = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
_UpperCAmelCase : str = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : Optional[int] = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : str = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(a_ ,a_ ,a_ ,a_ ) ,batch_size=a_ ,shuffle=a_ ,)
def _snake_case ( self ,a_ ,a_ ) -> Any:
_UpperCAmelCase : Any = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_UpperCAmelCase : int = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
_UpperCAmelCase : List[str] = self(**a_ )
_UpperCAmelCase ,_UpperCAmelCase : Optional[int] = outputs[:2]
_UpperCAmelCase : List[str] = logits.detach().cpu().numpy()
_UpperCAmelCase : Union[str, Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _snake_case ( self ,a_ ) -> tuple:
_UpperCAmelCase : Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
_UpperCAmelCase : Any = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
_UpperCAmelCase : int = np.argmax(a_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
_UpperCAmelCase : Union[str, Any] = np.squeeze(a_ )
_UpperCAmelCase : str = np.concatenate([x["""target"""] for x in outputs] ,axis=0 )
_UpperCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
_UpperCAmelCase : Optional[int] = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task ,a_ ,a_ )}
_UpperCAmelCase : Dict = dict(results.items() )
_UpperCAmelCase : Any = results
return ret, preds_list, out_label_list
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _snake_case ( self ,a_ ) -> dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = self._eval_end(a_ )
_UpperCAmelCase : List[Any] = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _snake_case ( a_ ,a_ ) -> Any:
BaseTransformer.add_model_specific_args(a_ ,a_ )
parser.add_argument(
"""--max_seq_length""" ,default=128 ,type=a_ ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--task""" ,default="""""" ,type=a_ ,required=a_ ,help="""The GLUE task to run""" ,)
parser.add_argument(
"""--gpus""" ,default=0 ,type=a_ ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,)
parser.add_argument(
"""--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" )
return parser
def snake_case_ ( )-> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = GLUETransformer.add_model_specific_args(lowerCAmelCase_ , os.getcwd() )
_UpperCAmelCase : Optional[int] = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_UpperCAmelCase : Optional[int] = os.path.join(
"""./results""" , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_UpperCAmelCase : int = GLUETransformer(lowerCAmelCase_ )
_UpperCAmelCase : Any = generic_train(lowerCAmelCase_ , lowerCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_UpperCAmelCase : int = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=lowerCAmelCase_ ) )
_UpperCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 349 | 1 |
from ...processing_utils import ProcessorMixin
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "WhisperFeatureExtractor"
lowerCAmelCase__ : List[str] = "WhisperTokenizer"
def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.feature_extractor
__lowercase = False
def a__ ( self : str , _UpperCAmelCase : Any=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[Any]=True ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=_UpperCAmelCase , language=_UpperCAmelCase , no_timestamps=_UpperCAmelCase )
def __call__( self : Optional[int] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : str ) -> str:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase )
__lowercase = kwargs.pop('audio' , _UpperCAmelCase )
__lowercase = kwargs.pop('sampling_rate' , _UpperCAmelCase )
__lowercase = kwargs.pop('text' , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
__lowercase = args[0]
__lowercase = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
__lowercase = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None:
__lowercase = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowercase = encodings['input_ids']
return inputs
def a__ ( self : Optional[int] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : int ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Tuple , *_UpperCAmelCase : Any , **_UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]="np" ) -> str:
"""simple docstring"""
return self.tokenizer.get_prompt_ids(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
| 325 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
__lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowercase = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowercase = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 1 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
__A : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
__A : Dict = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase : Dict = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"config.{attribute}" in modeling_source
or f"getattr(config, \"{attribute}\"" in modeling_source
or f"getattr(self.config, \"{attribute}\"" in modeling_source
):
lowerCAmelCase : int = True
# Deal with multi-line cases
elif (
re.search(
rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"", _lowercase, )
is not None
):
lowerCAmelCase : Union[str, Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
lowerCAmelCase : Dict = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
lowerCAmelCase : Dict = [
'bos_index',
'eos_index',
'pad_index',
'unk_index',
'mask_index',
'image_size',
'use_cache',
'out_features',
'out_indices',
]
lowerCAmelCase : Optional[int] = ['encoder_no_repeat_ngram_size']
# Special cases to be allowed
lowerCAmelCase : Union[str, Any] = True
if not attribute_used:
lowerCAmelCase : Optional[int] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
lowerCAmelCase : str = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
lowerCAmelCase : int = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
lowerCAmelCase : int = True
elif attribute.endswith('_token_id' ):
lowerCAmelCase : Optional[int] = True
# configuration class specific cases
if not case_allowed:
lowerCAmelCase : int = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] )
lowerCAmelCase : int = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : int = dict(inspect.signature(config_class.__init__ ).parameters )
lowerCAmelCase : str = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']]
lowerCAmelCase : Tuple = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
lowerCAmelCase : List[str] = {}
if len(config_class.attribute_map ) > 0:
lowerCAmelCase : List[str] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
lowerCAmelCase : List[Any] = inspect.getsourcefile(_lowercase )
lowerCAmelCase : List[str] = os.path.dirname(_lowercase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
lowerCAmelCase : Optional[Any] = [os.path.join(_lowercase, _lowercase ) for fn in os.listdir(_lowercase ) if fn.startswith('modeling_' )]
# Get the source code strings
lowerCAmelCase : str = []
for path in modeling_paths:
if os.path.isfile(_lowercase ):
with open(_lowercase ) as fp:
modeling_sources.append(fp.read() )
lowerCAmelCase : int = []
for config_param, default_value in zip(_lowercase, _lowercase ):
# `attributes` here is all the variant names for `config_param`
lowerCAmelCase : Optional[Any] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_lowercase, _lowercase, _lowercase, _lowercase ):
unused_attributes.append(attributes[0] )
return sorted(_lowercase )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
'''simple docstring'''
lowerCAmelCase : int = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
lowerCAmelCase : Optional[int] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ), lambda _UpperCAmelCase : inspect.isclass(_lowercase )
and issubclass(_lowercase, _lowercase )
and inspect.getmodule(_lowercase ) == inspect.getmodule(_config_class ), )
]
for config_class in config_classes_in_module:
lowerCAmelCase : Union[str, Any] = check_config_attributes_being_used(_lowercase )
if len(_lowercase ) > 0:
lowerCAmelCase : str = unused_attributes
if len(_lowercase ) > 0:
lowerCAmelCase : Optional[int] = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
for name, attributes in configs_with_unused_attributes.items():
error += f"{name}: {attributes}\n"
raise ValueError(_lowercase )
if __name__ == "__main__":
check_config_attributes()
| 359 |
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> int:
'''simple docstring'''
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError('String lengths must match!' )
lowerCAmelCase : Tuple = 0
for chara, chara in zip(_UpperCAmelCase, _UpperCAmelCase ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case_ : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = self.dummy_uncond_unet
snake_case_ : int = PNDMScheduler()
snake_case_ : Tuple = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pndm.to(__magic_name__ )
pndm.set_progress_bar_config(disable=__magic_name__ )
snake_case_ : Dict = torch.manual_seed(0 )
snake_case_ : int = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type='''numpy''' ).images
snake_case_ : Union[str, Any] = torch.manual_seed(0 )
snake_case_ : Tuple = pndm(generator=__magic_name__ , num_inference_steps=20 , output_type='''numpy''' , return_dict=__magic_name__ )[0]
snake_case_ : Any = image[0, -3:, -3:, -1]
snake_case_ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = '''google/ddpm-cifar10-32'''
snake_case_ : Optional[int] = UNetaDModel.from_pretrained(__magic_name__ )
snake_case_ : str = PNDMScheduler()
snake_case_ : List[Any] = PNDMPipeline(unet=__magic_name__ , scheduler=__magic_name__ )
pndm.to(__magic_name__ )
pndm.set_progress_bar_config(disable=__magic_name__ )
snake_case_ : Union[str, Any] = torch.manual_seed(0 )
snake_case_ : str = pndm(generator=__magic_name__ , output_type='''numpy''' ).images
snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ : str = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 279 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowerCAmelCase_ = CLIPImageProcessor()
lowerCAmelCase_ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
lowerCAmelCase_ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 279 | 1 |
'''simple docstring'''
class UpperCamelCase_ :
def __init__( self , A ) -> List[Any]:
UpperCAmelCase : Any = val
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : int = None
def _lowercase( self , A ) -> Dict:
if self.val:
if val < self.val:
if self.left is None:
UpperCAmelCase : str = Node(__SCREAMING_SNAKE_CASE )
else:
self.left.insert(__SCREAMING_SNAKE_CASE )
elif val > self.val:
if self.right is None:
UpperCAmelCase : Any = Node(__SCREAMING_SNAKE_CASE )
else:
self.right.insert(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Dict = val
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]:
# Recursive traversal
if root:
inorder(root.left , _lowercase )
res.append(root.val )
inorder(root.right , _lowercase )
def __lowerCamelCase ( _lowercase ) -> List[Any]:
# Build BST
if len(_lowercase ) == 0:
return arr
UpperCAmelCase : Any = Node(arr[0] )
for i in range(1 , len(_lowercase ) ):
root.insert(arr[i] )
# Traverse BST in order.
UpperCAmelCase : Dict = []
inorder(_lowercase , _lowercase )
return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 352 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Tuple = len(_lowercase ) + 1
UpperCAmelCase : List[Any] = len(_lowercase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase : str = [[0 for i in range(_lowercase )] for j in range(_lowercase )]
# since string of zero length match pattern of zero length
UpperCAmelCase : int = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _lowercase ):
UpperCAmelCase : str = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _lowercase ):
UpperCAmelCase : Optional[Any] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _lowercase ):
for j in range(1 , _lowercase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase : Union[str, Any] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase : List[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase : Optional[int] = dp[i - 1][j]
else:
UpperCAmelCase : Any = 0
else:
UpperCAmelCase : str = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a : List[str] = """aab"""
a : Optional[int] = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 338 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase__( nn.Module ):
"""simple docstring"""
a :int
a :jnp.dtype = jnp.floataa
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
lowercase_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
lowercase_ , lowercase_ , lowercase_ , lowercase_ = hidden_states.shape
lowercase_ = jax.image.resize(
SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
lowercase_ = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class lowercase__( nn.Module ):
"""simple docstring"""
a :int
a :jnp.dtype = jnp.floataa
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowercase_ = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class lowercase__( nn.Module ):
"""simple docstring"""
a :int
a :int = None
a :float = 0.0
a :bool = None
a :jnp.dtype = jnp.floataa
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = self.in_channels if self.out_channels is None else self.out_channels
lowercase_ = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
lowercase_ = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
lowercase_ = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
lowercase_ = nn.Dropout(self.dropout_prob )
lowercase_ = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowercase_ = None
if use_nin_shortcut:
lowercase_ = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int]=True ) -> Tuple:
lowercase_ = hidden_states
lowercase_ = self.norma(SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.swish(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.conva(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 )
lowercase_ = hidden_states + temb
lowercase_ = self.norma(SCREAMING_SNAKE_CASE_ )
lowercase_ = nn.swish(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = self.conva(SCREAMING_SNAKE_CASE_ )
if self.conv_shortcut is not None:
lowercase_ = self.conv_shortcut(SCREAMING_SNAKE_CASE_ )
return hidden_states + residual
| 30 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[Any] = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'convbert'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=1 , _a=0 , _a=2 , _a=768 , _a=2 , _a=9 , _a=1 , _a=None , **_a , ):
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
__magic_name__ : Tuple = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Union[str, Any] = num_hidden_layers
__magic_name__ : List[Any] = num_attention_heads
__magic_name__ : str = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : List[Any] = hidden_dropout_prob
__magic_name__ : Optional[int] = attention_probs_dropout_prob
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : str = type_vocab_size
__magic_name__ : List[str] = initializer_range
__magic_name__ : Tuple = layer_norm_eps
__magic_name__ : List[Any] = embedding_size
__magic_name__ : List[Any] = head_ratio
__magic_name__ : str = conv_kernel_size
__magic_name__ : Dict = num_groups
__magic_name__ : str = classifier_dropout
class _snake_case ( snake_case ):
@property
def SCREAMING_SNAKE_CASE ( self ):
if self.task == "multiple-choice":
__magic_name__ : Dict = {0: "batch", 1: "choice", 2: "sequence"}
else:
__magic_name__ : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 281 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowercase__ = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 280 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase__ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def UpperCamelCase( UpperCAmelCase_ ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
UpperCAmelCase : List[Any] = list(s_dict.keys() )
for key in keys:
UpperCAmelCase : Union[str, Any] = R'.*/layers_(\d+)'
UpperCAmelCase : List[str] = key
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCAmelCase_ )
UpperCAmelCase : str = R'(encoder|decoder)\/'
if re.match(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = re.match(UpperCAmelCase_ , UpperCAmelCase_ ).groups()
if groups[0] == "encoder":
UpperCAmelCase : Union[str, Any] = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCAmelCase_ )
UpperCAmelCase : List[Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCAmelCase_ )
elif groups[0] == "decoder":
UpperCAmelCase : Tuple = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCAmelCase : List[str] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ )
print(F"""{key} -> {new_key}""" )
UpperCAmelCase : List[Any] = s_dict.pop(UpperCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Optional[int] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Optional[int] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCAmelCase : List[str] = s_dict[key].shape[0]
UpperCAmelCase : List[Any] = s_dict[key]
for idx in range(UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = expert_weihts[idx]
print(F"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(UpperCAmelCase_ )
return s_dict
lowercase__ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
# Convert a google style config to the hugging face fromat
import regex as re
with open(UpperCAmelCase_ , 'r' ) as f:
UpperCAmelCase : Union[str, Any] = f.read()
UpperCAmelCase : Union[str, Any] = re.findall(R'(.*) = ([0-9.]*)' , UpperCAmelCase_ )
UpperCAmelCase : str = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCAmelCase : Dict = float(UpperCAmelCase_ ) if '.' in value else int(UpperCAmelCase_ )
UpperCAmelCase : str = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCAmelCase_ )[0]
UpperCAmelCase : Union[str, Any] = str(activation[1] )
UpperCAmelCase : Optional[int] = num_experts
UpperCAmelCase : List[str] = SwitchTransformersConfig(**UpperCAmelCase_ )
return config
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_="./" , UpperCAmelCase_=8 ):
# Initialise PyTorch model
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
if gin_file is not None:
UpperCAmelCase : List[Any] = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ )
else:
UpperCAmelCase : str = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : str = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ )
UpperCAmelCase : str = flax_params['target']
UpperCAmelCase : Union[str, Any] = flatten_dict(UpperCAmelCase_ , sep='/' )
UpperCAmelCase : Tuple = rename_keys(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = unflatten_dict(UpperCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 280 | 1 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 109 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class a_ :
lowercase = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
lowercase = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = {}
if self.train_dir is not None:
UpperCamelCase = self.train_dir
if self.validation_dir is not None:
UpperCamelCase = self.validation_dir
UpperCamelCase = data_files if data_files else None
@dataclass
class a_ :
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
lowercase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase = field(default=lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
lowercase = field(
default=lowerCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowercase = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
lowercase = field(
default=lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class a_ ( lowerCamelCase ):
lowercase = field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def lowercase__ ( __UpperCamelCase )-> int:
UpperCamelCase = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowercase__ ( )-> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __UpperCamelCase ) and data_args.train_val_split > 0.0:
UpperCamelCase = ds["""train"""].train_test_split(data_args.train_val_split )
UpperCamelCase = split["""train"""]
UpperCamelCase = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCamelCase = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase = ViTMAEForPreTraining(__UpperCamelCase )
if training_args.do_train:
UpperCamelCase = ds["""train"""].column_names
else:
UpperCamelCase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase = """image"""
elif "img" in column_names:
UpperCamelCase = """img"""
else:
UpperCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size["""shortest_edge"""]
else:
UpperCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
UpperCamelCase = Compose(
[
Lambda(lambda __UpperCamelCase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCamelCase ):
UpperCamelCase = [transforms(__UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
UpperCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__UpperCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
UpperCamelCase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__UpperCamelCase )
# Compute absolute learning rate
UpperCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase = trainer.evaluate()
trainer.log_metrics("""eval""" , __UpperCamelCase )
trainer.save_metrics("""eval""" , __UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
def lowercase__ ( __UpperCamelCase )-> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 321 | 0 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__A = logging.get_logger(__name__)
# General docstring
__A = 'PoolFormerConfig'
# Base docstring
__A = 'sail/poolformer_s12'
__A = [1, 512, 7, 7]
# Image classification docstring
__A = 'sail/poolformer_s12'
__A = 'tabby, tabby cat'
__A = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __A ( _lowercase , _lowercase = 0.0 , _lowercase = False ):
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
_A = 1 - drop_prob
_A = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_A = keep_prob + torch.rand(_lowercase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_A = input.div(_lowercase ) * random_tensor
return output
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Optional[Any] , __A: Optional[float] = None ) -> None:
super().__init__()
_A = drop_prob
def __A ( self: Tuple , __A: torch.Tensor ) -> torch.Tensor:
return drop_path(__A , self.drop_prob , self.training )
def __A ( self: Optional[Any] ) -> str:
return "p={}".format(self.drop_prob )
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Tuple , __A: Optional[int] , __A: Union[str, Any] , __A: str , __A: Tuple , __A: Dict , __A: Any=None ) -> Tuple:
super().__init__()
_A = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
_A = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
_A = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
_A = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A )
_A = norm_layer(__A ) if norm_layer else nn.Identity()
def __A ( self: Dict , __A: Optional[Any] ) -> Tuple:
_A = self.projection(__A )
_A = self.norm(__A )
return embeddings
class SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
"""simple docstring"""
def __init__( self: Union[str, Any] , __A: List[str] , **__A: str ) -> List[str]:
super().__init__(1 , __A , **__A )
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Tuple , __A: Optional[Any] ) -> str:
super().__init__()
_A = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def __A ( self: Optional[int] , __A: Union[str, Any] ) -> Dict:
return self.pool(__A ) - hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: List[str] , __A: Tuple , __A: str , __A: Tuple , __A: int ) -> List[str]:
super().__init__()
_A = nn.Convad(__A , __A , 1 )
_A = nn.Convad(__A , __A , 1 )
_A = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
_A = ACTaFN[config.hidden_act]
else:
_A = config.hidden_act
def __A ( self: Optional[Any] , __A: List[str] ) -> Any:
_A = self.conva(__A )
_A = self.act_fn(__A )
_A = self.drop(__A )
_A = self.conva(__A )
_A = self.drop(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Dict , __A: Optional[Any] , __A: int , __A: Any , __A: Optional[int] , __A: Union[str, Any] , __A: Optional[Any] ) -> Optional[Any]:
super().__init__()
_A = PoolFormerPooling(__A )
_A = PoolFormerOutput(__A , __A , __A , __A )
_A = PoolFormerGroupNorm(__A )
_A = PoolFormerGroupNorm(__A )
# Useful for training neural nets
_A = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
_A = config.use_layer_scale
if config.use_layer_scale:
_A = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
_A = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def __A ( self: Any , __A: Union[str, Any] ) -> Dict:
if self.use_layer_scale:
_A = self.pooling(self.before_norm(__A ) )
_A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_A = hidden_states + self.drop_path(__A )
_A = ()
_A = self.output(self.after_norm(__A ) )
_A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_A = hidden_states + self.drop_path(__A )
_A = (output,) + outputs
return outputs
else:
_A = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
_A = pooling_output + hidden_states
_A = ()
# Second residual connection inside the PoolFormerOutput block
_A = self.drop_path(self.output(self.after_norm(__A ) ) )
_A = hidden_states + layer_output
_A = (output,) + outputs
return outputs
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: str , __A: str ) -> str:
super().__init__()
_A = config
# stochastic depth decay rule
_A = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_A = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_A = nn.ModuleList(__A )
# Transformer blocks
_A = []
_A = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_A = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
_A = nn.ModuleList(__A )
def __A ( self: Tuple , __A: Any , __A: Any=False , __A: List[Any]=True ) -> Any:
_A = () if output_hidden_states else None
_A = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_A ,_A = layers
# Get patch embeddings from hidden_states
_A = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
_A = blk(__A )
_A = layer_outputs[0]
if output_hidden_states:
_A = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = PoolFormerConfig
A_ = "poolformer"
A_ = "pixel_values"
A_ = True
def __A ( self: Dict , __A: Any ) -> Tuple:
if isinstance(__A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __A ( self: List[str] , __A: Dict , __A: List[Any]=False ) -> List[Any]:
if isinstance(__A , __A ):
_A = value
__A = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__A = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: Any , __A: Tuple ) -> Union[str, Any]:
super().__init__(__A )
_A = config
_A = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def __A ( self: Any ) -> Optional[int]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __A ( self: List[str] , __A: Optional[torch.FloatTensor] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
_A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
_A = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
_A = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self: Tuple , __A: Optional[Any] ) -> List[str]:
super().__init__()
_A = nn.Linear(config.hidden_size , config.hidden_size )
def __A ( self: str , __A: List[str] ) -> Union[str, Any]:
_A = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , snake_case , )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __init__( self: int , __A: int ) -> List[Any]:
super().__init__(__A )
_A = config.num_labels
_A = PoolFormerModel(__A )
# Final norm
_A = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_A = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __A ( self: List[str] , __A: Optional[torch.FloatTensor] = None , __A: Optional[torch.LongTensor] = None , __A: Optional[bool] = None , __A: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
_A = return_dict if return_dict is not None else self.config.use_return_dict
_A = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
_A = outputs[0]
_A = self.classifier(self.norm(__A ).mean([-2, -1] ) )
_A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A = '''single_label_classification'''
else:
_A = '''multi_label_classification'''
if self.config.problem_type == "regression":
_A = MSELoss()
if self.num_labels == 1:
_A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
_A = CrossEntropyLoss()
_A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A = BCEWithLogitsLoss()
_A = loss_fct(__A , __A )
if not return_dict:
_A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
| 75 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
@require_torch
def __A ( self: Dict ) -> Optional[int]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_A = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_A = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_A = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_A = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task='''fill-mask''' , model=__A )
# baseline - just load from_pretrained with normal network
_A = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_A = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_A = '''1'''
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __A ( self: Dict ) -> Tuple:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_A = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_A = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_A = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_A = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task='''fill-mask''' , model=__A )
# baseline - just load from_pretrained with normal network
_A = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_A = self.get_env()
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __A ( self: Any ) -> Optional[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_A = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_A = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_A = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_A = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_A = self.get_env()
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_A = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_A = '''1'''
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def __A ( self: Optional[int] ) -> Dict:
_A = '''
from transformers import pipeline
'''
_A = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_A = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_A = self.get_env()
_A = '''1'''
_A = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def __A ( self: Optional[int] ) -> int:
_A = '''
from transformers import AutoModel
'''
_A = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_A = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_A = self.get_env()
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_A = '''1'''
_A = subprocess.run(__A , env=__A , check=__A , capture_output=__A )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 75 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a__ ( snake_case__ ):
def __init__( self , *_A , _A=None , _A=None , **_A ):
"""simple docstring"""
super().__init__(*_A , **_A )
__lowerCAmelCase = eval_examples
__lowerCAmelCase = post_process_function
def __SCREAMING_SNAKE_CASE( self , _A = None , _A=None , _A = None , _A = "eval" , **_A , ):
"""simple docstring"""
__lowerCAmelCase = gen_kwargs.copy()
__lowerCAmelCase = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
__lowerCAmelCase = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
__lowerCAmelCase = gen_kwargs
__lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
__lowerCAmelCase = self.get_eval_dataloader(_A )
__lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowerCAmelCase = self.compute_metrics
__lowerCAmelCase = None
__lowerCAmelCase = time.time()
__lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowerCAmelCase = eval_loop(
_A , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
__lowerCAmelCase = compute_metrics
__lowerCAmelCase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__lowerCAmelCase = self.post_process_function(_A , _A , _A )
__lowerCAmelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__lowerCAmelCase = metrics.pop(_A )
metrics.update(output.metrics )
else:
__lowerCAmelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=None , _A = "test" , **_A ):
"""simple docstring"""
__lowerCAmelCase = gen_kwargs.copy()
__lowerCAmelCase = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
__lowerCAmelCase = self.compute_metrics
__lowerCAmelCase = None
__lowerCAmelCase = time.time()
__lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowerCAmelCase = eval_loop(
_A , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , metric_key_prefix=_A , )
finally:
__lowerCAmelCase = compute_metrics
__lowerCAmelCase = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_A , _A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowerCAmelCase = self.post_process_function(_A , _A , _A , "predict" )
__lowerCAmelCase = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__lowerCAmelCase = metrics.pop(_A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
| 92 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
if hor == 1_28:
__lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
__lowerCAmelCase = (32, 1_28, 2_56)
__lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
__lowerCAmelCase = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
__lowerCAmelCase = (32, 64, 1_28, 2_56)
__lowerCAmelCase = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
__lowerCAmelCase = torch.load(F"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""" )
__lowerCAmelCase = model.state_dict()
__lowerCAmelCase = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_55_36,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
__lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
__lowerCAmelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ )
torch.save(hf_value_function.state_dict() , F"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""" )
with open(F"""hub/hopper-medium-v2/unet/hor{hor}/config.json""" , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a ( ):
__lowerCAmelCase = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 1_28, 2_56),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_55_36,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
__lowerCAmelCase = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch" )
__lowerCAmelCase = model
__lowerCAmelCase = UNetaDModel(**SCREAMING_SNAKE_CASE_ )
print(F"""length of state dict: {len(state_dict.keys() )}""" )
print(F"""length of value function dict: {len(hf_value_function.state_dict().keys() )}""" )
__lowerCAmelCase = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
__lowerCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ )
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin" )
with open("hub/hopper-medium-v2/value_function/config.json" , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 92 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Union[str, Any] = "▁"
lowerCAmelCase : Dict = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase):
lowerCAmelCase_ = BertGenerationTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = True
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
super().setUp()
UpperCamelCase = BertGenerationTokenizer(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = '<s>'
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(A_ ) , 1002 )
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = BertGenerationTokenizer(A_ , keep_accents=A_ )
UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [285, 46, 10, 170, 382] , )
UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = 'Hello World!'
UpperCamelCase = [18536, 2260, 101]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@require_torch
@slow
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCamelCase = ' '.join(A_ )
UpperCamelCase = self.big_tokenizer.encode_plus(A_ , return_tensors='pt' , return_token_type_ids=A_ )
UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=A_ )
UpperCamelCase = BertGenerationConfig()
UpperCamelCase = BertGenerationEncoder(A_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A_ )
model(**A_ )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 368 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE__ ( nn.Module):
lowerCAmelCase_ = 42
lowerCAmelCase_ = jnp.floataa
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = hidden_states.shape
UpperCamelCase = jax.image.resize(
A_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
UpperCamelCase = self.conv(A_ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module):
lowerCAmelCase_ = 42
lowerCAmelCase_ = jnp.floataa
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.conv(A_ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module):
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = None
lowerCAmelCase_ = jnp.floataa
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.in_channels if self.out_channels is None else self.out_channels
UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
UpperCamelCase = nn.Conv(
A_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase = nn.Dense(A_ , dtype=self.dtype )
UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
UpperCamelCase = nn.Dropout(self.dropout_prob )
UpperCamelCase = nn.Conv(
A_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCamelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
UpperCamelCase = None
if use_nin_shortcut:
UpperCamelCase = nn.Conv(
A_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , A_ , A_ , A_=True )-> str:
'''simple docstring'''
UpperCamelCase = hidden_states
UpperCamelCase = self.norma(A_ )
UpperCamelCase = nn.swish(A_ )
UpperCamelCase = self.conva(A_ )
UpperCamelCase = self.time_emb_proj(nn.swish(A_ ) )
UpperCamelCase = jnp.expand_dims(jnp.expand_dims(A_ , 1 ) , 1 )
UpperCamelCase = hidden_states + temb
UpperCamelCase = self.norma(A_ )
UpperCamelCase = nn.swish(A_ )
UpperCamelCase = self.dropout(A_ , A_ )
UpperCamelCase = self.conva(A_ )
if self.conv_shortcut is not None:
UpperCamelCase = self.conv_shortcut(A_ )
return hidden_states + residual
| 251 | 0 |
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Dict = [0] * len(_lowerCamelCase)
lowercase__ : int = []
lowercase__ : List[str] = [1] * len(_lowerCamelCase)
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowerCamelCase)):
if indegree[i] == 0:
queue.append(_lowerCamelCase)
while queue:
lowercase__ : Union[str, Any] = queue.pop(0)
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowercase__ : Dict = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_lowerCamelCase)
print(max(_lowerCamelCase))
# Adjacency list of Graph
UpperCamelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 87 |
'''simple docstring'''
import numpy as np
def a__ ( a__ , a__ , a__ = 1E-1_2 , a__ = 1_00 , ):
"""simple docstring"""
assert np.shape(a__ )[0] == np.shape(a__ )[1]
# Ensure proper dimensionality.
assert np.shape(a__ )[0] == np.shape(a__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(a__ ) == np.iscomplexobj(a__ )
__SCREAMING_SNAKE_CASE = np.iscomplexobj(a__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(a__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1E1_2
while not convergence:
# Multiple matrix by the vector.
__SCREAMING_SNAKE_CASE = np.dot(a__ , a__ )
# Normalize the resulting output vector.
__SCREAMING_SNAKE_CASE = w / np.linalg.norm(a__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
__SCREAMING_SNAKE_CASE = np.dot(a__ , np.dot(a__ , a__ ) )
# Check convergence.
__SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = lambda_
if is_complex:
__SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__SCREAMING_SNAKE_CASE = np.array([41, 4, 20] )
__SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
__SCREAMING_SNAKE_CASE = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__SCREAMING_SNAKE_CASE = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__SCREAMING_SNAKE_CASE = real_input_matrix
__SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
__SCREAMING_SNAKE_CASE = complex_input_matrix
__SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = power_iteration(a__ , a__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.linalg.eigh(a__ )
# Last eigenvalue is the maximum one.
__SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(a__ ) - np.abs(a__ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 267 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def A ( __UpperCAmelCase=None , __UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__UpperCAmelCase )
@dataclass
class a_ :
UpperCamelCase__ : str =field(
metadata={"help": "The csv file to plot."} , )
UpperCamelCase__ : bool =field(
default=__lowerCAmelCase , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
UpperCamelCase__ : bool =field(
default=__lowerCAmelCase , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
UpperCamelCase__ : bool =field(
default=__lowerCAmelCase , metadata={"help": "Disable logarithmic scale when plotting"} , )
UpperCamelCase__ : bool =field(
default=__lowerCAmelCase , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
UpperCamelCase__ : Optional[str] =field(
default=__lowerCAmelCase , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
UpperCamelCase__ : Optional[List[str]] =list_field(
default=__lowerCAmelCase , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def A ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
try:
int(__UpperCAmelCase )
return True
except ValueError:
return False
def A ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
try:
float(__UpperCAmelCase )
return True
except ValueError:
return False
class a_ :
def __init__( self :Any , _lowercase :Dict) -> Dict:
UpperCAmelCase_ = args
UpperCAmelCase_ = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
with open(self.args.csv_file , newline='''''') as csv_file:
UpperCAmelCase_ = csv.DictReader(lowerCamelCase__)
for row in reader:
UpperCAmelCase_ = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size''']))
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length''']))
if can_convert_to_int(row['''result''']):
# value is not None
UpperCAmelCase_ = int(row['''result'''])
elif can_convert_to_float(row['''result''']):
# value is not None
UpperCAmelCase_ = float(row['''result'''])
def __a ( self :List[Any]) -> List[Any]:
UpperCAmelCase_ = plt.subplots()
UpperCAmelCase_ = '''Time usage''' if self.args.is_time else '''Memory usage'''
UpperCAmelCase_ = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''')
ax.set_yscale('''log''')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
UpperCAmelCase_ = sorted(set(self.result_dict[model_name]['''bsz''']))
UpperCAmelCase_ = sorted(set(self.result_dict[model_name]['''seq_len''']))
UpperCAmelCase_ = self.result_dict[model_name]['''result''']
(UpperCAmelCase_) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
UpperCAmelCase_ = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
UpperCAmelCase_ = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCamelCase__ , )
else:
UpperCAmelCase_ = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
(UpperCAmelCase_) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
UpperCAmelCase_ = np.asarray(lowerCamelCase__ , lowerCamelCase__)[: len(lowerCamelCase__)]
plt.scatter(
lowerCamelCase__ , lowerCamelCase__ , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}")
plt.plot(lowerCamelCase__ , lowerCamelCase__ , '''--''')
title_str += f" {label_model_name} vs."
UpperCAmelCase_ = title_str[:-4]
UpperCAmelCase_ = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(lowerCamelCase__)
plt.xlabel(lowerCamelCase__)
plt.ylabel(lowerCamelCase__)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
def A ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = HfArgumentParser(__UpperCAmelCase )
UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ = Plot(args=__UpperCAmelCase )
plot.plot()
if __name__ == "__main__":
main()
| 357 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase_ = "▁"
UpperCamelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : str =BigBirdTokenizer
UpperCamelCase__ : Tuple =BigBirdTokenizerFast
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : List[str] =True
def __a ( self :Any) -> List[str]:
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(_lowercase , keep_accents=_lowercase)
tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :str) -> str:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''[MASK]''')
self.assertEqual(len(_lowercase) , 1004)
def __a ( self :List[str]) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __a ( self :Tuple) -> int:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_lowercase)
UpperCAmelCase_ = rust_tokenizer.tokenize(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase)
self.assertListEqual(_lowercase , _lowercase)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_lowercase)
UpperCAmelCase_ = rust_tokenizer.encode(_lowercase)
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[Any]) -> List[str]:
UpperCAmelCase_ = BigBirdTokenizer(_lowercase , keep_accents=_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __a ( self :Any) -> List[Any]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def __a ( self :int) -> List[Any]:
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@slow
def __a ( self :int) -> Any:
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase))
@require_torch
@slow
def __a ( self :Dict) -> Union[str, Any]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
UpperCAmelCase_ = ''' '''.join(_lowercase)
UpperCAmelCase_ = self.big_tokenizer.encode_plus(_lowercase , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_lowercase)
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''')
UpperCAmelCase_ = BigBirdModel(_lowercase)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowercase)
model(**_lowercase)
@slow
def __a ( self :Optional[int]) -> Any:
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def __a ( self :Dict) -> List[str]:
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 344 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]):
with open(lowerCAmelCase__) as metadata_file:
lowercase__ : Dict = json.load(lowerCAmelCase__)
lowercase__ : Union[str, Any] = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata["model_config"])
# Load in the weights from the checkpoint_path
lowercase__ : Optional[Any] = torch.load(lowerCAmelCase__ , map_location="cpu")
# Load the entity vocab file
lowercase__ : Dict = load_entity_vocab(lowerCAmelCase__)
lowercase__ : int = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ : Union[str, Any] = AddedToken("<ent>" , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__)
lowercase__ : Any = AddedToken("<ent2>" , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__)
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]})
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''')
tokenizer.save_pretrained(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__ , LukeTokenizer.vocab_files_names["entity_vocab_file"]) , "w") as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__)
lowercase__ : Optional[Any] = LukeTokenizer.from_pretrained(lowerCAmelCase__)
# Initialize the embeddings of the special tokens
lowercase__ : Tuple = state_dict["embeddings.word_embeddings.weight"]
lowercase__ : List[str] = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
lowercase__ : Dict = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
lowercase__ : List[Any] = torch.cat([word_emb, ent_emb, enta_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ : Any = f'''encoder.layer.{layer_index}.attention.self.'''
lowercase__ : List[str] = state_dict[prefix + matrix_name]
lowercase__ : Any = state_dict[prefix + matrix_name]
lowercase__ : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ : List[Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
lowercase__ : List[str] = entity_emb[entity_vocab["[MASK]"]]
lowercase__ : Tuple = LukeModel(config=lowerCAmelCase__).eval()
lowercase__ , lowercase__ : int = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__)
if not (len(lowerCAmelCase__) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(lowerCAmelCase__)}. Expected only missing embeddings.position_ids''')
if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
raise ValueError(
"Unexpected keys"
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}''')
# Check outputs
lowercase__ : Any = LukeTokenizer.from_pretrained(lowerCAmelCase__ , task="entity_classification")
lowercase__ : Any = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
lowercase__ : str = (39, 42)
lowercase__ : Optional[Any] = tokenizer(lowerCAmelCase__ , entity_spans=[span] , add_prefix_space=lowerCAmelCase__ , return_tensors="pt")
lowercase__ : Union[str, Any] = model(**lowerCAmelCase__)
# Verify word hidden states
if model_size == "large":
lowercase__ : Any = torch.Size((1, 42, 1024))
lowercase__ : str = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
else: # base
lowercase__ : int = torch.Size((1, 42, 768))
lowercase__ : Tuple = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''')
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowercase__ : int = torch.Size((1, 1, 1024))
lowercase__ : List[str] = torch.tensor([[0.0466, -0.0106, -0.0179]])
else: # base
lowercase__ : str = torch.Size((1, 1, 768))
lowercase__ : str = torch.tensor([[0.1457, 0.1044, 0.0174]])
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''')
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowerCAmelCase__))
model.save_pretrained(lowerCAmelCase__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : List[Any] = {}
with open(lowerCAmelCase__ , "r" , encoding="utf-8") as f:
for index, line in enumerate(lowerCAmelCase__):
lowercase__ , lowercase__ : Tuple = line.rstrip().split("\t")
lowercase__ : int = index
return entity_vocab
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 87 |
'''simple docstring'''
import numpy as np
class a :
def __init__( self ) -> List[str]:
_a = (0, 0)
_a = None
_a = 0
_a = 0
_a = 0
def __eq__( self , __magic_name__ ) -> Optional[int]:
return self.position == cell.position
def __UpperCAmelCase ( self ) -> Any:
print(self.position )
class a :
def __init__( self , __magic_name__=(5, 5) ) -> Optional[int]:
_a = np.zeros(__magic_name__ )
_a = world_size[0]
_a = world_size[1]
def __UpperCAmelCase ( self ) -> List[Any]:
print(self.w )
def __UpperCAmelCase ( self , __magic_name__ ) -> Union[str, Any]:
_a = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
_a = cell.position[0]
_a = cell.position[1]
_a = []
for n in neughbour_cord:
_a = current_x + n[0]
_a = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
_a = Cell()
_a = (x, y)
_a = cell
neighbours.append(__magic_name__ )
return neighbours
def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int ) -> List[str]:
'''simple docstring'''
_a = []
_a = []
_open.append(lowerCAmelCase__ )
while _open:
_a = np.argmin([n.f for n in _open] )
_a = _open[min_f]
_closed.append(_open.pop(lowerCAmelCase__ ) )
if current == goal:
break
for n in world.get_neigbours(lowerCAmelCase__ ):
for c in _closed:
if c == n:
continue
_a = current.g + 1
_a , _a = n.position
_a , _a = goal.position
_a = (ya - ya) ** 2 + (xa - xa) ** 2
_a = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(lowerCAmelCase__ )
_a = []
while current.parent is not None:
path.append(current.position )
_a = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
a_ : str = Gridworld()
# Start position and goal
a_ : str = Cell()
a_ : Dict = (0, 0)
a_ : Dict = Cell()
a_ : Optional[Any] = (4, 4)
print(f'''path from {start.position} to {goal.position}''')
a_ : Tuple = astar(world, start, goal)
# Just for visual reasons.
for i in s:
a_ : Any = 1
print(world.w)
| 168 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__SCREAMING_SNAKE_CASE ="."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__SCREAMING_SNAKE_CASE =[
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ):
lowercase_ : int = SavedModel()
lowercase_ : Union[str, Any] = []
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
lowercase_ : List[str] = json.load(__SCREAMING_SNAKE_CASE )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__SCREAMING_SNAKE_CASE )] )
with open(__SCREAMING_SNAKE_CASE , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
lowercase_ : Any = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowercase_ : int = sorted(__SCREAMING_SNAKE_CASE )
lowercase_ : str = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__SCREAMING_SNAKE_CASE )
if strict and len(__SCREAMING_SNAKE_CASE ) > 0:
raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops )
elif len(__SCREAMING_SNAKE_CASE ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__SCREAMING_SNAKE_CASE , sep='\n' )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 321 | """simple docstring"""
__SCREAMING_SNAKE_CASE ={
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
__SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()}
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Union[str, Any] = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowercase_ : Dict = ''
for word in coded.split():
while len(__SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase_ : Any = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 321 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A : List[Any] = logging.get_logger(__name__)
A : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
A : Union[str, Any] = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
A : Tuple = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =VOCAB_FILES_NAMES
__UpperCAmelCase : Any =PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any =["""input_ids""", """attention_mask"""]
__UpperCAmelCase : Optional[int] =GPTaTokenizer
def __init__( self , __a=None , __a=None , __a=None , __a="<|endoftext|>" , __a="<|endoftext|>" , __a="<|endoftext|>" , __a=False , **__a , ):
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
__lowerCAmelCase = kwargs.pop("add_bos_token" , __lowerCamelCase )
__lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __lowerCamelCase ) != add_prefix_space:
__lowerCAmelCase = getattr(__lowerCamelCase , pre_tok_state.pop("type" ) )
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = pre_tok_class(**__lowerCamelCase )
__lowerCAmelCase = add_prefix_space
def snake_case ( self , *__a , **__a ):
__lowerCAmelCase = kwargs.get("is_split_into_words" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def snake_case ( self , *__a , **__a ):
__lowerCAmelCase = kwargs.get("is_split_into_words" , __lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def snake_case ( self , __a ):
__lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] )
if len(__lowerCamelCase ) > self.model_max_length:
__lowerCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 57 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case :str = logging.get_logger(__name__)
__snake_case :Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case :Optional[int] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
__snake_case :Optional[int] = {
'''gpt-neox-20b''': 2048,
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : int="<|endoftext|>" , __SCREAMING_SNAKE_CASE : Optional[int]="<|endoftext|>" , __SCREAMING_SNAKE_CASE : Dict="<|endoftext|>" , __SCREAMING_SNAKE_CASE : Optional[int]=False , **__SCREAMING_SNAKE_CASE : List[str] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE) != add_prefix_space:
__a = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type'''))
__a = add_prefix_space
__a = pre_tok_class(**__SCREAMING_SNAKE_CASE)
__a = add_prefix_space
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
__a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
return tuple(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : "Conversation"):
'''simple docstring'''
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE) + [self.eos_token_id])
if len(__SCREAMING_SNAKE_CASE) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
| 357 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
def __init__( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 2, 1] , __SCREAMING_SNAKE_CASE : Optional[int]=[2, 2, 4] , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Tuple=2.0 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Dict=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-5 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[int]=10 , __SCREAMING_SNAKE_CASE : int=8 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = depths
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = patch_norm
__a = layer_norm_eps
__a = initializer_range
__a = is_training
__a = scope
__a = use_labels
__a = type_sequence_label_size
__a = encoder_stride
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = SwinvaModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__a = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = SwinvaForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__a = 1
__a = SwinvaForMaskedImageModeling(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = self.type_sequence_label_size
__a = SwinvaForImageClassification(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Dict = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCamelCase__ : Optional[int] = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ : int = False
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : Optional[Any] = False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = SwinvaModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , embed_dim=37)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = True
for model_class in self.all_model_classes:
__a = True
__a = False
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
__a = len(self.model_tester.depths)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a = True
__a = config.window_size**2
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__a = len(__SCREAMING_SNAKE_CASE)
# Check attention is always last and order is fine
__a = True
__a = True
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
if hasattr(self.model_tester , '''num_hidden_states_types'''):
__a = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__a = 2
self.assertEqual(out_len + added_hidden_states , len(__SCREAMING_SNAKE_CASE))
__a = outputs.attentions
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = model_class(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
__a = outputs.hidden_states
__a = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Swinv2 has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
__a = outputs.reshaped_hidden_states
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
__a , __a , __a , __a = reshaped_hidden_states[0].shape
__a = (
reshaped_hidden_states[0].view(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , (padded_height, padded_width))
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = SwinvaModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = _config_zero_init(__SCREAMING_SNAKE_CASE)
for model_class in self.all_model_classes:
__a = model_class(config=__SCREAMING_SNAKE_CASE)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
__SCREAMING_SNAKE_CASE)
__a = self.default_image_processor
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([-0.39_47, -0.43_06, 0.00_26]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
| 131 | 0 |
def _a ( UpperCamelCase_ : Dict ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = len(UpperCamelCase_ )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase__ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase__ = arr[mi::-1] + arr[mi + 1 : len(UpperCamelCase_ )]
# Reverse whole list
lowerCAmelCase__ = arr[cur - 1 :: -1] + arr[cur : len(UpperCamelCase_ )]
cur -= 1
return arr
if __name__ == "__main__":
a_ = input('''Enter numbers separated by a comma:\n''').strip()
a_ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 340 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 | 0 |
'''simple docstring'''
import numpy as np
def __A ( lowerCAmelCase_ ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __a ):
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__ ):
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__(self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 5_0 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , lowerCAmelCase__ ):
_UpperCAmelCase : str = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_UpperCAmelCase : int = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_UpperCAmelCase : Optional[Any] = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase : str = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : List[str] = self.scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , eta=lowerCAmelCase__ , use_clipped_model_output=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
| 170 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 | 1 |
import baseaa
def snake_case( __magic_name__ ) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode('''utf-8''' ) )
def snake_case( __magic_name__ ) -> str:
'''simple docstring'''
return baseaa.baadecode(__magic_name__ ).decode('''utf-8''' )
if __name__ == "__main__":
lowerCAmelCase_ = 'Hello World!'
lowerCAmelCase_ = baseaa_encode(test)
print(encoded)
lowerCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 116 |
def snake_case( __magic_name__ , __magic_name__ ) -> bool:
'''simple docstring'''
lowercase : List[Any] = len(__magic_name__ )
lowercase : str = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
lowercase : List[str] = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
lowercase : str = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
lowercase : Optional[Any] = subset[i - 1][j]
if arr[i - 1] <= j:
lowercase : Dict = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod() | 116 | 1 |
from collections.abc import Callable
import numpy as np
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(lowercase__ ):
_UpperCAmelCase = y[k] + step_size * ode_func(lowercase__ , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(lowercase__ , y[k] ) + ode_func(x + step_size , lowercase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A ( enum.Enum ):
UpperCamelCase_ : Optional[int] =0
UpperCamelCase_ : Tuple =1
UpperCamelCase_ : Optional[int] =2
@add_end_docstrings(A_ )
class A ( A_ ):
UpperCamelCase_ : Union[str, Any] ='''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowercase= None
if self.model.config.prefix is not None:
__lowercase= self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowercase= self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowercase, __lowercase, __lowercase= self._sanitize_parameters(prefix=lowerCAmelCase , **self._forward_params )
__lowercase= {**self._preprocess_params, **preprocess_params}
__lowercase= {**self._forward_params, **forward_params}
def _A (self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ):
__lowercase= {}
if prefix is not None:
__lowercase= prefix
if prefix:
__lowercase= self.tokenizer(
lowerCAmelCase , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'
' [None, \'hole\']' )
__lowercase= handle_long_generation
preprocess_params.update(lowerCAmelCase )
__lowercase= generate_kwargs
__lowercase= {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowercase= ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowercase= ReturnType.TENSORS
if return_type is not None:
__lowercase= return_type
if clean_up_tokenization_spaces is not None:
__lowercase= clean_up_tokenization_spaces
if stop_sequence is not None:
__lowercase= self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
if len(lowerCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowercase= stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _A (self , *lowerCAmelCase , **lowerCAmelCase ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*lowerCAmelCase , **lowerCAmelCase )
def __call__(self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , **lowerCAmelCase ):
__lowercase= self.tokenizer(
prefix + prompt_text , padding=lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=self.framework )
__lowercase= prompt_text
if handle_long_generation == "hole":
__lowercase= inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowercase= generate_kwargs['max_new_tokens']
else:
__lowercase= generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowercase= self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowercase= inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowercase= inputs['attention_mask'][:, -keep_length:]
return inputs
def _A (self , lowerCAmelCase , **lowerCAmelCase ):
__lowercase= model_inputs['input_ids']
__lowercase= model_inputs.get('attention_mask' , lowerCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowercase= None
__lowercase= None
__lowercase= 1
else:
__lowercase= input_ids.shape[0]
__lowercase= model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowercase= generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowercase= 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowercase= generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowercase= 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowercase= self.model.generate(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , **lowerCAmelCase )
__lowercase= generated_sequence.shape[0]
if self.framework == "pt":
__lowercase= generated_sequence.reshape(lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowercase= tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _A (self , lowerCAmelCase , lowerCAmelCase=ReturnType.FULL_TEXT , lowerCAmelCase=True ):
__lowercase= model_outputs['generated_sequence'][0]
__lowercase= model_outputs['input_ids']
__lowercase= model_outputs['prompt_text']
__lowercase= generated_sequence.numpy().tolist()
__lowercase= []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowercase= {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowercase= self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowercase= 0
else:
__lowercase= len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
__lowercase= prompt_text + text[prompt_length:]
else:
__lowercase= text[prompt_length:]
__lowercase= {'generated_text': all_text}
records.append(lowerCAmelCase )
return records
| 295 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
SCREAMING_SNAKE_CASE__ = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def lowercase__ ( )-> Optional[int]:
UpperCamelCase = os.path.dirname(os.path.realpath(__UpperCamelCase ) )
UpperCamelCase = os.path.join(__UpperCamelCase , """words.txt""" )
UpperCamelCase = """"""
with open(__UpperCamelCase ) as f:
UpperCamelCase = f.readline()
UpperCamelCase = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
UpperCamelCase = [
word
for word in [sum(ord(__UpperCamelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
| 183 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def lowercase__ ( )-> Tuple:
# Get the sagemaker specific mp parameters from smp_options variable.
UpperCamelCase = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
UpperCamelCase = json.loads(__UpperCamelCase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
UpperCamelCase = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
UpperCamelCase = json.loads(__UpperCamelCase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , __UpperCamelCase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class a_ ( lowerCamelCase ):
lowercase = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def A__ ( self ) -> Tuple:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , )
@cached_property
def A__ ( self ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
UpperCamelCase = torch.device("""cpu""" )
UpperCamelCase = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase = smp.local_rank()
UpperCamelCase = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
UpperCamelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
UpperCamelCase = torch.device("""cuda""" , self.local_rank )
UpperCamelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCamelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
UpperCamelCase = torch.device("""cuda""" , self.local_rank )
UpperCamelCase = 1
if device.type == "cuda":
torch.cuda.set_device(_SCREAMING_SNAKE_CASE )
return device
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> str:
"""simple docstring"""
return False
| 183 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class A__ ( A__ ):
A__ = 'audio-spectrogram-transformer'
def __init__( self : Union[str, Any] , _a : Tuple=768 , _a : List[str]=12 , _a : Any=12 , _a : Optional[Any]=3072 , _a : List[str]="gelu" , _a : Optional[Any]=0.0 , _a : Dict=0.0 , _a : Optional[Any]=0.02 , _a : int=1e-12 , _a : int=16 , _a : Optional[Any]=True , _a : Optional[int]=10 , _a : Optional[Any]=10 , _a : Optional[int]=1024 , _a : List[str]=128 , **_a : Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =qkv_bias
_SCREAMING_SNAKE_CASE =frequency_stride
_SCREAMING_SNAKE_CASE =time_stride
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =num_mel_bins
| 47 |
'''simple docstring'''
lowerCamelCase : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCamelCase : int = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCamelCase : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 47 | 1 |
import math
import tensorflow as tf
from packaging import version
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Any = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Any = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
A_ : Tuple = tf.cast(math.pi , x.dtype )
A_ : str = tf.cast(0.0_4_4_7_1_5 , x.dtype )
A_ : List[Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(SCREAMING_SNAKE_CASE , 3 )) ))
return x * cdf
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
return x * tf.tanh(tf.math.softplus(SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[str] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
A_ : str = tf.cast(0.0_4_4_7_1_5 , x.dtype )
A_ : Optional[int] = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
A_ : List[str] = tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return tf.clip_by_value(_gelu(SCREAMING_SNAKE_CASE ) , -10 , 10 )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ):
A_ : Any = tf.split(SCREAMING_SNAKE_CASE , 2 , axis=SCREAMING_SNAKE_CASE )
return a * tf.math.sigmoid(SCREAMING_SNAKE_CASE )
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return tf.keras.activations.gelu(SCREAMING_SNAKE_CASE , approximate=SCREAMING_SNAKE_CASE )
UpperCamelCase = tf.keras.activations.gelu
UpperCamelCase = approximate_gelu_wrap
else:
UpperCamelCase = _gelu
UpperCamelCase = _gelu_new
UpperCamelCase = {
"""gelu""": gelu,
"""gelu_10""": gelu_aa,
"""gelu_fast""": gelu_fast,
"""gelu_new""": gelu_new,
"""glu""": glu,
"""mish""": mish,
"""quick_gelu""": quick_gelu,
"""relu""": tf.keras.activations.relu,
"""sigmoid""": tf.keras.activations.sigmoid,
"""silu""": tf.keras.activations.swish,
"""swish""": tf.keras.activations.swish,
"""tanh""": tf.keras.activations.tanh,
}
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 358 |
from manim import *
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : str = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Optional[Any] = Text('''CPU''' , font_size=24 )
A_ : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Dict = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.align_to(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = Text('''Model''' , font_size=24 )
A_ : List[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , )
A_ : int = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Any = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(_SCREAMING_SNAKE_CASE ) , Write(_SCREAMING_SNAKE_CASE ) )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Dict = []
A_ : int = []
A_ : Optional[Any] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(_SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A_ : Union[str, Any] = 0.4_6 / 4
A_ : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
| 65 | 0 |
import warnings
from .generation import TFGenerationMixin
class __UpperCAmelCase ( lowerCamelCase__ ):
# warning at import time
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , lowerCamelCase__ , )
| 336 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = LayoutLMTokenizer
UpperCamelCase = LayoutLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def __magic_name__ ( self : Any ):
super().setUp()
UpperCAmelCase : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase : int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __magic_name__ ( self : Union[str, Any], **__A : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **__A )
def __magic_name__ ( self : Optional[int], __A : int ):
UpperCAmelCase : Optional[Any] = '''UNwant\u00E9d,running'''
UpperCAmelCase : Optional[int] = '''unwanted, running'''
return input_text, output_text
def __magic_name__ ( self : Any ):
UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__A, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ), [7, 4, 5, 1_0, 8, 9] )
def __magic_name__ ( self : Optional[int] ):
pass
| 336 | 1 |
def _UpperCamelCase ( UpperCamelCase_ : str ) -> str:
"""simple docstring"""
return "".join(chr(ord(UpperCamelCase_ ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 122 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Dict = AltDiffusionPipeline
A__ : str = TEXT_TO_IMAGE_PARAMS
A__ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
A__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
A__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
UpperCamelCase_ = CLIPTextModel(__UpperCamelCase )
UpperCamelCase_ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
UpperCamelCase_ = 7_7
UpperCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
if str(__UpperCamelCase ).startswith("""mps""" ):
UpperCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
UpperCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCamelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
torch.manual_seed(0 )
UpperCamelCase_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCamelCase_ = RobertaSeriesModelWithTransformation(__UpperCamelCase )
UpperCamelCase_ = text_encoder
UpperCamelCase_ = AltDiffusionPipeline(**__UpperCamelCase )
UpperCamelCase_ = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
UpperCamelCase_ = """A photo of an astronaut"""
UpperCamelCase_ = alt_pipe(**__UpperCamelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase_ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = PNDMScheduler(skip_prk_steps=__UpperCamelCase )
torch.manual_seed(0 )
UpperCamelCase_ = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCamelCase_ = RobertaSeriesModelWithTransformation(__UpperCamelCase )
UpperCamelCase_ = text_encoder
UpperCamelCase_ = AltDiffusionPipeline(**__UpperCamelCase )
UpperCamelCase_ = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
UpperCamelCase_ = alt_pipe(**__UpperCamelCase )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCamelCase_ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__UpperCamelCase )
UpperCamelCase_ = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = """A painting of a squirrel eating a burger"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = alt_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="""np""" )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase_ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
UpperCamelCase_ = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase )
UpperCamelCase_ = alt_pipe.to(__UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = """A painting of a squirrel eating a burger"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = alt_pipe([prompt] , generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCamelCase_ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 122 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : Any = DanceDiffusionPipeline
A__ : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
A__ : List[Any] = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
A__ : Dict = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
A__ : str = False
A__ : Any = False
def lowerCamelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__UpperCamelCase , use_timestep_embedding=__UpperCamelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase_ = IPNDMScheduler()
UpperCamelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
if str(__UpperCamelCase ).startswith("""mps""" ):
UpperCamelCase_ = torch.manual_seed(__UpperCamelCase )
else:
UpperCamelCase_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
UpperCamelCase_ = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = DanceDiffusionPipeline(**__UpperCamelCase )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = self.get_dummy_inputs(__UpperCamelCase )
UpperCamelCase_ = pipe(**__UpperCamelCase )
UpperCamelCase_ = output.audios
UpperCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase_ = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase_ ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = torch_device
UpperCamelCase_ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(generator=__UpperCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
UpperCamelCase_ = output.audios
UpperCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_ = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = torch_device
UpperCamelCase_ = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase_ = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(generator=__UpperCamelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
UpperCamelCase_ = output.audios
UpperCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_ = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 122 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__snake_case = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 366 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 78 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCamelCase__ = logging.get_logger(__name__)
# General docstring
UpperCamelCase__ = 'ResNetConfig'
# Base docstring
UpperCamelCase__ = 'microsoft/resnet-50'
UpperCamelCase__ = [1, 2_0_4_8, 7, 7]
# Image classification docstring
UpperCamelCase__ = 'microsoft/resnet-50'
UpperCamelCase__ = 'tiger cat'
UpperCamelCase__ = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class A ( nn.Module ):
def __init__(self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu" ) -> int:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Convad(
__UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=kernel_size // 2 , bias=__UpperCAmelCase )
UpperCAmelCase__ = nn.BatchNormad(__UpperCAmelCase )
UpperCAmelCase__ = ACTaFN[activation] if activation is not None else nn.Identity()
def lowercase_ (self : List[Any] , __UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
UpperCAmelCase__ = self.convolution(__UpperCAmelCase )
UpperCAmelCase__ = self.normalization(__UpperCAmelCase )
UpperCAmelCase__ = self.activation(__UpperCAmelCase )
return hidden_state
class A ( nn.Module ):
def __init__(self : str , __UpperCAmelCase : ResNetConfig ) -> Any:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
UpperCAmelCase__ = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
UpperCAmelCase__ = config.num_channels
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
UpperCAmelCase__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
UpperCAmelCase__ = self.embedder(__UpperCAmelCase )
UpperCAmelCase__ = self.pooler(__UpperCAmelCase )
return embedding
class A ( nn.Module ):
def __init__(self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2 ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , stride=__UpperCAmelCase , bias=__UpperCAmelCase )
UpperCAmelCase__ = nn.BatchNormad(__UpperCAmelCase )
def lowercase_ (self : List[str] , __UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
UpperCAmelCase__ = self.convolution(__UpperCAmelCase )
UpperCAmelCase__ = self.normalization(__UpperCAmelCase )
return hidden_state
class A ( nn.Module ):
def __init__(self : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu" ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = in_channels != out_channels or stride != 1
UpperCAmelCase__ = (
ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase ) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , activation=__UpperCAmelCase ) , )
UpperCAmelCase__ = ACTaFN[activation]
def lowercase_ (self : List[Any] , __UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = hidden_state
UpperCAmelCase__ = self.layer(__UpperCAmelCase )
UpperCAmelCase__ = self.shortcut(__UpperCAmelCase )
hidden_state += residual
UpperCAmelCase__ = self.activation(__UpperCAmelCase )
return hidden_state
class A ( nn.Module ):
def __init__(self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "relu" , __UpperCAmelCase : int = 4 ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = in_channels != out_channels or stride != 1
UpperCAmelCase__ = out_channels // reduction
UpperCAmelCase__ = (
ResNetShortCut(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase__ = nn.Sequential(
ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 ) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase ) , ResNetConvLayer(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 , activation=__UpperCAmelCase ) , )
UpperCAmelCase__ = ACTaFN[activation]
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = hidden_state
UpperCAmelCase__ = self.layer(__UpperCAmelCase )
UpperCAmelCase__ = self.shortcut(__UpperCAmelCase )
hidden_state += residual
UpperCAmelCase__ = self.activation(__UpperCAmelCase )
return hidden_state
class A ( nn.Module ):
def __init__(self : Tuple , __UpperCAmelCase : ResNetConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
UpperCAmelCase__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__UpperCAmelCase , __UpperCAmelCase , stride=__UpperCAmelCase , activation=config.hidden_act ) , *[layer(__UpperCAmelCase , __UpperCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def lowercase_ (self : int , __UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
UpperCAmelCase__ = input
for layer in self.layers:
UpperCAmelCase__ = layer(__UpperCAmelCase )
return hidden_state
class A ( nn.Module ):
def __init__(self : Optional[int] , __UpperCAmelCase : ResNetConfig ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
UpperCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__UpperCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , depth=__UpperCAmelCase ) )
def lowercase_ (self : Any , __UpperCAmelCase : Tensor , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
UpperCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase__ = hidden_states + (hidden_state,)
UpperCAmelCase__ = stage_module(__UpperCAmelCase )
if output_hidden_states:
UpperCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase , )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : int = ResNetConfig
__UpperCAmelCase : List[Any] = 'resnet'
__UpperCAmelCase : List[str] = 'pixel_values'
__UpperCAmelCase : int = True
def lowercase_ (self : Optional[int] , __UpperCAmelCase : int ) -> Any:
"""simple docstring"""
if isinstance(__UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
elif isinstance(__UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowercase_ (self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = value
UpperCamelCase__ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCamelCase__ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , UpperCAmelCase_ , )
class A ( UpperCAmelCase_ ):
def __init__(self : str , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().__init__(__UpperCAmelCase )
UpperCAmelCase__ = config
UpperCAmelCase__ = ResNetEmbeddings(__UpperCAmelCase )
UpperCAmelCase__ = ResNetEncoder(__UpperCAmelCase )
UpperCAmelCase__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase_ (self : Any , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
UpperCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ = self.embedder(__UpperCAmelCase )
UpperCAmelCase__ = self.encoder(
__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase )
UpperCAmelCase__ = encoder_outputs[0]
UpperCAmelCase__ = self.pooler(__UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase_ , )
class A ( UpperCAmelCase_ ):
def __init__(self : Union[str, Any] , __UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
super().__init__(__UpperCAmelCase )
UpperCAmelCase__ = config.num_labels
UpperCAmelCase__ = ResNetModel(__UpperCAmelCase )
# classification head
UpperCAmelCase__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase_ (self : List[str] , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ = self.resnet(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase )
UpperCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase__ = self.classifier(__UpperCAmelCase )
UpperCAmelCase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase__ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase__ = "single_label_classification"
else:
UpperCAmelCase__ = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCAmelCase__ = MSELoss()
if self.num_labels == 1:
UpperCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCAmelCase__ = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase__ = CrossEntropyLoss()
UpperCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase__ = BCEWithLogitsLoss()
UpperCAmelCase__ = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
if not return_dict:
UpperCAmelCase__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , UpperCAmelCase_ , )
class A ( UpperCAmelCase_ , UpperCAmelCase_ ):
def __init__(self : Optional[Any] , __UpperCAmelCase : Any ) -> str:
"""simple docstring"""
super().__init__(__UpperCAmelCase )
super()._init_backbone(__UpperCAmelCase )
UpperCAmelCase__ = [config.embedding_size] + config.hidden_sizes
UpperCAmelCase__ = ResNetEmbeddings(__UpperCAmelCase )
UpperCAmelCase__ = ResNetEncoder(__UpperCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__UpperCAmelCase )
@replace_return_docstrings(output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def lowercase_ (self : List[Any] , __UpperCAmelCase : Tensor , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None ) -> BackboneOutput:
"""simple docstring"""
UpperCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase__ = self.embedder(__UpperCAmelCase )
UpperCAmelCase__ = self.encoder(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase )
UpperCAmelCase__ = outputs.hidden_states
UpperCAmelCase__ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
UpperCAmelCase__ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__UpperCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__UpperCAmelCase , )
| 65 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = os.path.join(args.tf_model_dir, """parameters.json""" )
snake_case_ :Any = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
snake_case_ :Optional[int] = args.output + """.pt"""
snake_case_ :List[str] = OrderedDict()
with tf.device("""/CPU:0""" ):
snake_case_ :Dict = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ :str = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ :List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
snake_case_ :Any = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
snake_case_ :Optional[int] = 8
snake_case_ :List[str] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :List[str] = torch.tensor(_lowercase )
elif key_name.startswith("""model/moe""" ):
snake_case_ :Tuple = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
snake_case_ :Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/softmlp/kernel""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
snake_case_ :Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
snake_case_ :Dict = key_name[-9:-7]
for i in range(16 ):
snake_case_ :str = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
snake_case_ :Tuple = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/mlp""" ):
snake_case_ :Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p1/bias""" ):
snake_case_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
snake_case_ :str = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/kernel""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/p2/bias""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
snake_case_ :Any = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("""model/ln""" ):
snake_case_ :Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :str = """model.blocks.%d.feed_forward.norm.bias""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :int = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Dict = """model.blocks.%d.feed_forward.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.startswith("""model/att""" ):
snake_case_ :List[str] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
snake_case_ :Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ :Dict = state[:, 0, :, :]
snake_case_ :int = state[:, 1, :, :]
snake_case_ :List[str] = state[:, 2, :, :]
snake_case_ :str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
snake_case_ :int = torch.tensor(_lowercase )
snake_case_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
snake_case_ :Dict = torch.tensor(_lowercase )
snake_case_ :Dict = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
snake_case_ :Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("""/o/kernel""" ):
snake_case_ :str = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
snake_case_ :str = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ :Any = torch.tensor(_lowercase )
elif key_name.startswith("""model/an""" ):
snake_case_ :Optional[int] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
snake_case_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
snake_case_ :Optional[int] = vnp.copy() # same because it is one dimensional
snake_case_ :Tuple = torch.tensor(_lowercase )
elif key_name.endswith("""/g""" ):
snake_case_ :Union[str, Any] = """model.blocks.%d.self_attn.norm.weight""" % player
snake_case_ :Dict = vnp.copy() # same because it is one dimensional
snake_case_ :Optional[int] = torch.tensor(_lowercase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
snake_case_ :List[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
snake_case_ :Optional[Any] = """model.%s.weight""" % nlayer
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
if key_name.startswith("""model/wte""" ):
snake_case_ :Tuple = """lm_head.weight"""
snake_case_ :List[str] = vnp.copy() # same in embedded
snake_case_ :List[Any] = torch.tensor(_lowercase )
elif key_name.startswith("""model/wob""" ):
snake_case_ :str = """final_logits_bias"""
snake_case_ :Any = vnp.copy() # same in embedded
snake_case_ :List[Any] = state.reshape((1, -1) )
snake_case_ :Union[str, Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
snake_case_ :str = """model.last_project.weight"""
snake_case_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ :int = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
snake_case_ :Optional[int] = """model.last_project.bias"""
snake_case_ :Tuple = vnp.copy() # same because it is one dimensional
snake_case_ :Any = torch.tensor(_lowercase )
torch.save(_lowercase, args.output )
if __name__ == "__main__":
__a = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__a = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 66 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
__lowerCAmelCase = 3_00 # TEMPERATURE (unit = K)
def snake_case_ ( snake_case , snake_case , snake_case , ) -> Tuple:
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
__lowercase : Tuple = ['input_values', 'attention_mask']
def __init__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 16_000 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = False , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 16 , lowerCAmelCase__ = 64 , lowerCAmelCase__ = "hann_window" , lowerCAmelCase__ = 1.0 , lowerCAmelCase__ = 80 , lowerCAmelCase__ = 7_600 , lowerCAmelCase__ = 1E-10 , lowerCAmelCase__ = 2 , lowerCAmelCase__ = True , **lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ )
lowercase__: Dict = do_normalize
lowercase__: Optional[Any] = return_attention_mask
lowercase__: str = num_mel_bins
lowercase__: Dict = hop_length
lowercase__: Dict = win_length
lowercase__: Optional[int] = win_function
lowercase__: Any = frame_signal_scale
lowercase__: Tuple = fmin
lowercase__: Tuple = fmax
lowercase__: Dict = mel_floor
lowercase__: int = reduction_factor
lowercase__: List[Any] = win_length * sampling_rate // 1_000
lowercase__: Optional[Any] = hop_length * sampling_rate // 1_000
lowercase__: Optional[int] = optimal_fft_length(self.sample_size )
lowercase__: Optional[Any] = (self.n_fft // 2) + 1
lowercase__: str = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase__ )
lowercase__: Optional[Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowerCAmelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowercase__: List[str] = np.array(lowerCAmelCase__ , np.intaa )
lowercase__: Tuple = []
for vector, length in zip(lowerCAmelCase__ , attention_mask.sum(-1 ) ):
lowercase__: int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase__: Tuple = padding_value
normed_input_values.append(lowerCAmelCase__ )
else:
lowercase__: Union[str, Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
lowercase__: List[str] = spectrogram(
lowerCAmelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowercase__: Dict = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
lowercase__: str = None
if audio_target is not None:
lowercase__: List[str] = self._process_audio(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ , )
if inputs is None:
return inputs_target
else:
lowercase__: int = inputs_target['input_values']
lowercase__: List[str] = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowercase__: Optional[int] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> BatchFeature:
'''simple docstring'''
lowercase__: int = isinstance(lowerCAmelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase__: Tuple = is_batched_numpy or (
isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__: Dict = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ):
lowercase__: Optional[Any] = np.asarray(lowerCAmelCase__ , dtype=np.floataa )
elif isinstance(lowerCAmelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowercase__: Optional[Any] = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__: Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
lowercase__: str = self.feature_size
# convert into correct format for padding
if is_target:
lowercase__: int = [self._extract_mel_features(lowerCAmelCase__ ) for waveform in speech]
lowercase__: Dict = BatchFeature({'input_values': features} )
lowercase__: Union[str, Any] = self.num_mel_bins
else:
lowercase__: Union[str, Any] = BatchFeature({'input_values': speech} )
lowercase__: Dict = self.pad(
lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase__: List[str] = feature_size_hack
# convert input values to correct format
lowercase__: Union[str, Any] = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
lowercase__: List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowerCAmelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowercase__: Dict = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowerCAmelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowercase__: Tuple = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowercase__: Tuple = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowercase__: str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowercase__: Tuple = (
attention_mask
if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__: str = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=lowerCAmelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
lowercase__: Union[str, Any] = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowercase__: int = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowercase__: str = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 288 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Tuple = len(A_ )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase__ : Union[str, Any] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase__ : List[str] = arr[mi::-1] + arr[mi + 1 : len(A_ )]
# Reverse whole list
lowerCAmelCase__ : Dict = arr[cur - 1 :: -1] + arr[cur : len(A_ )]
cur -= 1
return arr
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 106 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[Any] ,lowercase_ : int ,lowercase_ : str ):
lowerCAmelCase__ : Optional[int] = hf_hub_download(
repo_id='''nateraw/video-demo''' ,filename='''archery.mp4''' ,repo_type='''dataset''' )
lowerCAmelCase__ : Tuple = VideoClassificationPipeline(model=lowercase_ ,image_processor=lowercase_ ,top_k=2 )
lowerCAmelCase__ : Optional[int] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def __lowerCAmelCase ( self : str ,lowercase_ : int ,lowercase_ : Dict ):
for example in examples:
lowerCAmelCase__ : Dict = video_classifier(lowercase_ )
self.assertEqual(
lowercase_ ,[
{'''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ )},
{'''score''': ANY(lowercase_ ), '''label''': ANY(lowercase_ )},
] ,)
@require_torch
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : Optional[int] = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase__ : List[str] = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 1_0} ,crop_size={'''height''': 1_0, '''width''': 1_0} )
lowerCAmelCase__ : Optional[Any] = pipeline(
'''video-classification''' ,model=lowercase_ ,feature_extractor=lowercase_ ,frame_sampling_rate=4 )
lowerCAmelCase__ : Optional[int] = hf_hub_download(repo_id='''nateraw/video-demo''' ,filename='''archery.mp4''' ,repo_type='''dataset''' )
lowerCAmelCase__ : Optional[int] = video_classifier(lowercase_ ,top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ ,decimals=4 ) ,[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] ,)
lowerCAmelCase__ : Dict = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(lowercase_ ,decimals=4 ) ,[
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
] ,)
@require_tf
def __lowerCAmelCase ( self : int ):
pass
| 106 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCAmelCase ( unittest.TestCase ):
A__ : Dict = MODEL_FOR_MASKED_LM_MAPPING
A__ : List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
snake_case : Dict = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 3_80_15, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 2_55_06, "token_str": " accuser"},
] , )
snake_case : Optional[int] = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 3_80_15,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 2_55_06,
"token_str": " accuser",
},
] , )
snake_case : List[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 1_36_06, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 29_41, "token_str": " Te"},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE (self : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
snake_case : Union[str, Any] = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 3_56_76, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 1_64_16, "token_str": "ELS"},
] , )
snake_case : int = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 3_56_76,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 1_64_16, "token_str": "ELS"},
] , )
snake_case : Any = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 29_41, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 1_36_06, "token_str": " Clara"},
] , )
snake_case : Tuple = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=6 ) , [
[
{
"score": 2.2e-05,
"token": 3_56_76,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 1_64_16, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 3_56_76,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 1_64_16, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
snake_case : Any = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
snake_case : List[str] = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case : Any = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(snake_case__ )
@slow
@require_tf
def _SCREAMING_SNAKE_CASE (self : str ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{"sequence": "My name is John", "score": 0.008, "token": 6_10, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 15_73, "token_str": " Chris"},
] , )
snake_case : Any = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 22_01,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 1_27_90,
"token_str": " Lyon",
},
] , )
snake_case : List[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 34_99, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 1_36_06, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 29_41, "token_str": " Te"},
] , )
@require_torch
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Any:
'''simple docstring'''
snake_case : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
snake_case : str = None
snake_case : Optional[int] = None
self.run_pipeline_test(snake_case__ , [] )
@require_tf
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
snake_case : Tuple = None
snake_case : Tuple = None
self.run_pipeline_test(snake_case__ , [] )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
snake_case : int = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : Dict = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] , snake_case__ : Dict ) -> str:
'''simple docstring'''
snake_case : List[str] = fill_masker.tokenizer
snake_case : Union[str, Any] = fill_masker.model
snake_case : List[str] = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : Dict = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : List[Any] = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
snake_case__ , [
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
] , )
with self.assertRaises(snake_case__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(snake_case__ ):
fill_masker("This is" )
self.run_test_top_k(snake_case__ , snake_case__ )
self.run_test_targets(snake_case__ , snake_case__ )
self.run_test_top_k_targets(snake_case__ , snake_case__ )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case__ , snake_case__ )
self.fill_mask_with_multiple_masks(snake_case__ , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
snake_case : Dict = tokenizer.get_vocab()
snake_case : List[str] = sorted(vocab.keys() )[:2]
# Pipeline argument
snake_case : List[str] = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ , targets=snake_case__ )
snake_case : Union[str, Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : Optional[int] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , snake_case__ )
snake_case : Union[str, Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(snake_case__ ) )
# Call argument
snake_case : List[Any] = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case__ )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : List[str] = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , snake_case__ )
snake_case : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(snake_case__ ) )
# Score equivalence
snake_case : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case__ )
snake_case : int = [top_mask["token_str"] for top_mask in outputs]
snake_case : List[str] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ) == set(snake_case__ ):
snake_case : Any = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=snake_case__ )
snake_case : Optional[int] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
# Raises with invalid
with self.assertRaises(snake_case__ ):
snake_case : Optional[int] = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case__ ):
snake_case : Dict = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""] )
with self.assertRaises(snake_case__ ):
snake_case : int = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="" )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ) -> int:
'''simple docstring'''
snake_case : Any = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ , top_k=2 )
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
snake_case : str = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case__ , [
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
] , )
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[str, Any] , snake_case__ : int ) -> int:
'''simple docstring'''
snake_case : int = tokenizer.get_vocab()
snake_case : int = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
# top_k=2, ntargets=3
snake_case : str = sorted(vocab.keys() )[:3]
snake_case : int = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=snake_case__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
snake_case : Any = [el["token_str"] for el in sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case__ ).issubset(snake_case__ ):
snake_case : List[Any] = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=snake_case__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case__ ) , nested_simplify(snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : str = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : int = tokenizer.get_vocab()
# String duplicates + id duplicates
snake_case : Optional[int] = sorted(vocab.keys() )[:3]
snake_case : str = [targets[0], targets[1], targets[0], targets[2], targets[1]]
snake_case : List[Any] = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=snake_case__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(snake_case__ ) , 3 )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple , snake_case__ : str ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = FillMaskPipeline(model=snake_case__ , tokenizer=snake_case__ )
snake_case : Dict = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
snake_case__ , [
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
[
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
{"sequence": ANY(snake_case__ ), "score": ANY(snake_case__ ), "token": ANY(snake_case__ ), "token_str": ANY(snake_case__ )},
],
] , )
| 10 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = """▁"""
__lowerCamelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCamelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
__lowerCamelCase = {
"""facebook/xglm-564M""": 20_48,
}
class UpperCAmelCase ( A_ ):
A__ : Any = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__(self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Dict="</s>" , snake_case__ : Any="<s>" , snake_case__ : str="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Any , ) -> None:
'''simple docstring'''
snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : List[str] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Union[str, Any] = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
snake_case : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : int = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case : Any = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
snake_case : Tuple = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case__ )
snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : Union[str, Any] = self.__dict__.copy()
snake_case : str = None
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : Dict , snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case : List[str] = {}
snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Tuple = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ ))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 10 | 1 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any]=14 , lowerCamelCase_ : Tuple=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : int=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[int]=99 , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : List[Any]=4 , lowerCamelCase_ : Tuple=37 , lowerCamelCase_ : Dict="gelu" , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : Optional[Any]=5_12 , lowerCamelCase_ : Optional[int]=16 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Tuple=0.02 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : str=4 , lowerCamelCase_ : Dict=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : Dict = is_training
SCREAMING_SNAKE_CASE : str = use_token_type_ids
SCREAMING_SNAKE_CASE : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = use_mc_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = scope
SCREAMING_SNAKE_CASE : Optional[int] = self.vocab_size - 1
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Any = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : int = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , *lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = CTRLModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ )
model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , *lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = CTRLLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
), (
SCREAMING_SNAKE_CASE
),
) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str , *lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = CTRLForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class UpperCamelCase__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (CTRLLMHeadModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = CTRLModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = CTRLModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=lowerCamelCase_ ) # Legal the president is
SCREAMING_SNAKE_CASE : Tuple = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE : List[Any] = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase_ )
| 323 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (CMStochasticIterativeScheduler,)
SCREAMING_SNAKE_CASE__ = 10
def lowerCamelCase_ ( self : List[str] , **lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCamelCase_ )
return config
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0](**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE : Dict = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : List[str] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1
scheduler.set_timesteps(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler.timesteps
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCamelCase_ ):
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[int] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = scheduler.timesteps
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCamelCase_ , lowerCamelCase_ )
# 2. predict noise residual
SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ , lowerCamelCase_ )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = pred_prev_sample
SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = [39, 30, 12, 15, 0]
with self.assertRaises(lowerCamelCase_ , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ , timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : int = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 323 | 1 |
"""simple docstring"""
def A ( snake_case :bytes ) -> str:
return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def A ( snake_case :str ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(snake_case ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : List[Any] = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase : List[Any] = {
"Salesforce/codegen-350M-mono": 2_0_4_8,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["input_ids", "attention_mask"]
lowercase = CodeGenTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
if kwargs.pop('add_bos_token' , __UpperCAmelCase ):
__UpperCamelCase = kwargs.pop('name_or_path' , '' )
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space:
__UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**__UpperCAmelCase )
__UpperCamelCase = add_prefix_space
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
__UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = super().decode(
token_ids=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , **__UpperCAmelCase , )
if truncate_before_pattern is not None and len(__UpperCAmelCase ) > 0:
__UpperCamelCase = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
return decoded_text
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
def find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__UpperCamelCase = pattern.search(__UpperCAmelCase , __UpperCAmelCase )
return m.start() if m else -1
__UpperCamelCase = [re.compile(__UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCamelCase = list(re.finditer('^print' , __UpperCAmelCase , re.MULTILINE ) )
if len(__UpperCAmelCase ) > 1:
__UpperCamelCase = completion[: prints[1].start()]
__UpperCamelCase = list(re.finditer('^def' , __UpperCAmelCase , re.MULTILINE ) )
if len(__UpperCAmelCase ) > 1:
__UpperCamelCase = completion[: defs[1].start()]
__UpperCamelCase = 0
__UpperCamelCase = [
pos for pos in [find_re(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for terminal in terminals] if pos != -1
]
if len(__UpperCAmelCase ) > 0:
return completion[: min(__UpperCAmelCase )]
else:
return completion
| 263 | 1 |
import random
from typing import Any
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> list[Any]:
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
lowercase : List[Any] = random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
lowercase : Any = random.randint(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
lowercase , lowercase : int = data[b], data[a]
return data
if __name__ == "__main__":
lowercase : str = [0, 1, 2, 3, 4, 5, 6, 7]
lowercase : List[str] = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 20 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ : Optional[Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = ['''MobileViTFeatureExtractor''']
lowerCAmelCase__ : Optional[int] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 143 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 334 |
'''simple docstring'''
import os
from distutils.util import strtobool
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[Any]:
for e in env_keys:
A: Dict = int(os.environ.get(__lowercase , -1 ) )
if val >= 0:
return val
return default
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False ) -> List[str]:
A: str = os.environ.get(__lowercase , str(__lowercase ) )
return strtobool(__lowercase ) == 1 # As its name indicates `strtobool` actually returns an int...
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase="no" ) -> str:
A: Optional[int] = os.environ.get(__lowercase , str(__lowercase ) )
return value
| 334 | 1 |
'''simple docstring'''
from __future__ import annotations
a_ : Tuple = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a_ ( __snake_case : Matrix ) -> Matrix | None:
"""simple docstring"""
if location := find_empty_location(__snake_case ):
lowerCamelCase_, lowerCamelCase_ =location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ):
lowerCamelCase_ =digit
if sudoku(__snake_case ) is not None:
return grid
lowerCamelCase_ =0
return None
def a_ ( __snake_case : Matrix ) -> None:
"""simple docstring"""
for row in grid:
for cell in row:
print(__snake_case , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a_ : Tuple = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 75 |
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( __UpperCAmelCase ) -> int:
if not nums:
return 0
lowerCAmelCase__ : List[Any] = nums[0]
lowerCAmelCase__ : List[str] = 0
for num in nums[1:]:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (
max_excluding + num,
max(__UpperCAmelCase , __UpperCAmelCase ),
)
return max(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 | 0 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class a_ ( unittest.TestCase ):
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :Optional[int] = [10, 20, 30, 40, 50, 60]
lowercase_ :Dict = [2, 4, 6, 8, 10, 12]
lowercase_ :Tuple = 100
self.assertEqual(kp.calc_profit(A__ , A__ , A__ ) , 210 )
def lowercase__ ( self : Dict ):
"""simple docstring"""
self.assertRaisesRegex(A__ , "max_weight must greater than zero." )
def lowercase__ ( self : Dict ):
"""simple docstring"""
self.assertRaisesRegex(A__ , "Weight can not be negative." )
def lowercase__ ( self : int ):
"""simple docstring"""
self.assertRaisesRegex(A__ , "Profit can not be negative." )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
self.assertRaisesRegex(A__ , "max_weight must greater than zero." )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
self.assertRaisesRegex(
A__ , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase : int ={
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any =['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str =['''CLIPFeatureExtractor''']
lowerCAmelCase : Optional[int] =['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any =[
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] =[
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] =[
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 147 | 0 |
'''simple docstring'''
def _lowercase ( __A ):
'''simple docstring'''
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
__UpperCamelCase = [True] * (num + 1)
__UpperCamelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p ,num + 1 ,__A ):
__UpperCamelCase = False
p += 1
return [prime for prime in range(2 ,num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Dict = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 349 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _lowercase ( __A ,__A=False ):
'''simple docstring'''
try:
__UpperCamelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__UpperCamelCase = default
else:
# KEY is set, convert it to True or False.
try:
__UpperCamelCase = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
a__ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
a__ : Union[str, Any] = parse_flag_from_env('RUN_REMOTE', default=False)
a__ : Any = parse_flag_from_env('RUN_LOCAL', default=True)
a__ : List[Any] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a__ : Optional[int] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a__ : Optional[int] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a__ : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a__ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a__ : str = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a__ : str = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a__ : Tuple = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def _lowercase ( __A ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires faiss""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires regex""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires elasticsearch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__UpperCamelCase = unittest.skip("""test requires sqlalchemy""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires PyTorch""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.TF_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires TensorFlow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires JAX""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
__UpperCamelCase = unittest.skip("""test requires Pillow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
def _require_spacy_model(__A ):
try:
import spacy # noqa F401
spacy.load(__A )
except ImportError:
return unittest.skip("""test requires spacy""" )(__A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(__A ) )(__A )
else:
return test_case
return _require_spacy_model
def _lowercase ( __A ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(__A )
else:
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__UpperCamelCase = unittest.skip("""test is slow""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__UpperCamelCase = unittest.skip("""test is local""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__UpperCamelCase = unittest.skip("""test is packaged""" )(__A )
return test_case
def _lowercase ( __A ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__UpperCamelCase = unittest.skip("""test requires remote""" )(__A )
return test_case
def _lowercase ( *__A ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__A ) and name.startswith("""test""" ):
for decorator in decorators:
__UpperCamelCase = decorator(__A )
setattr(cls ,__A ,__A )
return cls
return decorate
class UpperCAmelCase__ ( UpperCAmelCase_):
pass
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
@contextmanager
def _lowercase ( __A=OfflineSimulationMode.CONNECTION_FAILS ,__A=1E-16 ):
'''simple docstring'''
__UpperCamelCase = requests.Session().request
def timeout_request(__A ,__A ,__A ,**__A ):
# Change the url to an invalid url so that the connection hangs
__UpperCamelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
__UpperCamelCase = timeout
try:
return online_request(__A ,__A ,**__A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__UpperCamelCase = url
__UpperCamelCase = e.args[0]
__UpperCamelCase = (max_retry_error.args[0].replace("""10.255.255.1""" ,f"OfflineMock[{url}]" ),)
__UpperCamelCase = (max_retry_error,)
raise
def raise_connection_error(__A ,__A ,**__A ):
raise requests.ConnectionError("""Offline mode is enabled.""" ,request=__A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" ,__A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" ,__A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" ,__A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _lowercase ( *__A ,**__A ):
'''simple docstring'''
__UpperCamelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__A ,**__A ) as tmp_dir:
try:
os.chdir(__A )
yield
finally:
os.chdir(__A )
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _lowercase ( ):
'''simple docstring'''
import gc
gc.collect()
__UpperCamelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _lowercase ( __A ,__A ):
'''simple docstring'''
return deepcopy(__A ).integers(0 ,100 ,10 ).tolist() == deepcopy(__A ).integers(0 ,100 ,10 ).tolist()
def _lowercase ( __A ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(__A ,*__A ,**__A ):
try:
return func(*__A ,**__A )
except HTTPError as err:
if str(__A ).startswith("""500""" ) or str(__A ).startswith("""502""" ):
pytest.xfail(str(__A ) )
raise err
return decorator.decorator(_wrapper ,__A )
class UpperCAmelCase__ :
def __init__( self , lowercase , lowercase , lowercase ) -> str:
__UpperCamelCase = returncode
__UpperCamelCase = stdout
__UpperCamelCase = stderr
async def _lowercase ( __A ,__A ):
'''simple docstring'''
while True:
__UpperCamelCase = await stream.readline()
if line:
callback(__A )
else:
break
async def _lowercase ( __A ,__A=None ,__A=None ,__A=None ,__A=False ,__A=False ):
'''simple docstring'''
if echo:
print("""\nRunning: """ ,""" """.join(__A ) )
__UpperCamelCase = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=__A ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__A ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__UpperCamelCase = []
__UpperCamelCase = []
def tee(__A ,__A ,__A ,__A="" ):
__UpperCamelCase = line.decode("""utf-8""" ).rstrip()
sink.append(__A )
if not quiet:
print(__A ,__A ,file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout ,lambda __A : tee(__A ,__A ,sys.stdout ,label="""stdout:""" ) ),
_read_stream(p.stderr ,lambda __A : tee(__A ,__A ,sys.stderr ,label="""stderr:""" ) ),
] ,timeout=__A ,)
return _RunOutput(await p.wait() ,__A ,__A )
def _lowercase ( __A ,__A=None ,__A=None ,__A=180 ,__A=False ,__A=True ):
'''simple docstring'''
__UpperCamelCase = asyncio.get_event_loop()
__UpperCamelCase = loop.run_until_complete(
_stream_subprocess(__A ,env=__A ,stdin=__A ,timeout=__A ,quiet=__A ,echo=__A ) )
__UpperCamelCase = """ """.join(__A )
if result.returncode > 0:
__UpperCamelCase = """\n""".join(result.stderr )
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output." )
return result
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = os.environ.get("""PYTEST_XDIST_WORKER""" ,"""gw0""" )
__UpperCamelCase = re.sub(R"""^gw""" ,"""""" ,__A ,0 ,re.M )
return int(__A )
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = 29_500
__UpperCamelCase = pytest_xdist_worker_id()
return port + uniq_delta
| 349 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Any:
lowercase__ : List[Any] = tempfile.mkdtemp()
lowercase__ : Optional[Any] = SamImageProcessor()
lowercase__ : Dict = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Dict , **lowercase_ : List[Any] ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Tuple ) -> Dict:
lowercase__ : str = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowercase__ : List[str] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : int ) -> Optional[Any]:
lowercase__ : Union[str, Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[str] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowercase__ : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Any = SamProcessor(image_processor=lowercase_ )
lowercase__ : int = self.prepare_image_inputs()
lowercase__ : Tuple = image_processor(lowercase_ , return_tensors="np" )
lowercase__ : List[str] = processor(images=lowercase_ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : Optional[int] = SamProcessor(image_processor=lowercase_ )
lowercase__ : Union[str, Any] = [torch.ones((1, 3, 5, 5) )]
lowercase__ : Optional[int] = [[17_64, 26_46]]
lowercase__ : str = [[6_83, 10_24]]
lowercase__ : List[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
lowercase__ : Optional[Any] = processor.post_process_masks(
lowercase_ , torch.tensor(lowercase_ ) , torch.tensor(lowercase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
lowercase__ : Any = [np.ones((1, 3, 5, 5) )]
lowercase__ : Optional[int] = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
lowercase__ : Optional[Any] = [[1, 0], [0, 1]]
with self.assertRaises(lowercase_ ):
lowercase__ : Union[str, Any] = processor.post_process_masks(lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) )
@require_vision
@require_tf
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> Dict:
lowercase__ : int = tempfile.mkdtemp()
lowercase__ : Dict = SamImageProcessor()
lowercase__ : Optional[int] = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] , **lowercase_ : Optional[Any] ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def __UpperCamelCase ( self : str ) -> Dict:
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Any:
lowercase__ : int = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowercase__ : int = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
lowercase__ : Optional[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Optional[int] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
lowercase__ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def __UpperCamelCase ( self : Any ) -> List[Any]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = SamProcessor(image_processor=lowercase_ )
lowercase__ : int = self.prepare_image_inputs()
lowercase__ : Optional[int] = image_processor(lowercase_ , return_tensors="np" )
lowercase__ : str = processor(images=lowercase_ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __UpperCamelCase ( self : int ) -> Dict:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : str = SamProcessor(image_processor=lowercase_ )
lowercase__ : Dict = [tf.ones((1, 3, 5, 5) )]
lowercase__ : Optional[Any] = [[17_64, 26_46]]
lowercase__ : Any = [[6_83, 10_24]]
lowercase__ : List[Any] = processor.post_process_masks(lowercase_ , lowercase_ , lowercase_ , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
lowercase__ : Any = processor.post_process_masks(
lowercase_ , tf.convert_to_tensor(lowercase_ ) , tf.convert_to_tensor(lowercase_ ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
lowercase__ : Tuple = [np.ones((1, 3, 5, 5) )]
lowercase__ : Union[str, Any] = processor.post_process_masks(
lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
lowercase__ : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowercase__ : str = processor.post_process_masks(
lowercase_ , np.array(lowercase_ ) , np.array(lowercase_ ) , return_tensors="tf" )
@require_vision
@require_torchvision
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
lowercase__ : str = tempfile.mkdtemp()
lowercase__ : Tuple = SamImageProcessor()
lowercase__ : Dict = SamProcessor(lowercase_ )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor
def __UpperCamelCase ( self : Any ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowercase__ : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowercase__ : Dict = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[int] = SamProcessor(image_processor=lowercase_ )
lowercase__ : int = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowercase__ : List[str] = [tf.convert_to_tensor(lowercase_ )]
lowercase__ : Dict = [torch.tensor(lowercase_ )]
lowercase__ : int = [[17_64, 26_46]]
lowercase__ : Dict = [[6_83, 10_24]]
lowercase__ : str = processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , return_tensors="tf" )
lowercase__ : Tuple = processor.post_process_masks(
lowercase_ , lowercase_ , lowercase_ , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Any = SamProcessor(image_processor=lowercase_ )
lowercase__ : Optional[int] = self.prepare_image_inputs()
lowercase__ : Any = image_processor(lowercase_ , return_tensors="pt" )["pixel_values"].numpy()
lowercase__ : str = processor(images=lowercase_ , return_tensors="pt" )["pixel_values"].numpy()
lowercase__ : Optional[int] = image_processor(lowercase_ , return_tensors="tf" )["pixel_values"].numpy()
lowercase__ : Any = processor(images=lowercase_ , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
| 333 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 333 | 1 |
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], UpperCAmelCase__ : str = "", UpperCAmelCase__ : bool = False ):
# Mapping from the first character of the prefix of the node
__lowercase = {}
# A node will be a leaf if the tree contains its word
__lowercase = is_leaf
__lowercase = prefix
def _lowercase ( self : Tuple, UpperCAmelCase__ : str ):
__lowercase = 0
for q, w in zip(self.prefix, UpperCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _lowercase ( self : Any, UpperCAmelCase__ : list[str] ):
for word in words:
self.insert(UpperCAmelCase__ )
def _lowercase ( self : int, UpperCAmelCase__ : str ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__lowercase = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__lowercase = RadixNode(prefix=UpperCAmelCase__, is_leaf=UpperCAmelCase__ )
else:
__lowercase = self.nodes[word[0]]
__lowercase ,__lowercase ,__lowercase = incoming_node.match(
UpperCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(UpperCAmelCase__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__lowercase = remaining_prefix
__lowercase = self.nodes[matching_string[0]]
__lowercase = RadixNode(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = aux_node
if remaining_word == "":
__lowercase = True
else:
self.nodes[matching_string[0]].insert(UpperCAmelCase__ )
def _lowercase ( self : Any, UpperCAmelCase__ : str ):
__lowercase = self.nodes.get(word[0], UpperCAmelCase__ )
if not incoming_node:
return False
else:
__lowercase ,__lowercase ,__lowercase = incoming_node.match(
UpperCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : str ):
__lowercase = self.nodes.get(word[0], UpperCAmelCase__ )
if not incoming_node:
return False
else:
__lowercase ,__lowercase ,__lowercase = incoming_node.match(
UpperCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(UpperCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__lowercase = list(self.nodes.values() )[0]
__lowercase = merging_node.is_leaf
self.prefix += merging_node.prefix
__lowercase = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__lowercase = False
# If there is 1 edge, we merge it with its child
else:
__lowercase = list(incoming_node.nodes.values() )[0]
__lowercase = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__lowercase = merging_node.nodes
return True
def _lowercase ( self : Tuple, UpperCAmelCase__ : int = 0 ):
if self.prefix != "":
print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def _A ( ) -> bool:
'''simple docstring'''
__lowercase = "banana bananas bandana band apple all beast".split()
__lowercase = RadixNode()
root.insert_many(UpperCamelCase_)
assert all(root.find(UpperCamelCase_) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def _A ( ) -> None:
'''simple docstring'''
assert test_trie()
def _A ( ) -> None:
'''simple docstring'''
__lowercase = RadixNode()
__lowercase = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(UpperCamelCase_)
print("Words:", UpperCamelCase_)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 17 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_a = 'CompVis/stable-diffusion-v1-1'
_a = 'CompVis/stable-diffusion-v1-2'
_a = 'CompVis/stable-diffusion-v1-3'
_a = 'CompVis/stable-diffusion-v1-4'
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Dict, UpperCAmelCase__ : AutoencoderKL, UpperCAmelCase__ : CLIPTextModel, UpperCAmelCase__ : CLIPTokenizer, UpperCAmelCase__ : UNetaDConditionModel, UpperCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], UpperCAmelCase__ : StableDiffusionSafetyChecker, UpperCAmelCase__ : CLIPImageProcessor, UpperCAmelCase__ : bool = True, ):
super()._init_()
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline(
vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=UpperCAmelCase__, requires_safety_checker=UpperCAmelCase__, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
@property
def _lowercase ( self : List[str] ):
return {k: getattr(self, UpperCAmelCase__ ) for k in self.config.keys() if not k.startswith("_" )}
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
self.enable_attention_slicing(UpperCAmelCase__ )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Tuple, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : str, UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Any, ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : Optional[int], ):
return self.pipea(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
@torch.no_grad()
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Union[str, List[str]], UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_1_2, UpperCAmelCase__ : int = 5_0, UpperCAmelCase__ : float = 7.5, UpperCAmelCase__ : Optional[Union[str, List[str]]] = None, UpperCAmelCase__ : Optional[int] = 1, UpperCAmelCase__ : float = 0.0, UpperCAmelCase__ : Optional[torch.Generator] = None, UpperCAmelCase__ : Optional[torch.FloatTensor] = None, UpperCAmelCase__ : Optional[str] = "pil", UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, UpperCAmelCase__ : int = 1, **UpperCAmelCase__ : str, ):
__lowercase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(UpperCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowercase = self.textaimg_sda_a(
prompt=UpperCAmelCase__, height=UpperCAmelCase__, width=UpperCAmelCase__, num_inference_steps=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, negative_prompt=UpperCAmelCase__, num_images_per_prompt=UpperCAmelCase__, eta=UpperCAmelCase__, generator=UpperCAmelCase__, latents=UpperCAmelCase__, output_type=UpperCAmelCase__, return_dict=UpperCAmelCase__, callback=UpperCAmelCase__, callback_steps=UpperCAmelCase__, **UpperCAmelCase__, )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 17 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = '''T5Config'''
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> jnp.ndarray:
'''simple docstring'''
__lowercase= jnp.zeros_like(a__ )
__lowercase= shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__lowercase= shifted_input_ids.at[:, 0].set(a__ )
__lowercase= jnp.where(shifted_input_ids == -1_0_0 , a__ , a__ )
return shifted_input_ids
class A ( _lowercase ):
UpperCamelCase_ : int ='''mt5'''
UpperCamelCase_ : Optional[Any] =MTaConfig
class A ( _lowercase ):
UpperCamelCase_ : List[str] ='''mt5'''
UpperCamelCase_ : Any =MTaConfig
class A ( _lowercase ):
UpperCamelCase_ : str ='''mt5'''
UpperCamelCase_ : Tuple =MTaConfig
| 360 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
UpperCamelCase_ : Dict =1
@register_to_config
def __init__(self , lowerCAmelCase = 2_0_0_0 , lowerCAmelCase = 0.15 , lowerCAmelCase = 0.01 , lowerCAmelCase = 13_48.0 , lowerCAmelCase = 1E-5 , lowerCAmelCase = 1 , ):
# standard deviation of the initial noise distribution
__lowercase= sigma_max
# setable values
__lowercase= None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None ):
return sample
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
__lowercase= torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None ):
__lowercase= sigma_min if sigma_min is not None else self.config.sigma_min
__lowercase= sigma_max if sigma_max is not None else self.config.sigma_max
__lowercase= sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
__lowercase= sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__lowercase= torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
__lowercase= torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _A (self , lowerCAmelCase , lowerCAmelCase ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
__lowercase= timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__lowercase= (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__lowercase= timesteps.to(self.discrete_sigmas.device )
__lowercase= self.discrete_sigmas[timesteps].to(sample.device )
__lowercase= self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
__lowercase= torch.zeros_like(lowerCAmelCase )
__lowercase= (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__lowercase= diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__lowercase= diffusion.unsqueeze(-1 )
__lowercase= drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__lowercase= randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
__lowercase= sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__lowercase= prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ):
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__lowercase= randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__lowercase= torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__lowercase= (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__lowercase= step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__lowercase= step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__lowercase= step_size.unsqueeze(-1 )
__lowercase= sample + step_size * model_output
__lowercase= prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowercase= timesteps.to(original_samples.device )
__lowercase= self.discrete_sigmas.to(original_samples.device )[timesteps]
__lowercase= (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
__lowercase= noise + original_samples
return noisy_samples
def __len__(self ):
return self.config.num_train_timesteps
| 304 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
_snake_case = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
_snake_case = model(lowerCAmelCase_ )['last_hidden_state']
_snake_case = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
# compare the actual values for a slice.
_snake_case = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 42 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase : Any =logging.get_logger(__name__)
class __a ( A__ ):
_lowerCAmelCase : List[str] = ['''pixel_values''']
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_55 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Tuple , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = size if size is not None else {"shortest_edge": 2_24}
UpperCamelCase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56}
UpperCamelCase__ : int = get_size_dict(SCREAMING_SNAKE_CASE , param_name="crop_size" )
UpperCamelCase__ : Dict = do_resize
UpperCamelCase__ : List[str] = size
UpperCamelCase__ : int = resample
UpperCamelCase__ : Optional[int] = do_rescale
UpperCamelCase__ : List[Any] = rescale_factor
UpperCamelCase__ : Union[str, Any] = do_center_crop
UpperCamelCase__ : int = crop_size
UpperCamelCase__ : Optional[int] = do_flip_channel_order
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : PILImageResampling = PIL.Image.BILINEAR , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
UpperCamelCase__ : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[int, float] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : float = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
UpperCamelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : List[str] = resample if resample is not None else self.resample
UpperCamelCase__ : Any = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ : Any = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCamelCase__ : Optional[int] = size if size is not None else self.size
UpperCamelCase__ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , param_name="crop_size" )
UpperCamelCase__ : int = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCamelCase__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCamelCase__ : Any = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCamelCase__ : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCamelCase__ : Any = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Tuple] = None ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[Any] = target_sizes.numpy()
UpperCamelCase__ : Any = []
for idx in range(len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ : List[str] = logits.argmax(dim=1 )
UpperCamelCase__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 189 | 0 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase : Union[str, Any] = get_logger(__name__)
class A__ :
def __init__( self : Any , _a : Optional[str] = None ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =(
os.path.join(_a , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_SCREAMING_SNAKE_CASE =Extractor
def A ( self : List[Any] , _a : str ) -> str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_SCREAMING_SNAKE_CASE =os.path.abspath(_a )
return os.path.join(self.extract_dir , hash_url_to_filename(_a ) )
def A ( self : str , _a : str , _a : bool ) -> bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(_a ) and not (os.path.isdir(_a ) and os.listdir(_a ))
)
def A ( self : List[str] , _a : str , _a : bool = False ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.extractor.infer_extractor_format(_a )
if not extractor_format:
return input_path
_SCREAMING_SNAKE_CASE =self._get_output_path(_a )
if self._do_extract(_a , _a ):
self.extractor.extract(_a , _a , _a )
return output_path
class A__ ( A__ ):
@classmethod
@abstractmethod
def A ( cls : Any , _a : Union[Path, str] , **_a : Tuple ) -> bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
...
class A__ ( A__ , A__ ):
A__ = []
@staticmethod
def A ( _a : Union[Path, str] , _a : int ) -> Dict:
'''simple docstring'''
with open(_a , 'rb' ) as f:
return f.read(_a )
@classmethod
def A ( cls : Any , _a : Union[Path, str] , _a : bytes = b"" ) -> bool:
'''simple docstring'''
if not magic_number:
_SCREAMING_SNAKE_CASE =max(len(_a ) for cls_magic_number in cls.magic_numbers )
try:
_SCREAMING_SNAKE_CASE =cls.read_magic_number(_a , _a )
except OSError:
return False
return any(magic_number.startswith(_a ) for cls_magic_number in cls.magic_numbers )
class A__ ( A__ ):
@classmethod
def A ( cls : Optional[Any] , _a : Union[Path, str] , **_a : int ) -> bool:
'''simple docstring'''
return tarfile.is_tarfile(_a )
@staticmethod
def A ( _a : str , _a : int ) -> Any:
'''simple docstring'''
def resolved(_a : str ) -> str:
return os.path.realpath(os.path.abspath(_a ) )
def badpath(_a : str , _a : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_a , _a ) ).startswith(_a )
def badlink(_a : Tuple , _a : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_SCREAMING_SNAKE_CASE =resolved(os.path.join(_a , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_a )
_SCREAMING_SNAKE_CASE =resolved(_a )
for finfo in members:
if badpath(finfo.name , _a ):
logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
elif finfo.issym() and badlink(_a , _a ):
logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
elif finfo.islnk() and badlink(_a , _a ):
logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
else:
yield finfo
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =tarfile.open(_a )
tar_file.extractall(_a , members=TarExtractor.safemembers(_a , _a ) )
tar_file.close()
class A__ ( A__ ):
A__ = [b'\x1F\x8B']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
with gzip.open(_a , 'rb' ) as gzip_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ ( A__ ):
A__ = [
b'PK\x03\x04',
b'PK\x05\x06', # empty archive
b'PK\x07\x08', # spanned archive
]
@classmethod
def A ( cls : str , _a : Union[Path, str] , _a : bytes = b"" ) -> bool:
'''simple docstring'''
if super().is_extractable(_a , magic_number=_a ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_a , 'rb' ) as fp:
_SCREAMING_SNAKE_CASE =_EndRecData(_a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_SCREAMING_SNAKE_CASE =fp.read(_a ) # CD is where we expect it to be
if len(_a ) == sizeCentralDir:
_SCREAMING_SNAKE_CASE =struct.unpack(_a , _a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
os.makedirs(_a , exist_ok=_a )
with zipfile.ZipFile(_a , 'r' ) as zip_file:
zip_file.extractall(_a )
zip_file.close()
class A__ ( A__ ):
A__ = [b'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
with lzma.open(_a ) as compressed_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ ( A__ ):
A__ = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =rarfile.RarFile(_a )
rf.extractall(_a )
rf.close()
class A__ ( A__ ):
A__ = [b'\x28\xb5\x2F\xFD']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
_SCREAMING_SNAKE_CASE =zstd.ZstdDecompressor()
with open(_a , 'rb' ) as ifh, open(_a , 'wb' ) as ofh:
dctx.copy_stream(_a , _a )
class A__ ( A__ ):
A__ = [b'\x42\x5A\x68']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
with bza.open(_a , 'rb' ) as compressed_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ ( A__ ):
A__ = [b'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import pyazr
os.makedirs(_a , exist_ok=_a )
with pyazr.SevenZipFile(_a , 'r' ) as archive:
archive.extractall(_a )
class A__ ( A__ ):
A__ = [b'\x04\x22\x4D\x18']
@staticmethod
def A ( _a : Union[Path, str] , _a : Union[Path, str] ) -> None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lza.frame
with lza.frame.open(_a , 'rb' ) as compressed_file:
with open(_a , 'wb' ) as extracted_file:
shutil.copyfileobj(_a , _a )
class A__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
A__ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def A ( cls : List[Any] ) -> Dict:
'''simple docstring'''
return max(
len(_a )
for extractor in cls.extractors.values()
if issubclass(_a , _a )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def A ( _a : Union[Path, str] , _a : int ) -> Any:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(_a , magic_number_length=_a )
except OSError:
return b""
@classmethod
def A ( cls : List[Any] , _a : Union[Path, str] , _a : bool = False ) -> bool:
'''simple docstring'''
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=_a , )
_SCREAMING_SNAKE_CASE =cls.infer_extractor_format(_a )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def A ( cls : Tuple , _a : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
'''simple docstring'''
_SCREAMING_SNAKE_CASE =cls._get_magic_number_max_length()
_SCREAMING_SNAKE_CASE =cls._read_magic_number(_a , _a )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_a , magic_number=_a ):
return extractor_format
@classmethod
def A ( cls : int , _a : Union[Path, str] , _a : Union[Path, str] , _a : Optional[str] = None , _a : Optional[BaseExtractor] = "deprecated" , ) -> None:
'''simple docstring'''
os.makedirs(os.path.dirname(_a ) , exist_ok=_a )
# Prevent parallel extractions
_SCREAMING_SNAKE_CASE =str(Path(_a ).with_suffix('.lock' ) )
with FileLock(_a ):
shutil.rmtree(_a , ignore_errors=_a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_a , _a ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=_a , )
_SCREAMING_SNAKE_CASE =extractor if extractor != 'deprecated' else extractor_format
else:
_SCREAMING_SNAKE_CASE =cls.extractors[extractor_format]
return extractor.extract(_a , _a )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=_a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_a ):
return extractor.extract(_a , _a )
| 114 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =int(number**0.5 )
return number == sq * sq
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> tuple[int, int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_SCREAMING_SNAKE_CASE =x_den * y_den * z_den
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def _lowerCAmelCase ( _UpperCamelCase : int = 35 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =set()
_SCREAMING_SNAKE_CASE =42
_SCREAMING_SNAKE_CASE =Fraction(0 )
_SCREAMING_SNAKE_CASE =42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_SCREAMING_SNAKE_CASE =x_num * y_den + x_den * y_num
_SCREAMING_SNAKE_CASE =x_den * y_den
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=2
_SCREAMING_SNAKE_CASE =(
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_SCREAMING_SNAKE_CASE =x_den * x_den * y_den * y_den
if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=-1
_SCREAMING_SNAKE_CASE =x_num * y_num
_SCREAMING_SNAKE_CASE =x_den * y_num + x_num * y_den
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=2
_SCREAMING_SNAKE_CASE =x_num * x_num * y_num * y_num
_SCREAMING_SNAKE_CASE =(
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
for num, den in unique_s:
total += Fraction(_UpperCamelCase , _UpperCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 114 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = '''trajectory_transformer'''
__UpperCamelCase : str = ['''past_key_values''']
__UpperCamelCase : Optional[Any] = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2_49 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=17 , SCREAMING_SNAKE_CASE__=25 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=1_28 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0006 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=5_02_56 , **SCREAMING_SNAKE_CASE__ , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = action_weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reward_weight
SCREAMING_SNAKE_CASE__ : List[Any] = value_weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = block_size
SCREAMING_SNAKE_CASE__ : Tuple = action_dim
SCREAMING_SNAKE_CASE__ : List[str] = observation_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transition_dim
SCREAMING_SNAKE_CASE__ : List[str] = learning_rate
SCREAMING_SNAKE_CASE__ : Dict = n_layer
SCREAMING_SNAKE_CASE__ : Dict = n_head
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_embd
SCREAMING_SNAKE_CASE__ : str = embd_pdrop
SCREAMING_SNAKE_CASE__ : int = attn_pdrop
SCREAMING_SNAKE_CASE__ : Any = resid_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[int] = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ : int = use_cache
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 25 |
import random
def UpperCamelCase__( UpperCamelCase__ : list , UpperCamelCase__ : List[Any] )->tuple:
A__ , A__ , A__ = [], [], []
for element in data:
if element < pivot:
less.append(UpperCamelCase__ )
elif element > pivot:
greater.append(UpperCamelCase__ )
else:
equal.append(UpperCamelCase__ )
return less, equal, greater
def UpperCamelCase__( UpperCamelCase__ : list , UpperCamelCase__ : int )->Optional[int]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(UpperCamelCase__ ) or index < 0:
return None
A__ = items[random.randint(0 , len(UpperCamelCase__ ) - 1 )]
A__ = 0
A__ , A__ , A__ = _partition(UpperCamelCase__ , UpperCamelCase__ )
A__ = len(UpperCamelCase__ )
A__ = len(UpperCamelCase__ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(UpperCamelCase__ , UpperCamelCase__ )
# must be in larger
else:
return quick_select(UpperCamelCase__ , index - (m + count) )
| 193 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( _lowerCamelCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (DPMSolverSinglestepScheduler,)
_SCREAMING_SNAKE_CASE = (("""num_inference_steps""", 25),)
def SCREAMING_SNAKE_CASE__ ( self : Dict , **SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : Dict = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase_ : str = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = self.dummy_sample
lowerCAmelCase_ : Optional[Any] = 0.1 * sample
lowerCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : Tuple = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
lowerCAmelCase_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
lowerCAmelCase_ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase_ ,lowerCAmelCase_ : Dict = sample, sample
for t in range(SCREAMING_SNAKE_CASE_ , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase_ : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
lowerCAmelCase_ : Tuple = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : str ):
pass
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int]=0 , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase_ : Dict = kwargs.pop('num_inference_steps' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = self.dummy_sample
lowerCAmelCase_ : List[str] = 0.1 * sample
lowerCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : List[Any] = self.get_scheduler_config()
lowerCAmelCase_ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase_ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase_ : Dict = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
lowerCAmelCase_ : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , **SCREAMING_SNAKE_CASE_ : int ):
if scheduler is None:
lowerCAmelCase_ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase_ : Any = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = self.scheduler_classes[0]
lowerCAmelCase_ : Tuple = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = 1_0
lowerCAmelCase_ : int = self.dummy_model()
lowerCAmelCase_ : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Tuple = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase_ : Any = 5_0
lowerCAmelCase_ : int = self.dummy_model()
lowerCAmelCase_ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCAmelCase_ : Tuple = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
lowerCAmelCase_ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.25_74 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : str = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase_ : int = self.full_loop(scheduler=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
lowerCAmelCase_ : Dict = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : Any = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase_ : List[str] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , algorithm_type='dpmsolver++' , solver_order=SCREAMING_SNAKE_CASE_ , solver_type=SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE_ , solver_type=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , algorithm_type=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : List[str] = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE_ , solver_type=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , algorithm_type=SCREAMING_SNAKE_CASE_ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE_ ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE_ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_ )
self.check_over_configs(variance_type='learned_range' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ , time_step=0 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Any = self.full_loop()
lowerCAmelCase_ : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = self.full_loop(use_karras_sigmas=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.22_48 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = self.full_loop(prediction_type='v_prediction' )
lowerCAmelCase_ : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.14_53 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_mean.item() - 0.06_49 ) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase_ : Any = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE_ , dynamic_thresholding_ratio=0 )
lowerCAmelCase_ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = 1_0
lowerCAmelCase_ : List[str] = self.dummy_model()
lowerCAmelCase_ : Optional[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
assert sample.dtype == torch.floataa
| 353 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """open-llama"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=1_0_0_0_0_0 , SCREAMING_SNAKE_CASE_ : Optional[int]=4_0_9_6 , SCREAMING_SNAKE_CASE_ : List[Any]=1_1_0_0_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3_2 , SCREAMING_SNAKE_CASE_ : str="silu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-6 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : List[str] = rms_norm_eps
lowerCAmelCase_ : List[Any] = use_cache
lowerCAmelCase_ : Optional[int] = kwargs.pop(
'use_memorry_efficient_attention' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_dropout_prob
lowerCAmelCase_ : Tuple = use_stable_embedding
lowerCAmelCase_ : Optional[Any] = shared_input_output_embedding
lowerCAmelCase_ : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
F"got {self.rope_scaling}" )
lowerCAmelCase_ : int = self.rope_scaling.get('type' , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self.rope_scaling.get('factor' , SCREAMING_SNAKE_CASE_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 289 | 0 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_UpperCamelCase: Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : List[str], lowerCAmelCase : AutoencoderKL, lowerCAmelCase : CLIPTextModel, lowerCAmelCase : CLIPTokenizer, lowerCAmelCase : UNetaDConditionModel, lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCAmelCase : StableDiffusionSafetyChecker, lowerCAmelCase : CLIPImageProcessor, ) -> List[Any]:
super().__init__()
self.register_modules(
vae=lowerCAmelCase, text_encoder=lowerCAmelCase, tokenizer=lowerCAmelCase, unet=lowerCAmelCase, scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, )
def lowercase ( self : Any, lowerCAmelCase : Optional[Union[str, int]] = "auto" ) -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
def lowercase ( self : Optional[Any] ) -> Union[str, Any]:
self.enable_attention_slicing(lowerCAmelCase )
@torch.no_grad()
def __call__( self : str, lowerCAmelCase : Union[str, List[str]], lowerCAmelCase : int = 512, lowerCAmelCase : int = 512, lowerCAmelCase : int = 50, lowerCAmelCase : float = 7.5, lowerCAmelCase : Optional[Union[str, List[str]]] = None, lowerCAmelCase : Optional[int] = 1, lowerCAmelCase : float = 0.0, lowerCAmelCase : Optional[torch.Generator] = None, lowerCAmelCase : Optional[torch.FloatTensor] = None, lowerCAmelCase : Optional[str] = "pil", lowerCAmelCase : bool = True, lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCAmelCase : int = 1, lowerCAmelCase : Optional[torch.FloatTensor] = None, **lowerCAmelCase : str, ) -> Any:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowercase : List[str] = 1
elif isinstance(lowerCAmelCase, lowerCAmelCase ):
lowercase : Union[str, Any] = len(lowerCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase, lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(lowerCAmelCase )}.''' )
# get prompt text embeddings
lowercase : List[str] = self.tokenizer(
lowerCAmelCase, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
lowercase : Dict = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase : Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase : List[str] = text_embeddings.shape
lowercase : Any = text_embeddings.repeat(1, lowerCAmelCase, 1 )
lowercase : List[str] = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCAmelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase : List[str]
if negative_prompt is None:
lowercase : Optional[int] = ['']
elif type(lowerCAmelCase ) is not type(lowerCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase )} !='''
f''' {type(lowerCAmelCase )}.''' )
elif isinstance(lowerCAmelCase, lowerCAmelCase ):
lowercase : str = [negative_prompt]
elif batch_size != len(lowerCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowercase : int = negative_prompt
lowercase : Union[str, Any] = text_input_ids.shape[-1]
lowercase : List[str] = self.tokenizer(
lowerCAmelCase, padding='max_length', max_length=lowerCAmelCase, truncation=lowerCAmelCase, return_tensors='pt', )
lowercase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase : str = uncond_embeddings.shape[1]
lowercase : List[str] = uncond_embeddings.repeat(lowerCAmelCase, lowerCAmelCase, 1 )
lowercase : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCAmelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase : Dict = torch.randn(
lowerCAmelCase, generator=lowerCAmelCase, device='cpu', dtype=lowerCAmelCase ).to(self.device )
lowercase : Tuple = torch.randn(lowerCAmelCase, generator=lowerCAmelCase, device='cpu', dtype=lowerCAmelCase ).to(
self.device )
else:
lowercase : Tuple = torch.randn(
lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=lowerCAmelCase )
lowercase : int = torch.randn(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=lowerCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase : Optional[Any] = latents_reference.to(self.device )
lowercase : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase : int = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase : Tuple = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase : int = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase : str = 0 if dx < 0 else dx
lowercase : List[str] = 0 if dy < 0 else dy
lowercase : str = max(-dx, 0 )
lowercase : Optional[int] = max(-dy, 0 )
# import pdb
# pdb.set_trace()
lowercase : Optional[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase : str = {}
if accepts_eta:
lowercase : List[str] = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase : str = self.scheduler.scale_model_input(lowerCAmelCase, lowerCAmelCase )
# predict the noise residual
lowercase : Dict = self.unet(lowerCAmelCase, lowerCAmelCase, encoder_hidden_states=lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase : Tuple = noise_pred.chunk(2 )
lowercase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase : Optional[Any] = self.scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowercase : Any = 1 / 0.1_8215 * latents
lowercase : Tuple = self.vae.decode(lowerCAmelCase ).sample
lowercase : List[Any] = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase : Tuple = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if self.safety_checker is not None:
lowercase : str = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase ), return_tensors='pt' ).to(
self.device )
lowercase , lowercase : List[Any] = self.safety_checker(
images=lowerCAmelCase, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase : Any = None
if output_type == "pil":
lowercase : Optional[Any] = self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase, nsfw_content_detected=lowerCAmelCase )
| 255 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = 42
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
@register_to_config
def __init__( self : Optional[int], lowerCAmelCase : int = 32, lowerCAmelCase : int = 64, lowerCAmelCase : int = 20, lowerCAmelCase : int = 768, lowerCAmelCase : Optional[Any]=77, lowerCAmelCase : Tuple=4, lowerCAmelCase : float = 0.0, lowerCAmelCase : str = "silu", lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[str] = "linear", lowerCAmelCase : Optional[str] = "prd", lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[int] = None, ) -> List[Any]:
super().__init__()
lowercase : List[Any] = num_attention_heads
lowercase : int = attention_head_dim
lowercase : List[Any] = num_attention_heads * attention_head_dim
lowercase : Tuple = additional_embeddings
lowercase : Dict = time_embed_dim or inner_dim
lowercase : Optional[Any] = embedding_proj_dim or embedding_dim
lowercase : int = clip_embed_dim or embedding_dim
lowercase : List[str] = Timesteps(lowerCAmelCase, lowerCAmelCase, 0 )
lowercase : List[str] = TimestepEmbedding(lowerCAmelCase, lowerCAmelCase, out_dim=lowerCAmelCase, act_fn=lowerCAmelCase )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if embedding_proj_norm_type is None:
lowercase : str = None
elif embedding_proj_norm_type == "layer":
lowercase : Tuple = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowercase : List[str] = nn.Linear(lowerCAmelCase, lowerCAmelCase )
if encoder_hid_proj_type is None:
lowercase : Optional[int] = None
elif encoder_hid_proj_type == "linear":
lowercase : Dict = nn.Linear(lowerCAmelCase, lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowercase : Dict = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, lowerCAmelCase ) )
if added_emb_type == "prd":
lowercase : Union[str, Any] = nn.Parameter(torch.zeros(1, 1, lowerCAmelCase ) )
elif added_emb_type is None:
lowercase : str = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowercase : Dict = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dropout=lowerCAmelCase, activation_fn='gelu', attention_bias=lowerCAmelCase, )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
lowercase : str = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
lowercase : Optional[int] = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
lowercase : int = nn.LayerNorm(lowerCAmelCase )
lowercase : str = nn.Linear(lowerCAmelCase, lowerCAmelCase )
lowercase : Optional[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -1_0000.0 )
causal_attention_mask.triu_(1 )
lowercase : List[str] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask', lowerCAmelCase, persistent=lowerCAmelCase )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
lowercase : Any = nn.Parameter(torch.zeros(1, lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowercase ( self : Tuple ) -> Dict[str, AttentionProcessor]:
lowercase : Any = {}
def fn_recursive_add_processors(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
lowercase : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
return processors
def lowercase ( self : Union[str, Any], lowerCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
lowercase : str = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase : str, lowerCAmelCase : torch.nn.Module, lowerCAmelCase : Union[str, Any] ):
if hasattr(lowerCAmelCase, 'set_processor' ):
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''', lowerCAmelCase, lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
self.set_attn_processor(AttnProcessor() )
def lowercase ( self : Any, lowerCAmelCase : int, lowerCAmelCase : Union[torch.Tensor, float, int], lowerCAmelCase : torch.FloatTensor, lowerCAmelCase : Optional[torch.FloatTensor] = None, lowerCAmelCase : Optional[torch.BoolTensor] = None, lowerCAmelCase : bool = True, ) -> List[Any]:
lowercase : Optional[Any] = hidden_states.shape[0]
lowercase : Union[str, Any] = timestep
if not torch.is_tensor(lowerCAmelCase ):
lowercase : List[str] = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
lowercase : List[str] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase : Optional[int] = timesteps * torch.ones(lowerCAmelCase, dtype=timesteps.dtype, device=timesteps.device )
lowercase : Dict = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowercase : Optional[int] = timesteps_projected.to(dtype=self.dtype )
lowercase : Any = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
lowercase : Any = self.embedding_proj_norm(lowerCAmelCase )
lowercase : List[str] = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowercase : str = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowercase : Optional[Any] = self.proj_in(lowerCAmelCase )
lowercase : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
lowercase : Dict = []
lowercase : Optional[int] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowercase : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowercase : Union[str, Any] = hidden_states[:, None, :]
lowercase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowercase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase, -1, -1 )
additional_embeds.append(lowerCAmelCase )
lowercase : Union[str, Any] = torch.cat(
lowerCAmelCase, dim=1, )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowercase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowercase : List[Any] = F.pad(
lowerCAmelCase, (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
), value=0.0, )
lowercase : str = hidden_states + positional_embeddings
if attention_mask is not None:
lowercase : Tuple = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
lowercase : List[Any] = F.pad(lowerCAmelCase, (0, self.additional_embeddings), value=0.0 )
lowercase : int = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowercase : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
if self.norm_in is not None:
lowercase : List[Any] = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
lowercase : Tuple = block(lowerCAmelCase, attention_mask=lowerCAmelCase )
lowercase : Optional[Any] = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
lowercase : Optional[Any] = hidden_states[:, -1]
else:
lowercase : Any = hidden_states[:, additional_embeddings_len:]
lowercase : Optional[int] = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def lowercase ( self : Any, lowerCAmelCase : Dict ) -> Dict:
lowercase : int = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 255 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
a ={
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
a ={
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
__lowerCamelCase : List[str] = set()
__lowerCamelCase : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase : Tuple = char
__lowerCamelCase : Optional[Any] = set(_lowerCamelCase )
return pairs
class A_ ( __lowercase ):
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Dict="</s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : Dict="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,):
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : List[str] = vocab_file
__lowerCamelCase : Any = merges_file
__lowerCamelCase : List[Any] = {}
__lowerCamelCase : int = 0
__lowerCamelCase : Optional[int] = 1
__lowerCamelCase : int = 2
__lowerCamelCase : Any = 3
self.add_from_file(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE__ ,encoding='utf-8') as merges_handle:
__lowerCamelCase : Any = merges_handle.read().split('\n')[:-1]
__lowerCamelCase : Tuple = [tuple(merge.split()[:-1]) for merge in merges]
__lowerCamelCase : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__))))
__lowerCamelCase : str = {}
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : Any = [self.cls_token_id]
__lowerCamelCase : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : List[Any] = [self.sep_token_id]
__lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def lowerCAmelCase ( self : List[Any]):
return len(self.encoder)
def lowerCAmelCase ( self : Tuple):
return dict(self.encoder ,**self.added_tokens_encoder)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]):
if token in self.cache:
return self.cache[token]
__lowerCamelCase : Optional[Any] = tuple(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = tuple(list(word[:-1]) + [word[-1] + '</w>'])
__lowerCamelCase : str = get_pairs(SCREAMING_SNAKE_CASE__)
if not pairs:
return token
while True:
__lowerCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ ,float('inf')))
if bigram not in self.bpe_ranks:
break
__lowerCamelCase : Tuple = bigram
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : Dict = 0
while i < len(SCREAMING_SNAKE_CASE__):
try:
__lowerCamelCase : int = word.index(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
__lowerCamelCase : Union[str, Any] = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
__lowerCamelCase : Union[str, Any] = tuple(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = new_word
if len(SCREAMING_SNAKE_CASE__) == 1:
break
else:
__lowerCamelCase : Tuple = get_pairs(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = """@@ """.join(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = word[:-4]
__lowerCamelCase : int = word
return word
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Dict = []
__lowerCamelCase : int = re.findall(R'\S+\n?' ,SCREAMING_SNAKE_CASE__)
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__).split(' ')))
return split_tokens
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int]):
return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token))
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any]):
return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : str = """ """.join(SCREAMING_SNAKE_CASE__).replace('@@ ' ,'').strip()
return out_string
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : str = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
__lowerCamelCase : List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
if os.path.abspath(self.merges_file) != os.path.abspath(SCREAMING_SNAKE_CASE__):
copyfile(self.merges_file ,SCREAMING_SNAKE_CASE__)
return out_vocab_file, out_merge_file
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int]):
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
try:
with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as fd:
self.add_from_file(SCREAMING_SNAKE_CASE__)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"Incorrect encoding detected in {f}, please rebuild the dataset")
return
__lowerCamelCase : int = f.readlines()
for lineTmp in lines:
__lowerCamelCase : Any = lineTmp.strip()
__lowerCamelCase : int = line.rfind(' ')
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'')
__lowerCamelCase : Tuple = line[:idx]
__lowerCamelCase : Dict = len(self.encoder)
| 363 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
@property
def lowerCAmelCase ( self : Union[str, Any]):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Optional[int] = ort.SessionOptions()
__lowerCamelCase : Tuple = False
return options
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
__lowerCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
__lowerCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
# using the PNDM scheduler by default
__lowerCamelCase : Dict = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = 'A red cat sitting on a park bench'
__lowerCamelCase : Any = np.random.RandomState(0)
__lowerCamelCase : List[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ ,image=SCREAMING_SNAKE_CASE__ ,mask_image=SCREAMING_SNAKE_CASE__ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=1_5 ,generator=SCREAMING_SNAKE_CASE__ ,output_type='np' ,)
__lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 113 | 0 |
'''simple docstring'''
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = arr.split(""",""" )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [int(self.array[0] )] * len(self.array )
SCREAMING_SNAKE_CASE : Optional[int] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
SCREAMING_SNAKE_CASE : Dict = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
__UpperCAmelCase = input("""please input some numbers:""")
__UpperCAmelCase = SubArray(whole_array)
__UpperCAmelCase = array.solve_sub_array()
print(("""the results is:""", re))
| 323 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
SCREAMING_SNAKE_CASE : int = torch.nn.Convad(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parameters
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = torch.chunk(lowerCamelCase_ , 2 , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = torch.clamp(self.logvar , -30.0 , 20.0 )
SCREAMING_SNAKE_CASE : Dict = deterministic
SCREAMING_SNAKE_CASE : int = torch.exp(0.5 * self.logvar )
SCREAMING_SNAKE_CASE : Tuple = torch.exp(self.logvar )
if self.deterministic:
SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = randn_tensor(
self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
SCREAMING_SNAKE_CASE : Optional[Any] = self.mean + self.std * sample
return x
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
SCREAMING_SNAKE_CASE : List[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
| 323 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 178 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase_ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __A( unittest.TestCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ):
UpperCamelCase__ = size if size is not None else {"""height""": 20, """width""": 20}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = do_convert_rgb
UpperCamelCase__ = [5_12, 10_24, 20_48, 40_96]
UpperCamelCase__ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
def UpperCAmelCase_ (self ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
UpperCamelCase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __A( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase_ (self ):
UpperCamelCase__ = PixaStructImageProcessingTester(self )
@property
def UpperCAmelCase_ (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_convert_rgb""" ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase__ = 20_48
UpperCamelCase__ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def UpperCAmelCase_ (self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase_ (self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
UpperCamelCase__ = """Hello"""
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ , header_text=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase_ (self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase_ (self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class __A( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = PixaStructImageProcessor if is_vision_available() else None
def UpperCAmelCase_ (self ):
UpperCamelCase__ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCamelCase__ = 3
@property
def UpperCAmelCase_ (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_convert_rgb""" ) )
def UpperCAmelCase_ (self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , max_patches=SCREAMING_SNAKE_CASE_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 178 | 1 |
'''simple docstring'''
import os
from distutils.util import strtobool
def lowerCamelCase__ ( _A , _A ):
for e in env_keys:
a : Optional[int] = int(os.environ.get(_A , -1 ) )
if val >= 0:
return val
return default
def lowerCamelCase__ ( _A , _A=False ):
a : int = os.environ.get(_A , str(_A ) )
return strtobool(_A ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowerCamelCase__ ( _A , _A="no" ):
a : List[str] = os.environ.get(_A , str(_A ) )
return value | 297 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase__ ( _A , _A ):
assert isinstance(_A , _A )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : str = tmp_path / 'cache'
a : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Dict = features.copy() if features else default_expected_features
a : Union[str, Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Tuple = tmp_path / 'cache'
a : Optional[Any] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
a : Optional[int] = features.copy() if features else default_expected_features
a : Dict = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Optional[int] = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase__ ( _A , _A ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
a : Dict = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
a : int = features.copy()
a : List[Any] = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : Dict = tmp_path / 'cache'
a : Any = JsonDatasetReader(_A , features=_A , cache_dir=_A ).read()
assert isinstance(_A , _A )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = JsonDatasetReader(_A , cache_dir=_A , split=_A ).read()
_check_json_dataset(_A , _A )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase__ ( _A , _A , _A ):
if issubclass(_A , _A ):
a : Optional[int] = jsonl_path
elif issubclass(_A , _A ):
a : Optional[int] = [jsonl_path]
a : List[str] = tmp_path / 'cache'
a : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_dataset(_A , _A )
def lowerCamelCase__ ( _A , _A , _A=("train",) ):
assert isinstance(_A , _A )
for split in splits:
a : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a : int = JsonDatasetReader({'train': jsonl_path} , cache_dir=_A , keep_in_memory=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase__ ( _A , _A , _A ):
a : Dict = tmp_path / 'cache'
a : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : List[Any] = features.copy() if features else default_expected_features
a : Any = (
Features({feature: Value(_A ) for feature, dtype in features.items()} ) if features is not None else None
)
a : List[str] = JsonDatasetReader({'train': jsonl_path} , features=_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase__ ( _A , _A , _A ):
if split:
a : Any = {split: jsonl_path}
else:
a : List[Any] = 'train'
a : List[str] = {'train': jsonl_path, 'test': jsonl_path}
a : List[Any] = tmp_path / 'cache'
a : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a : Tuple = JsonDatasetReader(_A , cache_dir=_A ).read()
_check_json_datasetdict(_A , _A , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase__ ( _A ):
return json.load(_A )
def lowerCamelCase__ ( _A ):
return [json.loads(_A ) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : Tuple , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write()
buffer.seek(0 )
a : List[str] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Tuple , __snake_case : Tuple , __snake_case : Any , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowercase_ ( self : List[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : List[Any] = load_json_function(__snake_case )
assert isinstance(__snake_case , __snake_case )
assert isinstance(exported_content[0] , __snake_case )
assert len(__snake_case ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowercase_ ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case , num_proc=2 ).write()
buffer.seek(0 )
a : int = load_json(__snake_case )
assert isinstance(__snake_case , __snake_case )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__snake_case , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__snake_case ) == 10
def lowercase_ ( self : List[str] , __snake_case : str ):
with pytest.raises(__snake_case ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowercase_ ( self : Tuple , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int , __snake_case : List[str] , __snake_case : Optional[int] ):
a : Tuple = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
a : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
with fsspec.open(__snake_case , 'rb' , compression='infer' ) as f:
a : Union[str, Any] = f.read()
assert exported_content == original_content | 297 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase ( A_ )-> Dict:
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def lowercase ( A_ )-> Optional[Any]:
'''simple docstring'''
for char in word:
a : List[Any] = ord(A_ )
if not _is_chinese_char(A_ ):
return 0
return 1
def lowercase ( A_ )-> Union[str, Any]:
'''simple docstring'''
a : List[str] = set()
for token in tokens:
a : Union[str, Any] = len(A_ ) > 1 and is_chinese(A_ )
if chinese_word:
word_set.add(A_ )
a : List[Any] = list(A_ )
return word_list
def lowercase ( A_ , A_ )-> Tuple:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
a : Any = max([len(A_ ) for w in chinese_word_set] )
a : Dict = bert_tokens
a , a : int = 0, len(A_ )
while start < end:
a : Optional[Any] = True
if is_chinese(bert_word[start] ):
a : List[Any] = min(end - start , A_ )
for i in range(A_ , 1 , -1 ):
a : Any = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a : Dict = "##" + bert_word[j]
a : Any = start + i
a : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
def lowercase ( A_ , A_ , A_ )-> str:
'''simple docstring'''
a : Any = []
for i in range(0 , len(A_ ) , 100 ):
a : List[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
a : Union[str, Any] = [get_chinese_word(A_ ) for r in res]
ltp_res.extend(A_ )
assert len(A_ ) == len(A_ )
a : int = []
for i in range(0 , len(A_ ) , 100 ):
a : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=A_ , truncation=A_ , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(A_ ) == len(A_ )
a : Optional[Any] = []
for input_ids, chinese_word in zip(A_ , A_ ):
a : Any = []
for id in input_ids:
a : List[str] = bert_tokenizer._convert_id_to_token(A_ )
input_tokens.append(A_ )
a : int = add_sub_symbol(A_ , A_ )
a : List[Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(A_ ):
if token[:2] == "##":
a : List[Any] = token[2:]
# save chinese tokens' pos
if len(A_ ) == 1 and _is_chinese_char(ord(A_ ) ):
ref_id.append(A_ )
ref_ids.append(A_ )
assert len(A_ ) == len(A_ )
return ref_ids
def lowercase ( A_ )-> str:
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
a : Optional[int] = f.readlines()
a : Optional[Any] = [line.strip() for line in data if len(A_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a : Tuple = LTP(args.ltp ) # faster in GPU device
a : int = BertTokenizer.from_pretrained(args.bert )
a : Optional[Any] = prepare_ref(A_ , A_ , A_ )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
a : List[str] = [json.dumps(A_ ) + "\n" for ref in ref_ids]
f.writelines(A_ )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
__lowercase = parser.parse_args()
main(args)
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 226 | 1 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__snake_case = 299792458
# Symbols
__snake_case , __snake_case , __snake_case , __snake_case = symbols('''ct x y z''')
def a ( __a ) -> float:
'''simple docstring'''
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def a ( __a ) -> float:
'''simple docstring'''
return 1 / sqrt(1 - beta(__a ) ** 2 )
def a ( __a ) -> np.ndarray:
'''simple docstring'''
return np.array(
[
[gamma(__a ), -gamma(__a ) * beta(__a ), 0, 0],
[-gamma(__a ) * beta(__a ), gamma(__a ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def a ( __a , __a = None ) -> np.ndarray:
'''simple docstring'''
if event is None:
UpperCamelCase__ :Any = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__a ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__snake_case = transform(29979245)
print('''Example of four vector: ''')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__snake_case = {ct: c, x: 1, y: 1, z: 1}
__snake_case = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""") | 97 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( __a , __a ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__a , __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :Tuple = JsonDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[Any] = features.copy() if features else default_expected_features
UpperCamelCase__ :Tuple = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :int = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def a ( __a , __a , __a ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :int = tmp_path / '''cache'''
UpperCamelCase__ :str = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
UpperCamelCase__ :Any = features.copy() if features else default_expected_features
UpperCamelCase__ :Union[str, Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Any = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
UpperCamelCase__ :int = features.copy()
UpperCamelCase__ :List[Any] = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Optional[int] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = JsonDatasetReader(__a , features=__a , cache_dir=__a ).read()
assert isinstance(__a , __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = tmp_path / '''cache'''
UpperCamelCase__ :Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[Any] = JsonDatasetReader(__a , cache_dir=__a , split=__a ).read()
_check_json_dataset(__a , __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a ( __a , __a , __a ) -> Any:
'''simple docstring'''
if issubclass(__a , __a ):
UpperCamelCase__ :Union[str, Any] = jsonl_path
elif issubclass(__a , __a ):
UpperCamelCase__ :int = [jsonl_path]
UpperCamelCase__ :Dict = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :List[str] = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_dataset(__a , __a )
def a ( __a , __a , __a=("train",) ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__a , __a )
for split in splits:
UpperCamelCase__ :Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a ( __a , __a , __a ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :List[str] = tmp_path / '''cache'''
UpperCamelCase__ :Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase__ :str = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=__a , keep_in_memory=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def a ( __a , __a , __a ) -> int:
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path / '''cache'''
UpperCamelCase__ :Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Optional[int] = features.copy() if features else default_expected_features
UpperCamelCase__ :str = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase__ :Dict = JsonDatasetReader({'''train''': jsonl_path} , features=__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a ( __a , __a , __a ) -> str:
'''simple docstring'''
if split:
UpperCamelCase__ :List[str] = {split: jsonl_path}
else:
UpperCamelCase__ :int = '''train'''
UpperCamelCase__ :int = {'''train''': jsonl_path, '''test''': jsonl_path}
UpperCamelCase__ :Any = tmp_path / '''cache'''
UpperCamelCase__ :Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCamelCase__ :Any = JsonDatasetReader(__a , cache_dir=__a ).read()
_check_json_datasetdict(__a , __a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a ( __a ) -> Union[str, Any]:
'''simple docstring'''
return json.load(__a )
def a ( __a ) -> int:
'''simple docstring'''
return [json.loads(__a ) for line in buffer]
class lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert isinstance(exported_content[0] , UpperCamelCase_ )
assert len(UpperCamelCase_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :int = load_json(UpperCamelCase_ )
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase_ ) == 10
def lowerCAmelCase__ ( self , UpperCamelCase_ ):
'''simple docstring'''
with pytest.raises(UpperCamelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :Dict = f.read()
with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
UpperCamelCase__ :int = f.read()
assert exported_content == original_content | 97 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase__ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> int:
"""simple docstring"""
for attribute in key.split('''.''' ):
a = getattr(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
if weight_type is not None:
a = getattr(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ).shape
else:
a = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
else:
a = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Optional[Any]:
"""simple docstring"""
a = []
a = fairseq_model.state_dict()
a = hf_model.feature_extractor
a = hf_model.adapter
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, hf_model.config.feat_extract_norm == '''group''', )
a = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a = True
if "*" in mapped_key:
a = name.split(__SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
a = mapped_key.replace('''*''', __SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a = """weight_g"""
elif "weight_v" in name:
a = """weight_v"""
elif "bias" in name:
a = """bias"""
elif "weight" in name:
a = """weight"""
else:
a = None
set_recursively(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(__SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> int:
"""simple docstring"""
a = full_name.split('''conv_layers.''' )[-1]
a = name.split('''.''' )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
a = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
a = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
a = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
a = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> Any:
"""simple docstring"""
a = full_name.split('''adaptor.''' )[-1]
a = name.split('''.''' )
if items[1].isdigit():
a = int(items[1] )
else:
a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
a = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
a = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
a = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
a = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
a = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
a = emb.weight.shape
a = nn.Linear(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, bias=__SCREAMING_SNAKE_CASE )
a = emb.weight.data
return lin_layer
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, ) -> Union[str, Any]:
"""simple docstring"""
a = WavaVecaConfig.from_pretrained(
__SCREAMING_SNAKE_CASE, add_adapter=__SCREAMING_SNAKE_CASE, adapter_stride=__SCREAMING_SNAKE_CASE, adapter_kernel_size=__SCREAMING_SNAKE_CASE, use_auth_token=__SCREAMING_SNAKE_CASE, output_hidden_size=__SCREAMING_SNAKE_CASE, )
a = MBartConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
# load model
a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
}, )
a = model[0].eval()
# load feature extractor
a = WavaVecaFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE, use_auth_token=__SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
a = WavaVecaModel(__SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder, __SCREAMING_SNAKE_CASE )
# load decoder weights
a = MBartForCausalLM(__SCREAMING_SNAKE_CASE )
a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=__SCREAMING_SNAKE_CASE )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
a = SpeechEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE, decoder=__SCREAMING_SNAKE_CASE )
a = False
a = MBartaaTokenizer(__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
a = hf_wavavec.config.to_dict()
a = tokenizer.pad_token_id
a = tokenizer.bos_token_id
a = tokenizer.eos_token_id
a = """mbart50"""
a = """wav2vec2"""
a = tokenizer.eos_token_id
a = 2_5_0_0_0_4
a = tokenizer.eos_token_id
a = SpeechEncoderDecoderConfig.from_dict(__SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250_004, type=int, help="""`decoder_start_token_id` of model config""")
UpperCamelCase__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 370 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=a_ )
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
SCREAMING_SNAKE_CASE_ = Features({'text': Value('string' )} )
SCREAMING_SNAKE_CASE_ = Features({} )
SCREAMING_SNAKE_CASE_ = "text"
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
return {self.text_column: "text"}
| 330 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Dict = logging.get_logger(__name__)
_A : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A : str = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
_A : Optional[int] = {'facebook/blenderbot-3B': 1_28}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _a ( ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Tuple = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCamelCase__ : str = bs[:]
lowerCamelCase__ : int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase__ : Tuple = [chr(UpperCAmelCase ) for n in cs]
return dict(zip(UpperCAmelCase , UpperCAmelCase ) )
def _a ( UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Tuple = set()
lowerCamelCase__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase__ : int = char
return pairs
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Dict = VOCAB_FILES_NAMES
_UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int , A : List[str] , A : int , A : Optional[int]="replace" , A : Tuple="<s>" , A : List[Any]="</s>" , A : str="</s>" , A : str="<s>" , A : int="<unk>" , A : Optional[int]="<pad>" , A : Union[str, Any]="<mask>" , A : List[str]=False , **A : int , ) ->Union[str, Any]:
lowerCamelCase__ : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
lowerCamelCase__ : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
lowerCamelCase__ : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
lowerCamelCase__ : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
lowerCamelCase__ : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
lowerCamelCase__ : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase__ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , )
with open(A , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase__ : Tuple = json.load(A )
lowerCamelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowerCamelCase__ : Tuple = errors # how to handle errors in decoding
lowerCamelCase__ : List[str] = bytes_to_unicode()
lowerCamelCase__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(A , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase__ : Tuple = merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase__ : List[str] = dict(zip(A , range(len(A ) ) ) )
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase__ : Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCamelCase ( self : str ) ->Dict:
return len(self.encoder )
def __lowerCamelCase ( self : Optional[int] ) ->List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self : str , A : Union[str, Any] ) ->str:
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : List[Any] = tuple(A )
lowerCamelCase__ : Dict = get_pairs(A )
if not pairs:
return token
while True:
lowerCamelCase__ : List[Any] = min(A , key=lambda A : self.bpe_ranks.get(A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : int = bigram
lowerCamelCase__ : Dict = []
lowerCamelCase__ : Tuple = 0
while i < len(A ):
try:
lowerCamelCase__ : Union[str, Any] = word.index(A , A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase__ : Union[str, Any] = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : List[str] = tuple(A )
lowerCamelCase__ : Tuple = new_word
if len(A ) == 1:
break
else:
lowerCamelCase__ : int = get_pairs(A )
lowerCamelCase__ : Optional[Any] = ''' '''.join(A )
lowerCamelCase__ : Any = word
return word
def __lowerCamelCase ( self : Any , A : Union[str, Any] ) ->List[Any]:
lowerCamelCase__ : List[Any] = []
for token in re.findall(self.pat , A ):
lowerCamelCase__ : Optional[Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(''' ''' ) )
return bpe_tokens
def __lowerCamelCase ( self : Tuple , A : List[Any] ) ->int:
return self.encoder.get(A , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self : Optional[Any] , A : Any ) ->Optional[int]:
return self.decoder.get(A )
def __lowerCamelCase ( self : int , A : Optional[Any] ) ->Optional[Any]:
lowerCamelCase__ : int = ''''''.join(A )
lowerCamelCase__ : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __lowerCamelCase ( self : List[Any] , A : str , A : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(A ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase__ : Optional[Any] = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : str = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A ) + '''\n''' )
lowerCamelCase__ : Union[str, Any] = 0
with open(A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase__ : Any = token_index
writer.write(''' '''.join(A ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __lowerCamelCase ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None ) ->List[int]:
lowerCamelCase__ : List[Any] = [self.sep_token_id]
lowerCamelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self : Any , A : Union[str, Any] , A : Any=False , **A : List[Any] ) ->Optional[Any]:
lowerCamelCase__ : List[Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
lowerCamelCase__ : Union[str, Any] = ''' ''' + text
return (text, kwargs)
def __lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None ) ->Dict:
return token_ids_a + [self.eos_token_id]
def __lowerCamelCase ( self : Any , A : "Conversation" ) ->List[int]:
lowerCamelCase__ : List[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(A )
lowerCamelCase__ : Union[str, Any] = ''' '''.join(A )
lowerCamelCase__ : List[str] = self.encode(A )
if len(A ) > self.model_max_length:
lowerCamelCase__ : List[str] = input_ids[-self.model_max_length :]
logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids
| 142 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict = logging.get_logger(__name__)
_A : Union[str, Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Any = "vit_msn"
def __init__( self : Optional[Any] , A : Dict=7_6_8 , A : Union[str, Any]=1_2 , A : Optional[Any]=1_2 , A : List[Any]=3_0_7_2 , A : List[str]="gelu" , A : Optional[int]=0.0 , A : int=0.0 , A : int=0.02 , A : Tuple=1e-06 , A : int=2_2_4 , A : Union[str, Any]=1_6 , A : Dict=3 , A : Optional[Any]=True , **A : Optional[Any] , ) ->Dict:
super().__init__(**A )
lowerCamelCase__ : int = hidden_size
lowerCamelCase__ : Dict = num_hidden_layers
lowerCamelCase__ : str = num_attention_heads
lowerCamelCase__ : Tuple = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = initializer_range
lowerCamelCase__ : Optional[int] = layer_norm_eps
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : Any = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : Tuple = qkv_bias
| 142 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCAmelCase = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
if attention_mask is None:
_a : Dict = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_a : List[str] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_a : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_a : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __magic_name__ :
def __init__( self : Union[str, Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : int=7 ,_UpperCAmelCase : Any=True ,_UpperCAmelCase : int=False ,_UpperCAmelCase : List[Any]=99 ,_UpperCAmelCase : Optional[int]=16 ,_UpperCAmelCase : Union[str, Any]=2 ,_UpperCAmelCase : Dict=4 ,_UpperCAmelCase : Dict=4 ,_UpperCAmelCase : Union[str, Any]="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : List[str]=0.1 ,_UpperCAmelCase : Dict=32 ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : List[Any]=0 ,_UpperCAmelCase : List[str]=0.02 ,):
_a : Tuple = parent
_a : Tuple = batch_size
_a : List[Any] = seq_length
_a : Any = is_training
_a : str = use_labels
_a : int = vocab_size
_a : Tuple = hidden_size
_a : Tuple = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Optional[Any] = intermediate_size
_a : List[str] = hidden_act
_a : Dict = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : List[Any] = max_position_embeddings
_a : Tuple = eos_token_id
_a : Tuple = pad_token_id
_a : Optional[int] = bos_token_id
_a : Optional[int] = initializer_range
def __lowercase ( self : List[str] ):
_a : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) ,3 ,self.vocab_size )
_a : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) ,dtype=np.intaa )) ,-1 )
_a : Optional[int] = shift_tokens_right(_UpperCAmelCase ,1 ,2 )
_a : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,initializer_range=self.initializer_range ,use_cache=_UpperCAmelCase ,)
_a : str = prepare_blenderbot_inputs_dict(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
return config, inputs_dict
def __lowercase ( self : int ):
_a , _a : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str ):
_a : List[Any] = 20
_a : List[Any] = model_class_name(_UpperCAmelCase )
_a : Any = model.encode(inputs_dict['input_ids'] )
_a , _a : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_a : Optional[int] = model.init_cache(decoder_input_ids.shape[0] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype='i4' )
_a : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
_a : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] ,_UpperCAmelCase ,decoder_attention_mask=_UpperCAmelCase ,past_key_values=_UpperCAmelCase ,decoder_position_ids=_UpperCAmelCase ,)
_a : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype='i4' )
_a : Optional[int] = model.decode(
decoder_input_ids[:, -1:] ,_UpperCAmelCase ,decoder_attention_mask=_UpperCAmelCase ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=_UpperCAmelCase ,)
_a : Tuple = model.decode(_UpperCAmelCase ,_UpperCAmelCase )
_a : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 ,msg=F"""Max diff is {diff}""" )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : int ):
_a : Optional[int] = 20
_a : Dict = model_class_name(_UpperCAmelCase )
_a : str = model.encode(inputs_dict['input_ids'] )
_a , _a : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_a : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
_a : Dict = model.init_cache(decoder_input_ids.shape[0] ,_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
_a : Any = model.decode(
decoder_input_ids[:, :-1] ,_UpperCAmelCase ,decoder_attention_mask=_UpperCAmelCase ,past_key_values=_UpperCAmelCase ,decoder_position_ids=_UpperCAmelCase ,)
_a : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype='i4' )
_a : Optional[int] = model.decode(
decoder_input_ids[:, -1:] ,_UpperCAmelCase ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=_UpperCAmelCase ,decoder_position_ids=_UpperCAmelCase ,)
_a : Union[str, Any] = model.decode(_UpperCAmelCase ,_UpperCAmelCase ,decoder_attention_mask=_UpperCAmelCase )
_a : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 ,msg=F"""Max diff is {diff}""" )
@require_flax
class __magic_name__ ( unittest.TestCase ):
lowerCAmelCase : Union[str, Any] = 9_9
def __lowercase ( self : Tuple ):
_a : Tuple = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] ,dtype=np.intaa ,)
_a : List[str] = input_ids.shape[0]
_a : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=24 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=32 ,decoder_ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
def __lowercase ( self : Optional[Any] ):
_a , _a , _a : Optional[Any] = self._get_config_and_data()
_a : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
_a : str = lm_model(input_ids=_UpperCAmelCase )
_a : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
_a : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=14 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=8 ,decoder_ffn_dim=8 ,max_position_embeddings=48 ,)
_a : Dict = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
_a : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] ,dtype=np.intaa )
_a : List[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] ,dtype=np.intaa )
_a : int = lm_model(input_ids=_UpperCAmelCase ,decoder_input_ids=_UpperCAmelCase )
_a : List[str] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape ,_UpperCAmelCase )
def __lowercase ( self : Tuple ):
_a : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] ,dtype=np.intaa )
_a : Tuple = shift_tokens_right(_UpperCAmelCase ,1 ,2 )
_a : Union[str, Any] = np.equal(_UpperCAmelCase ,1 ).astype(np.floataa ).sum()
_a : str = np.equal(_UpperCAmelCase ,1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape ,input_ids.shape )
self.assertEqual(_UpperCAmelCase ,n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] ,2 ).all() )
@require_flax
class __magic_name__ ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ):
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCAmelCase : List[str] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __lowercase ( self : int ):
_a : List[Any] = FlaxBlenderbotSmallModelTester(self )
def __lowercase ( self : Optional[int] ):
_a , _a : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a , _a : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_a : Optional[int] = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=None ,**_UpperCAmelCase : str ):
return model.encode(input_ids=_UpperCAmelCase ,attention_mask=_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
_a : str = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_a : Tuple = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) ,len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase ,_UpperCAmelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
def __lowercase ( self : List[Any] ):
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
_a : List[Any] = model.encode(inputs_dict['input_ids'] ,inputs_dict['attention_mask'] )
_a : Union[str, Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : Tuple ):
return model.decode(
decoder_input_ids=_UpperCAmelCase ,decoder_attention_mask=_UpperCAmelCase ,encoder_outputs=_UpperCAmelCase ,)
with self.subTest('JIT Enabled' ):
_a : Union[str, Any] = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_a : Dict = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) ,len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase ,_UpperCAmelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowercase ( self : Tuple ):
for model_class_name in self.all_model_classes:
_a : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_a : Any = np.ones((1, 1) ) * model.config.eos_token_id
_a : List[str] = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
| 107 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> list:
if any(not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or x < 0 for x in sequence ):
raise TypeError('Sequence must be list of non-negative integers' )
for _ in range(len(lowerCAmelCase_ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(lowerCAmelCase_ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 107 | 1 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = 2_5_6
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['''melgan''']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
# From MELGAN
lowerCAmelCase = math.log(1E-5) # Matches MelGAN training.
lowerCAmelCase = 4.0 # Largest value for most examples
lowerCAmelCase = 128
self.register_modules(
notes_encoder=__lowerCAmelCase , continuous_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase , scheduler=__lowerCAmelCase , melgan=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=(-1.0, 1.0) , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = output_range
if clip:
lowerCAmelCase = torch.clip(__lowerCAmelCase , self.min_value , self.max_value)
# Scale to [0, 1].
lowerCAmelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=(-1.0, 1.0) , __lowerCAmelCase=False):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = input_range
lowerCAmelCase = torch.clip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) if clip else outputs
# Scale to [0, 1].
lowerCAmelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = input_tokens > 0
lowerCAmelCase , lowerCAmelCase = self.notes_encoder(
encoder_input_tokens=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = self.continuous_encoder(
encoder_inputs=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase)
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = noise_time
if not torch.is_tensor(__lowerCAmelCase):
lowerCAmelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
elif torch.is_tensor(__lowerCAmelCase) and len(timesteps.shape) == 0:
lowerCAmelCase = timesteps[None].to(input_tokens.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
lowerCAmelCase = self.decoder(
encodings_and_masks=__lowerCAmelCase , decoder_input_tokens=__lowerCAmelCase , decoder_noise_time=__lowerCAmelCase)
return logits
@torch.no_grad()
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 100 , __lowerCAmelCase = True , __lowerCAmelCase = "numpy" , __lowerCAmelCase = None , __lowerCAmelCase = 1 , ):
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__lowerCAmelCase)}.")
lowerCAmelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
lowerCAmelCase = np.zeros([1, 0, self.n_dims] , np.floataa)
lowerCAmelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device)
for i, encoder_input_tokens in enumerate(__lowerCAmelCase):
if i == 0:
lowerCAmelCase = torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
lowerCAmelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowerCAmelCase = ones
lowerCAmelCase = self.scale_features(
__lowerCAmelCase , output_range=[-1.0, 1.0] , clip=__lowerCAmelCase)
lowerCAmelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=__lowerCAmelCase , continuous_mask=__lowerCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowerCAmelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowerCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
lowerCAmelCase = self.decode(
encodings_and_masks=__lowerCAmelCase , input_tokens=__lowerCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase).prev_sample
lowerCAmelCase = self.scale_to_features(__lowerCAmelCase , input_range=[-1.0, 1.0])
lowerCAmelCase = mel[:1]
lowerCAmelCase = mel.cpu().float().numpy()
lowerCAmelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase)
logger.info("""Generated segment""" , __lowerCAmelCase)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""")
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""")
if output_type == "numpy":
lowerCAmelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
lowerCAmelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowerCAmelCase)
| 272 | '''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a__( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None
@property
def a_ ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = (3, 32, 128)
lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase))))
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(__lowerCAmelCase) + """\n""")
lowerCAmelCase = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase)
with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1))
return image_input
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
processor.save_pretrained(self.tmpdirname)
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0)
lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""")
lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = processor(text=__lowerCAmelCase)
lowerCAmelCase = tokenizer(__lowerCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = """test"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase):
processor()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.char_decode(__lowerCAmelCase)
lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase)
lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = None
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase)
lowerCAmelCase = torch.randn(1 , 27 , 38)
lowerCAmelCase = torch.randn(1 , 27 , 50257)
lowerCAmelCase = torch.randn(1 , 27 , 30522)
lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 272 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''PerceiverFeatureExtractor''']
UpperCamelCase_ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 363 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = hf_hub_url(repo_id=UpperCAmelCase , path=UpperCAmelCase , revision=UpperCAmelCase )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase )}''' | 303 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = "ZinengTang/tvlt-base"
SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
def __UpperCamelCase ( self : Dict , **a : List[Any] ) -> List[Any]:
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint , **a )
def __UpperCamelCase ( self : List[Any] , **a : List[Any] ) -> List[Any]:
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **a )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Dict = TvltProcessor(image_processor=a , feature_extractor=a )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Optional[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , a )
self.assertIsInstance(processor.image_processor , a )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Dict = TvltProcessor(image_processor=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([1_2000] )
SCREAMING_SNAKE_CASE : Any = feature_extractor(a , return_tensors="np" )
SCREAMING_SNAKE_CASE : Tuple = processor(audio=a , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE : str = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Union[str, Any] = TvltProcessor(image_processor=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([3, 224, 224] )
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(a , return_tensors="np" )
SCREAMING_SNAKE_CASE : str = processor(images=a , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[Any] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Dict = TvltProcessor(image_processor=a , feature_extractor=a )
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([1_2000] )
SCREAMING_SNAKE_CASE : str = np.ones([3, 224, 224] )
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(audio=a , images=a )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_feature_extractor()
SCREAMING_SNAKE_CASE : Union[str, Any] = TvltProcessor(image_processor=a , feature_extractor=a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , ) | 76 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 273 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger('''transformers.models.speecht5''')
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ ) -> str:
'''simple docstring'''
hf_model.apply_weight_norm()
__lowercase= checkpoint['input_conv.weight_g']
__lowercase= checkpoint['input_conv.weight_v']
__lowercase= checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
__lowercase= checkpoint[F'upsamples.{i}.1.weight_g']
__lowercase= checkpoint[F'upsamples.{i}.1.weight_v']
__lowercase= checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__lowercase= checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
__lowercase= checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
__lowercase= checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
__lowercase= checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
__lowercase= checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
__lowercase= checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
__lowercase= checkpoint['output_conv.1.weight_g']
__lowercase= checkpoint['output_conv.1.weight_v']
__lowercase= checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , ) -> int:
'''simple docstring'''
if config_path is not None:
__lowercase= SpeechTaHifiGanConfig.from_pretrained(lowercase__ )
else:
__lowercase= SpeechTaHifiGanConfig()
__lowercase= SpeechTaHifiGan(lowercase__ )
__lowercase= torch.load(lowercase__ )
load_weights(orig_checkpoint['model']['generator'] , lowercase__ , lowercase__ )
__lowercase= np.load(lowercase__ )
__lowercase= stats[0].reshape(-1 )
__lowercase= stats[1].reshape(-1 )
__lowercase= torch.from_numpy(lowercase__ ).float()
__lowercase= torch.from_numpy(lowercase__ ).float()
model.save_pretrained(lowercase__ )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(lowercase__ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 304 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''blenderbot-small'''
UpperCamelCase_ : Optional[Any] =['''past_key_values''']
UpperCamelCase_ : Optional[int] ={'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self , lowerCAmelCase=5_0_2_6_5 , lowerCAmelCase=5_1_2 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=8 , lowerCAmelCase=2_0_4_8 , lowerCAmelCase=1_6 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1 , lowerCAmelCase=False , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=2 , **lowerCAmelCase , ):
__lowercase= vocab_size
__lowercase= max_position_embeddings
__lowercase= d_model
__lowercase= encoder_ffn_dim
__lowercase= encoder_layers
__lowercase= encoder_attention_heads
__lowercase= decoder_ffn_dim
__lowercase= decoder_layers
__lowercase= decoder_attention_heads
__lowercase= dropout
__lowercase= attention_dropout
__lowercase= activation_dropout
__lowercase= activation_function
__lowercase= init_std
__lowercase= encoder_layerdrop
__lowercase= decoder_layerdrop
__lowercase= use_cache
__lowercase= encoder_layers
__lowercase= scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
class A ( A_ ):
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase= {0: 'batch'}
__lowercase= {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
__lowercase= {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowercase= OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def _A (self ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super().outputs
else:
__lowercase= super(lowerCAmelCase , self ).outputs
if self.use_past:
__lowercase, __lowercase= self.num_layers
for i in range(lowerCAmelCase ):
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
__lowercase= {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Generate decoder inputs
__lowercase= seq_length if not self.use_past else 1
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowercase= {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowercase= dict(**lowerCAmelCase , **lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
__lowercase= common_inputs['decoder_input_ids'].shape[1]
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= decoder_seq_length + 3
__lowercase= (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase= torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase )] , dim=1 )
__lowercase= []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase, __lowercase= self.num_layers
__lowercase= min(lowerCAmelCase , lowerCAmelCase )
__lowercase= max(lowerCAmelCase , lowerCAmelCase ) - min_num_layers
__lowercase= 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
torch.zeros(lowerCAmelCase ),
) )
# TODO: test this.
__lowercase= encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(lowerCAmelCase , lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowercase, __lowercase= common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowercase= seqlen + 2
__lowercase, __lowercase= self.num_layers
__lowercase, __lowercase= self.num_attention_heads
__lowercase= (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase= common_inputs['attention_mask'].dtype
__lowercase= torch.cat(
[common_inputs['attention_mask'], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
__lowercase= [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
]
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase= tokenizer.num_special_tokens_to_add(lowerCAmelCase )
__lowercase= compute_effective_axis_dimension(
lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase= [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase= dict(tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase ) )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = -1 , lowerCAmelCase = -1 , lowerCAmelCase = False , lowerCAmelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
elif self.task == "causal-lm":
__lowercase= self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
else:
__lowercase= self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
return common_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase= super()._flatten_past_key_values_(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
__lowercase= super(lowerCAmelCase , self )._flatten_past_key_values_(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
| 304 | 1 |
import datasets
_lowerCamelCase : Optional[int] = '''\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n'''
_lowerCamelCase : Optional[Any] = '''\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'''
_lowerCamelCase : str = '''\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'''
def a_ ( __lowercase : Optional[int] , __lowercase : Optional[int] ) -> Dict:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
'''simple docstring'''
def A ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def A ( self : List[str] , lowercase : List[Any] , lowercase : str ):
'''simple docstring'''
return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )} | 282 | """simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCamelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCamelCase = "main"
# Default branch name
__lowerCamelCase = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__lowerCamelCase = "aaaaaaa"
# This commit does not exist, so we should 404.
__lowerCamelCase = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCamelCase = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def UpperCAmelCase ( ):
"""simple docstring"""
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def UpperCAmelCase ( ):
"""simple docstring"""
print('Bonjour!' )
yield
print('Au revoir!' )
class UpperCamelCase__( unittest.TestCase ):
def snake_case__ ( self ) -> List[str]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class UpperCamelCase__( unittest.TestCase ):
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def snake_case__ ( self ,__UpperCAmelCase ) -> Dict:
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
def snake_case__ ( self ,__UpperCAmelCase ) -> Any:
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def snake_case__ ( self ) -> Union[str, Any]:
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['start_positions', 'end_positions'] )
class UpperCamelCase__( __A ):
pass
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
@require_tf
def snake_case__ ( self ) -> str:
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,['start_positions', 'end_positions'] )
class UpperCamelCase__( __A ):
pass
self.assertEqual(find_labels(__UpperCAmelCase ) ,['labels'] )
@require_flax
def snake_case__ ( self ) -> List[Any]:
# Flax models don't have labels
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
class UpperCamelCase__( __A ):
pass
self.assertEqual(find_labels(__UpperCAmelCase ) ,[] )
| 221 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase ):
UpperCamelCase_ : Tuple = '''timm_backbone'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = backbone
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = features_only
SCREAMING_SNAKE_CASE : int = use_pretrained_backbone
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Dict = out_indices if out_indices is not None else (-1,)
| 363 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
snake_case = ["""small""", """medium""", """large"""]
snake_case = """lm_head.decoder.weight"""
snake_case = """lm_head.weight"""
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = torch.load(lowercase )
SCREAMING_SNAKE_CASE : Any = d.pop(lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
torch.save(lowercase , os.path.join(lowercase , lowercase ) )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
snake_case = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
snake_case = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
snake_case = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 319 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[str] =field(
default=lowercase , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase )} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a : Optional[str] =field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[str] =field(
default=lowercase , metadata={"help": "The input training data file (a text file)."} )
a : Optional[str] =field(
default=lowercase , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
a : Optional[str] =field(
default=lowercase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
a : bool =field(
default=lowercase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
a : bool =field(
default=lowercase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
a : bool =field(default=lowercase , metadata={"help": "Whether ot not to use whole word mask."} )
a : float =field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a : float =field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
a : int =field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
a : int =field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
a : bool =field(
default=lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def a__ ( SCREAMING_SNAKE_CASE : DataTrainingArguments , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[str] = None , ):
'''simple docstring'''
def _dataset(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[str]=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE , )
return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size )
else:
return TextDataset(
tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCAmelCase : int = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowerCAmelCase : Optional[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
lowerCAmelCase : Dict = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
lowerCAmelCase : int = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
lowerCAmelCase : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCAmelCase : Optional[int] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowerCAmelCase : List[Any] = (
get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCAmelCase : Union[str, Any] = (
get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , evaluate=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCAmelCase : int = DataCollatorForPermutationLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCAmelCase : Any = DataCollatorForWholeWordMask(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability )
else:
lowerCAmelCase : List[str] = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase : Any = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , prediction_loss_only=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase : List[str] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=SCREAMING_SNAKE_CASE )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase : str = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCAmelCase : List[str] = trainer.evaluate()
lowerCAmelCase : Union[str, Any] = math.exp(eval_output["eval_loss"] )
lowerCAmelCase : Tuple = {"perplexity": perplexity}
lowerCAmelCase : int = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(SCREAMING_SNAKE_CASE )
return results
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="dpr"
def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__="absolute" , snake_case__ = 0 , **snake_case__ , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : Dict = attention_probs_dropout_prob
lowerCAmelCase : Dict = max_position_embeddings
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : Any = layer_norm_eps
lowerCAmelCase : Dict = projection_dim
lowerCAmelCase : Dict = position_embedding_type
| 108 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {}
class __snake_case( lowerCamelCase__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = """llama"""
UpperCAmelCase : int = ["""past_key_values"""]
def __init__( self , A_=3_2000 , A_=4096 , A_=1_1008 , A_=32 , A_=32 , A_=None , A_="silu" , A_=2048 , A_=0.0_2 , A_=1e-6 , A_=True , A_=0 , A_=1 , A_=2 , A_=1 , A_=False , A_=None , **A_ , ) -> List[str]:
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = intermediate_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase = num_attention_heads
lowerCAmelCase = num_key_value_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = initializer_range
lowerCAmelCase = rms_norm_eps
lowerCAmelCase = pretraining_tp
lowerCAmelCase = use_cache
lowerCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ , )
def __snake_case ( self ) -> Optional[Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , snake_case__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'got {self.rope_scaling}' )
lowerCAmelCase = self.rope_scaling.get("""type""" , snake_case__ )
lowerCAmelCase = self.rope_scaling.get("""factor""" , snake_case__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(snake_case__ , snake_case__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' ) | 362 |
'''simple docstring'''
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
pass
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
pass
class __snake_case:
'''simple docstring'''
def __init__( self ) -> int:
lowerCAmelCase = [
[],
[],
[],
]
def __snake_case ( self , A_ , A_ ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(A_ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __snake_case ( self ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ) -> str:
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class __snake_case:
'''simple docstring'''
def __init__( self ) -> Dict:
lowerCAmelCase = []
def __snake_case ( self , A_ ) -> None:
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(A_ )
def __snake_case ( self ) -> int:
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
lowerCAmelCase = min(self.queue )
self.queue.remove(A_ )
return data
def __str__( self ) -> str:
return str(self.queue )
def _snake_case ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(_SCREAMING_SNAKE_CASE )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_SCREAMING_SNAKE_CASE )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _snake_case ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_SCREAMING_SNAKE_CASE )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_SCREAMING_SNAKE_CASE )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue() | 187 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCAmelCase__ = NewType("""DataClass""", Any)
lowerCAmelCase__ = NewType("""DataClassType""", Any)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Dict ) -> List[str]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: list ) -> Callable[[str], Any]:
'''simple docstring'''
A__ = {str(SCREAMING_SNAKE_CASE_ ): choice for choice in choices}
return lambda SCREAMING_SNAKE_CASE_ : str_to_choice.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( *,
SCREAMING_SNAKE_CASE_: Union[str, List[str]] = None , SCREAMING_SNAKE_CASE_: str = None , SCREAMING_SNAKE_CASE_: Any = dataclasses.MISSING , SCREAMING_SNAKE_CASE_: Callable[[], Any] = dataclasses.MISSING , SCREAMING_SNAKE_CASE_: dict = None , **SCREAMING_SNAKE_CASE_: str , ) -> dataclasses.Field:
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
A__ = {}
if aliases is not None:
A__ = aliases
if help is not None:
A__ = help
return dataclasses.field(metadata=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , default_factory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 42
def __init__( self , lowercase , **lowercase ) -> Tuple:
'''simple docstring'''
if "formatter_class" not in kwargs:
A__ = ArgumentDefaultsHelpFormatter
super().__init__(**lowercase )
if dataclasses.is_dataclass(lowercase ):
A__ = [dataclass_types]
A__ = list(lowercase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowercase )
@staticmethod
def UpperCamelCase ( lowercase , lowercase ) -> Tuple:
'''simple docstring'''
A__ = F'--{field.name}'
A__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowercase ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
A__ = kwargs.pop("aliases" , [] )
if isinstance(lowercase , lowercase ):
A__ = [aliases]
A__ = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(lowercase , "UnionType" ) and isinstance(lowercase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowercase ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F' Problem encountered in field \'{field.name}\'.' )
if type(lowercase ) not in field.type.__args__:
# filter `str` in Union
A__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A__ = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A__ = (
field.type.__args__[0] if isinstance(lowercase , field.type.__args__[1] ) else field.type.__args__[1]
)
A__ = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A__ = {}
if origin_type is Literal or (isinstance(field.type , lowercase ) and issubclass(field.type , lowercase )):
if origin_type is Literal:
A__ = field.type.__args__
else:
A__ = [x.value for x in field.type]
A__ = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
A__ = field.default
else:
A__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A__ = copy(lowercase )
# Hack because type=bool in argparse does not behave as we want.
A__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A__ = default
# This tells argparse we accept 0 or 1 value after --field_name
A__ = "?"
# This is the value that will get picked if we do --field_name (without value)
A__ = True
elif isclass(lowercase ) and issubclass(lowercase , lowercase ):
A__ = field.type.__args__[0]
A__ = "+"
if field.default_factory is not dataclasses.MISSING:
A__ = field.default_factory()
elif field.default is dataclasses.MISSING:
A__ = True
else:
A__ = field.type
if field.default is not dataclasses.MISSING:
A__ = field.default
elif field.default_factory is not dataclasses.MISSING:
A__ = field.default_factory()
else:
A__ = True
parser.add_argument(lowercase , *lowercase , **lowercase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A__ = False
parser.add_argument(F'--no_{field.name}' , action="store_false" , dest=field.name , **lowercase )
def UpperCamelCase ( self , lowercase ) -> Any:
'''simple docstring'''
if hasattr(lowercase , "_argument_group_name" ):
A__ = self.add_argument_group(dtype._argument_group_name )
else:
A__ = self
try:
A__ = get_type_hints(lowercase )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowercase ):
A__ = ".".join(map(lowercase , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(lowercase ):
if not field.init:
continue
A__ = type_hints[field.name]
self._parse_dataclass_field(lowercase , lowercase )
def UpperCamelCase ( self , lowercase=None , lowercase=False , lowercase=True , lowercase=None , lowercase=None , ) -> Tuple[DataClass, ...]:
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A__ = []
if args_filename:
args_files.append(Path(lowercase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A__ = ArgumentParser()
args_file_parser.add_argument(lowercase , type=lowercase , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
A__ , A__ = args_file_parser.parse_known_args(args=lowercase )
A__ = vars(lowercase ).get(args_file_flag.lstrip("-" ) , lowercase )
if cmd_args_file_paths:
args_files.extend([Path(lowercase ) for p in cmd_args_file_paths] )
A__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A__ = file_args + args if args is not None else file_args + sys.argv[1:]
A__ , A__ = self.parse_known_args(args=lowercase )
A__ = []
for dtype in self.dataclass_types:
A__ = {f.name for f in dataclasses.fields(lowercase ) if f.init}
A__ = {k: v for k, v in vars(lowercase ).items() if k in keys}
for k in keys:
delattr(lowercase , lowercase )
A__ = dtype(**lowercase )
outputs.append(lowercase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowercase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def UpperCamelCase ( self , lowercase , lowercase = False ) -> Tuple[DataClass, ...]:
'''simple docstring'''
A__ = set(args.keys() )
A__ = []
for dtype in self.dataclass_types:
A__ = {f.name for f in dataclasses.fields(lowercase ) if f.init}
A__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
A__ = dtype(**lowercase )
outputs.append(lowercase )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(lowercase )}' )
return tuple(lowercase )
def UpperCamelCase ( self , lowercase , lowercase = False ) -> Tuple[DataClass, ...]:
'''simple docstring'''
with open(Path(lowercase ) , encoding="utf-8" ) as open_json_file:
A__ = json.loads(open_json_file.read() )
A__ = self.parse_dict(lowercase , allow_extra_keys=lowercase )
return tuple(lowercase )
def UpperCamelCase ( self , lowercase , lowercase = False ) -> Tuple[DataClass, ...]:
'''simple docstring'''
A__ = self.parse_dict(yaml.safe_load(Path(lowercase ).read_text() ) , allow_extra_keys=lowercase )
return tuple(lowercase )
| 68 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : int = IFInpaintingSuperResolutionPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__UpperCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {'latents'}
def _snake_case (self ):
return self._get_superresolution_dummy_components()
def _snake_case (self , __lowercase , __lowercase=0 ):
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case (self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case (self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _snake_case (self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case (self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case (self ):
self._test_save_load_local()
def _snake_case (self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 174 | 0 |
def __UpperCamelCase ( _A ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
_A = int(input('''Enter number: ''').strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 167 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class A ( unittest.TestCase ):
def __init__( self, UpperCamelCase__, UpperCamelCase__=7, UpperCamelCase__=3, UpperCamelCase__=30, UpperCamelCase__=400, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=[0.5, 0.5, 0.5], UpperCamelCase__=True, UpperCamelCase__=1 / 255, UpperCamelCase__=True, ):
"""simple docstring"""
lowerCAmelCase_ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_pad
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
if not batched:
lowerCAmelCase_ = image_inputs[0]
if isinstance(UpperCamelCase__, Image.Image ):
lowerCAmelCase_ , lowerCAmelCase_ = image.size
else:
lowerCAmelCase_ , lowerCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase_ = int(self.size['''shortest_edge'''] * h / w )
lowerCAmelCase_ = self.size['''shortest_edge''']
elif w > h:
lowerCAmelCase_ = self.size['''shortest_edge''']
lowerCAmelCase_ = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCAmelCase_ = self.size['''shortest_edge''']
lowerCAmelCase_ = self.size['''shortest_edge''']
else:
lowerCAmelCase_ = []
for image in image_inputs:
lowerCAmelCase_ , lowerCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase_ = max(UpperCamelCase__, key=lambda UpperCamelCase__ : item[0] )[0]
lowerCAmelCase_ = max(UpperCamelCase__, key=lambda UpperCamelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = DeformableDetrImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_rescale''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_pad''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor()
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowerCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, masks_path=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify masks
lowerCAmelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), UpperCamelCase__ )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
| 167 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : list[str] | None = None ):
lowercase_ :Any = word_bank or []
# create a table
lowercase_ :int = len(__lowerCamelCase ) + 1
lowercase_ :list[list[list[str]]] = []
for _ in range(__lowerCamelCase ):
table.append([] )
# seed value
lowercase_ :List[str] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(__lowerCamelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__lowerCamelCase )] == word:
lowercase_ :list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(__lowerCamelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__lowerCamelCase )]:
combination.reverse()
return table[len(__lowerCamelCase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 223 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCAmelCase : Tuple =logging.get_logger(__name__)
lowerCAmelCase : List[str] ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCAmelCase : Optional[int] ={
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase : List[Any] ={
'''RUCAIBox/mvp''': 1_024,
}
class a_ ( _lowerCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ["input_ids", "attention_mask"]
__A = MvpTokenizer
def __init__( self : Optional[Any] , lowercase : Any=None , lowercase : List[Any]=None , lowercase : Dict=None , lowercase : int="replace" , lowercase : int="<s>" , lowercase : List[str]="</s>" , lowercase : Optional[Any]="</s>" , lowercase : List[str]="<s>" , lowercase : List[str]="<unk>" , lowercase : List[str]="<pad>" , lowercase : Tuple="<mask>" , lowercase : Tuple=False , lowercase : Dict=True , **lowercase : List[str] , ):
"""simple docstring"""
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
lowercase_ :Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase ) != add_prefix_space:
lowercase_ :List[str] = getattr(lowercase , pre_tok_state.pop("type" ) )
lowercase_ :int = add_prefix_space
lowercase_ :Optional[int] = pre_tok_class(**lowercase )
lowercase_ :Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ :List[Any] = "post_processor"
lowercase_ :str = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
lowercase_ :Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ :int = tuple(state["sep"] )
if "cls" in state:
lowercase_ :Any = tuple(state["cls"] )
lowercase_ :int = False
if state.get("add_prefix_space" , lowercase ) != add_prefix_space:
lowercase_ :Union[str, Any] = add_prefix_space
lowercase_ :int = True
if state.get("trim_offsets" , lowercase ) != trim_offsets:
lowercase_ :Any = trim_offsets
lowercase_ :int = True
if changes_to_apply:
lowercase_ :Tuple = getattr(lowercase , state.pop("type" ) )
lowercase_ :Any = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : int , lowercase : Dict ):
"""simple docstring"""
lowercase_ :List[str] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
lowercase_ :Union[str, Any] = value
def lowercase__ ( self : Optional[Any] , *lowercase : List[Any] , **lowercase : Any ):
"""simple docstring"""
lowercase_ :Any = kwargs.get("is_split_into_words" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowercase , **lowercase )
def lowercase__ ( self : Optional[Any] , *lowercase : Optional[int] , **lowercase : int ):
"""simple docstring"""
lowercase_ :Any = kwargs.get("is_split_into_words" , lowercase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowercase , **lowercase )
def lowercase__ ( self : Dict , lowercase : str , lowercase : Optional[str] = None ):
"""simple docstring"""
lowercase_ :str = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def lowercase__ ( self : Tuple , lowercase : Dict , lowercase : int=None ):
"""simple docstring"""
lowercase_ :List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : int , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
"""simple docstring"""
lowercase_ :Union[str, Any] = [self.sep_token_id]
lowercase_ :Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 223 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : Any , snake_case : Optional[int]=13 , snake_case : List[str]=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Tuple=True , snake_case : int=99 , snake_case : Any=16 , snake_case : Dict=36 , snake_case : Any=6 , snake_case : Dict=6 , snake_case : Dict=6 , snake_case : int=37 , snake_case : int="gelu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : Dict=512 , snake_case : List[Any]=16 , snake_case : Any=2 , snake_case : Any=0.02 , snake_case : Optional[int]=3 , snake_case : List[Any]=4 , snake_case : List[str]=None , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Optional[int] = embedding_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_hidden_groups
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : int ) -> Optional[int]:
__UpperCAmelCase : List[Any] = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
__UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case )
__UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , snake_case : str , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : str = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , sentence_order_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase__ ( self : Dict , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple ) -> int:
__UpperCAmelCase : Optional[Any] = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Optional[int] , snake_case : Dict , snake_case : int , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Any:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Any = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[str] ) -> int:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : int , snake_case : List[Any] , snake_case : List[Any] , snake_case : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = self.num_choices
__UpperCAmelCase : List[Any] = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : List[Any] = config_and_inputs
__UpperCAmelCase : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Any = True
def lowerCamelCase__ ( self : Optional[int] , snake_case : Any , snake_case : Dict , snake_case : Tuple=False ) -> Optional[Any]:
__UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
__UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Dict ) -> int:
__UpperCAmelCase : List[Any] = AlbertModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
__UpperCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCAmelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case )[0]
__UpperCAmelCase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case )
__UpperCAmelCase : int = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) ) | 240 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class a ( _a ):
"""simple docstring"""
def __init__( self : Optional[Any] ) -> int:
# test for the above condition
self.test()
def lowerCamelCase__ ( self : Dict ) -> List[str]:
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Any = False
while not completed:
if counter == 1:
self.reset()
__UpperCAmelCase : Optional[int] = self.advance()
if not self.does_advance(snake_case ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = self.update(snake_case )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self : Optional[int] , snake_case : int ) -> Optional[int]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self : List[Any] , snake_case : int ) -> int:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self : int ) -> Optional[int]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self : int ) -> Tuple:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : List[Any]=False ) -> Any:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class a ( _a ):
"""simple docstring"""
def __init__( self : int , snake_case : List[int] ) -> Tuple:
super(snake_case , self ).__init__()
if not isinstance(snake_case , snake_case ) or len(snake_case ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(snake_case , snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
__UpperCAmelCase : Dict = token_ids
__UpperCAmelCase : Tuple = len(self.token_ids )
__UpperCAmelCase : List[str] = -1 # the index of the currently fulfilled step
__UpperCAmelCase : int = False
def lowerCamelCase__ ( self : List[str] ) -> str:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__ ( self : Any , snake_case : int ) -> Optional[int]:
if not isinstance(snake_case , snake_case ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> Optional[int]:
if not isinstance(snake_case , snake_case ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case )}' )
__UpperCAmelCase : Any = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Tuple = False
if self.does_advance(snake_case ):
self.fulfilled_idx += 1
__UpperCAmelCase : Union[str, Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Union[str, Any] = completed
else:
# failed to make progress.
__UpperCAmelCase : List[str] = True
self.reset()
return stepped, completed, reset
def lowerCamelCase__ ( self : int ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Union[str, Any] = 0
def lowerCamelCase__ ( self : str ) -> Optional[int]:
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase__ ( self : int , snake_case : Dict=False ) -> List[str]:
__UpperCAmelCase : List[str] = PhrasalConstraint(self.token_ids )
if stateful:
__UpperCAmelCase : int = self.seqlen
__UpperCAmelCase : Optional[Any] = self.fulfilled_idx
__UpperCAmelCase : List[Any] = self.completed
return new_constraint
class a :
"""simple docstring"""
def __init__( self : List[str] , snake_case : List[List[int]] , snake_case : Dict=True ) -> Any:
__UpperCAmelCase : List[Any] = max([len(snake_case ) for one in nested_token_ids] )
__UpperCAmelCase : Union[str, Any] = {}
for token_ids in nested_token_ids:
__UpperCAmelCase : List[str] = root
for tidx, token_id in enumerate(snake_case ):
if token_id not in level:
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = level[token_id]
if no_subsets and self.has_subsets(snake_case , snake_case ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f' {nested_token_ids}.' )
__UpperCAmelCase : Tuple = root
def lowerCamelCase__ ( self : Dict , snake_case : List[str] ) -> List[Any]:
__UpperCAmelCase : Dict = self.trie
for current_token in current_seq:
__UpperCAmelCase : List[str] = start[current_token]
__UpperCAmelCase : str = list(start.keys() )
return next_tokens
def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[str] ) -> Any:
__UpperCAmelCase : Optional[Any] = self.next_tokens(snake_case )
return len(snake_case ) == 0
def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> Optional[int]:
__UpperCAmelCase : str = list(root.values() )
if len(snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(snake_case ) for nn in next_nodes] )
def lowerCamelCase__ ( self : Optional[int] , snake_case : int , snake_case : Dict ) -> str:
__UpperCAmelCase : Dict = self.count_leaves(snake_case )
return len(snake_case ) != leaf_count
class a ( _a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case : List[List[int]] ) -> str:
super(snake_case , self ).__init__()
if not isinstance(snake_case , snake_case ) or len(snake_case ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(snake_case , snake_case ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(snake_case , snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
__UpperCAmelCase : Optional[int] = DisjunctiveTrie(snake_case )
__UpperCAmelCase : Tuple = nested_token_ids
__UpperCAmelCase : List[Any] = self.trie.max_height
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Union[str, Any] = False
def lowerCamelCase__ ( self : int ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.trie.next_tokens(self.current_seq )
if len(snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__ ( self : Tuple , snake_case : int ) -> Dict:
if not isinstance(snake_case , snake_case ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case )}' )
__UpperCAmelCase : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase__ ( self : Any , snake_case : int ) -> Tuple:
if not isinstance(snake_case , snake_case ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case )}' )
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : str = False
if self.does_advance(snake_case ):
self.current_seq.append(snake_case )
__UpperCAmelCase : int = True
else:
__UpperCAmelCase : Optional[Any] = True
self.reset()
__UpperCAmelCase : Optional[Any] = self.trie.reached_leaf(self.current_seq )
__UpperCAmelCase : Tuple = completed
return stepped, completed, reset
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
__UpperCAmelCase : str = False
__UpperCAmelCase : Tuple = []
def lowerCamelCase__ ( self : Tuple ) -> Any:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase__ ( self : Any , snake_case : Optional[int]=False ) -> Tuple:
__UpperCAmelCase : str = DisjunctiveConstraint(self.token_ids )
if stateful:
__UpperCAmelCase : Tuple = self.seqlen
__UpperCAmelCase : Dict = self.current_seq
__UpperCAmelCase : str = self.completed
return new_constraint
class a :
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case : List[Constraint] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = constraints
# max # of steps required to fulfill a given constraint
__UpperCAmelCase : int = max([c.seqlen for c in constraints] )
__UpperCAmelCase : int = len(snake_case )
__UpperCAmelCase : Optional[int] = False
self.init_state()
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : str = [constraint.copy(stateful=snake_case ) for constraint in self.constraints]
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Dict = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase__ ( self : Tuple ) -> int:
__UpperCAmelCase : int = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__UpperCAmelCase : Optional[int] = constraint.advance()
if isinstance(snake_case , snake_case ):
token_list.append(snake_case )
elif isinstance(snake_case , snake_case ):
token_list.extend(snake_case )
else:
__UpperCAmelCase : Optional[Any] = self.inprogress_constraint.advance()
if isinstance(snake_case , snake_case ):
token_list.append(snake_case )
elif isinstance(snake_case , snake_case ):
token_list.extend(snake_case )
if len(snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__ ( self : List[str] , snake_case : Optional[List[int]] ) -> Optional[int]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__UpperCAmelCase , __UpperCAmelCase : Dict = self.add(snake_case )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase__ ( self : List[str] , snake_case : int ) -> List[str]:
if not isinstance(snake_case , snake_case ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
__UpperCAmelCase , __UpperCAmelCase : str = False, False
if self.completed:
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : str = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = self.inprogress_constraint.update(snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=snake_case ) )
__UpperCAmelCase : Optional[Any] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__UpperCAmelCase : str = None
if len(self.pending_constraints ) == 0:
# we're done!
__UpperCAmelCase : Optional[int] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(snake_case ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = pending_constraint.update(snake_case )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(snake_case )
__UpperCAmelCase : Tuple = None
if not complete and stepped:
__UpperCAmelCase : List[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__UpperCAmelCase : Optional[int] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__UpperCAmelCase : Any = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase__ ( self : int , snake_case : Optional[int]=True ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
__UpperCAmelCase : str = [
constraint.copy(stateful=snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__UpperCAmelCase : Union[str, Any] = self.inprogress_constraint.copy(stateful=snake_case )
__UpperCAmelCase : Tuple = [constraint.copy() for constraint in self.pending_constraints]
return new_state | 240 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=7_68 ) -> Any:
super().__init__(__lowerCamelCase )
a = proj_size
a = CLIPVisionModel(__lowerCamelCase )
a = PaintByExampleMapper(__lowerCamelCase )
a = nn.LayerNorm(config.hidden_size )
a = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
a = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[str]=False ) -> Tuple:
a = self.model(pixel_values=__lowerCamelCase )
a = clip_output.pooler_output
a = self.mapper(latent_states[:, None] )
a = self.final_layer_norm(__lowerCamelCase )
a = self.proj_out(__lowerCamelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class snake_case__ (nn.Module ):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : List[Any] ) -> Any:
super().__init__()
a = (config.num_hidden_layers + 1) // 5
a = config.hidden_size
a = 1
a = nn.ModuleList(
[
BasicTransformerBlock(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , activation_fn="gelu" , attention_bias=__lowerCamelCase )
for _ in range(__lowerCamelCase )
] )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : int ) -> Any:
for block in self.blocks:
a = block(__lowerCamelCase )
return hidden_states
| 107 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {'vocab_file': 'spiece.model'}
__lowerCAmelCase : Optional[int] = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
__lowerCAmelCase : Dict = {'bert_for_seq_generation': 512}
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str="<s>" , __lowerCamelCase : Optional[int]="</s>" , __lowerCamelCase : int="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<::::>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[str] , ) -> None:
a = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , sep_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def __UpperCAmelCase ( self : Dict ) -> Dict:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Optional[Any]:
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : Optional[Any] , __lowerCamelCase : Dict ) -> Optional[Any]:
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : str ) -> List[str]:
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] ) -> int:
return self.sp_model.piece_to_id(__lowerCamelCase )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[str] ) -> Any:
a = self.sp_model.IdToPiece(__lowerCamelCase )
return token
def __UpperCAmelCase ( self : Any , __lowerCamelCase : Dict ) -> Any:
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__lowerCamelCase ) + token
a = []
else:
current_sub_tokens.append(__lowerCamelCase )
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 107 | 1 |
from __future__ import annotations
import math
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ ) -> int:
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, snake_case_, snake_case_, snake_case_ ), minimax(depth + 1, node_index * 2 + 1, snake_case_, snake_case_, snake_case_ ), )
if is_max
else min(
minimax(depth + 1, node_index * 2, snake_case_, snake_case_, snake_case_ ), minimax(depth + 1, node_index * 2 + 1, snake_case_, snake_case_, snake_case_ ), )
)
def _UpperCamelCase ( ) -> None:
__UpperCAmelCase : int = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__UpperCAmelCase : Any = math.log(len(snake_case_ ), 2 )
print(f'''Optimal value : {minimax(0, 0, snake_case_, snake_case_, snake_case_ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 368 | from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__=1e-1_2 ) -> str:
__UpperCAmelCase : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
__UpperCAmelCase : int = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(snake_case__, axis=1 ), a_min=snake_case__ ) ).T
return jnp.matmul(snake_case__, norm_emb_a.T )
class _snake_case ( nn.Module ):
lowerCamelCase__: CLIPConfig
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Any ) -> Tuple:
__UpperCAmelCase : List[str] = FlaxCLIPVisionModule(self.config.vision_config )
__UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : int = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
__UpperCAmelCase : int = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
__UpperCAmelCase : Tuple = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
__UpperCAmelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self: List[Any] , __lowerCamelCase: Dict ) -> Dict:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowerCamelCase )[1]
__UpperCAmelCase : List[str] = self.visual_projection(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = jax_cosine_distance(__lowerCamelCase , self.special_care_embeds )
__UpperCAmelCase : Optional[Any] = jax_cosine_distance(__lowerCamelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
__UpperCAmelCase : List[str] = 0.0
__UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCamelCase )
# Use a lower threshold if an image has any special care concept
__UpperCAmelCase : List[Any] = is_special_care * 0.01
__UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
__UpperCAmelCase : List[str] = jnp.round(__lowerCamelCase , 3 )
__UpperCAmelCase : Any = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class _snake_case ( _lowercase ):
lowerCamelCase__: int = CLIPConfig
lowerCamelCase__: Tuple = "clip_input"
lowerCamelCase__: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self: Union[str, Any] , __lowerCamelCase: CLIPConfig , __lowerCamelCase: Optional[Tuple] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: jnp.dtype = jnp.floataa , __lowerCamelCase: bool = True , **__lowerCamelCase: Optional[int] , ) -> int:
if input_shape is None:
__UpperCAmelCase : Dict = (1, 2_24, 2_24, 3)
__UpperCAmelCase : Tuple = self.module_class(config=__lowerCamelCase , dtype=__lowerCamelCase , **__lowerCamelCase )
super().__init__(__lowerCamelCase , __lowerCamelCase , input_shape=__lowerCamelCase , seed=__lowerCamelCase , dtype=__lowerCamelCase , _do_init=_do_init )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: jax.random.KeyArray , __lowerCamelCase: Tuple , __lowerCamelCase: FrozenDict = None ) -> FrozenDict:
# init input tensor
__UpperCAmelCase : Tuple = jax.random.normal(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Dict = jax.random.split(__lowerCamelCase )
__UpperCAmelCase : Optional[int] = {"params": params_rng, "dropout": dropout_rng}
__UpperCAmelCase : str = self.module.init(__lowerCamelCase , __lowerCamelCase )["params"]
return random_params
def __call__( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: dict = None , ) -> List[Any]:
__UpperCAmelCase : int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , rngs={} , )
| 342 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __snake_case :
def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any]=1_3 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=9_9 , __lowerCAmelCase : Dict=[1, 1, 2] , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Dict=3_2 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : List[str]=8 , __lowerCAmelCase : Tuple=3_7 , __lowerCAmelCase : List[Any]="gelu_new" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : List[Any]=5_1_2 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : int=3 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[int]=False , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Dict = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : Dict = use_input_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Optional[Any] = block_sizes
_lowerCamelCase : List[Any] = num_decoder_layers
_lowerCamelCase : List[Any] = d_model
_lowerCamelCase : Any = n_head
_lowerCamelCase : List[str] = d_head
_lowerCamelCase : Optional[int] = d_inner
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[str] = hidden_dropout
_lowerCamelCase : Any = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : Dict = type_vocab_size
_lowerCamelCase : Tuple = 2
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : int = scope
_lowerCamelCase : Optional[int] = initializer_std
# Used in the tests to check the size of the first attention layer
_lowerCamelCase : Optional[Any] = n_head
# Used in the tests to check the size of the first hidden state
_lowerCamelCase : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_lowerCamelCase : Any = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_lowerCamelCase : List[str] = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : str = None
if self.use_input_mask:
_lowerCamelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : int = TFFunnelModel(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : List[Any] = [input_ids, input_mask]
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = TFFunnelModel(config=__lowerCAmelCase )
_lowerCamelCase : int = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : Tuple = False
_lowerCamelCase : Tuple = TFFunnelModel(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFFunnelBaseModel(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Any = [input_ids, input_mask]
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_lowerCamelCase : Any = False
_lowerCamelCase : Optional[Any] = TFFunnelBaseModel(config=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_lowerCamelCase : Any = False
_lowerCamelCase : List[str] = TFFunnelBaseModel(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , ):
"""simple docstring"""
_lowerCamelCase : Tuple = TFFunnelForPreTraining(config=__lowerCAmelCase )
_lowerCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : str = TFFunnelForMaskedLM(config=__lowerCAmelCase )
_lowerCamelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.num_labels
_lowerCamelCase : Optional[Any] = TFFunnelForSequenceClassification(config=__lowerCAmelCase )
_lowerCamelCase : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Any = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : str = self.num_choices
_lowerCamelCase : Any = TFFunnelForMultipleChoice(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : str = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : List[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_lowerCamelCase : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.num_labels
_lowerCamelCase : Optional[int] = TFFunnelForTokenClassification(config=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
_lowerCamelCase : Dict = TFFunnelForQuestionAnswering(config=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Any = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Dict = config_and_inputs
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ : Dict = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ : str = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ : Union[str, Any] = False
snake_case__ : Any = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = TFFunnelModelTester(self )
_lowerCamelCase : Dict = ConfigTester(self , config_class=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
@require_tf
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Optional[int] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ : Union[str, Any] = False
snake_case__ : int = False
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = TFFunnelModelTester(self , base=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
| 72 |
"""simple docstring"""
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def snake_case_ ( A_ : dict, A_ : int, A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = set()
# keep track of all the paths to be checked
_lowerCamelCase : str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_lowerCamelCase : str = queue.pop(0 )
# get the last node from the path
_lowerCamelCase : List[Any] = path[-1]
if node not in explored:
_lowerCamelCase : Union[str, Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_lowerCamelCase : Union[str, Any] = list(A_ )
new_path.append(A_ )
queue.append(A_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A_ )
# in case there's no path between the 2 nodes
return []
def snake_case_ ( A_ : dict, A_ : int, A_ : Dict ):
'''simple docstring'''
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_lowerCamelCase : Optional[int] = [start]
_lowerCamelCase : int = set(A_ )
# Keep tab on distances from `start` node.
_lowerCamelCase : int = {start: 0, target: -1}
while queue:
_lowerCamelCase : Optional[Any] = queue.pop(0 )
if node == target:
_lowerCamelCase : Any = (
dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A_ )
queue.append(A_ )
_lowerCamelCase : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 72 | 1 |
from __future__ import annotations
snake_case_ = '''#'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self):
lowercase__ : dict = {}
def snake_case_ ( self , a):
lowercase__ : List[str] = self._trie
for char in text:
if char not in trie:
lowercase__ : Dict = {}
lowercase__ : Optional[int] = trie[char]
lowercase__ : Optional[Any] = True
def snake_case_ ( self , a):
lowercase__ : Union[str, Any] = self._trie
for char in prefix:
if char in trie:
lowercase__ : Any = trie[char]
else:
return []
return self._elements(a)
def snake_case_ ( self , a):
lowercase__ : Any = []
for c, v in d.items():
lowercase__ : List[Any] = [' '] if c == END else [(c + s) for s in self._elements(a)]
result.extend(a)
return tuple(a)
snake_case_ = Trie()
snake_case_ = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
lowercase__ : Any = trie.find_word(SCREAMING_SNAKE_CASE_ )
return tuple(string + word for word in suffixes )
def snake_case__ ( ):
'''simple docstring'''
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 216 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def snake_case__ ( SCREAMING_SNAKE_CASE_ : BertModel , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
lowercase__ : Tuple = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
lowercase__ : Dict = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
lowercase__ : str = model.state_dict()
def to_tf_var_name(SCREAMING_SNAKE_CASE_ : str ):
for patt, repl in iter(SCREAMING_SNAKE_CASE_ ):
lowercase__ : Optional[int] = name.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return f"""bert/{name}"""
def create_tf_var(SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : tf.Session ):
lowercase__ : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
lowercase__ : List[str] = tf.get_variable(dtype=SCREAMING_SNAKE_CASE_ , shape=tensor.shape , name=SCREAMING_SNAKE_CASE_ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(SCREAMING_SNAKE_CASE_ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
lowercase__ : Tuple = to_tf_var_name(SCREAMING_SNAKE_CASE_ )
lowercase__ : str = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
lowercase__ : str = torch_tensor.T
lowercase__ : Any = create_tf_var(tensor=SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ , session=SCREAMING_SNAKE_CASE_ )
tf.keras.backend.set_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[int] = session.run(SCREAMING_SNAKE_CASE_ )
print(f"""Successfully created {tf_name}: {np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
lowercase__ : Tuple = tf.train.Saver(tf.trainable_variables() )
saver.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : Optional[Any]=None ):
'''simple docstring'''
lowercase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='Directory in which to save tensorflow model' )
lowercase__ : List[str] = parser.parse_args(SCREAMING_SNAKE_CASE_ )
lowercase__ : int = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=SCREAMING_SNAKE_CASE_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 216 | 1 |
from scipy.stats import spearmanr
import datasets
_lowerCAmelCase : Optional[int] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_lowerCAmelCase : Union[str, Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_lowerCAmelCase : Tuple = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :Dict , snake_case :Tuple , snake_case :int=False ):
'''simple docstring'''
A_ : List[Any] = spearmanr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 300 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : List[str] = {"""tokenizer_file""": """tokenizer.json"""}
lowercase__ : List[str] = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
_SCREAMING_SNAKE_CASE = None
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[Any]="<unk>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Any=False , **SCREAMING_SNAKE_CASE_ : Any , ):
super().__init__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
lowerCAmelCase_ : str = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) )
lowerCAmelCase_ : str = add_prefix_space
lowerCAmelCase_ : str = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = add_prefix_space
def SCREAMING_SNAKE_CASE__ ( self : Any , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : List[Any] = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
' pretokenized inputs.' )
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any , *SCREAMING_SNAKE_CASE_ : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : List[Any] = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
' pretokenized inputs.' )
return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
lowerCAmelCase_ : Optional[int] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : "Conversation" ):
lowerCAmelCase_ : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [self.eos_token_id] )
if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length:
lowerCAmelCase_ : Any = input_ids[-self.model_max_length :]
return input_ids
| 224 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
SCREAMING_SNAKE_CASE__ = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
@lru_cache()
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowercase_ = bs[:]
lowercase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
lowercase_ = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: str ):
'''simple docstring'''
lowercase_ = set()
lowercase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ = char
return pairs
class __lowerCamelCase ( snake_case__ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else bos_token
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else sep_token
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else cls_token
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding="utf-8" ) as vocab_handle:
lowercase_ = json.load(UpperCAmelCase_ )
lowercase_ = {v: k for k, v in self.encoder.items()}
lowercase_ = errors # how to handle errors in decoding
lowercase_ = bytes_to_unicode()
lowercase_ = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding="utf-8" ) as merges_handle:
lowercase_ = merges_handle.read().split("\n" )[1:-1]
lowercase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowercase_ = {}
lowercase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.encoder )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase_ = tuple(UpperCAmelCase_ )
lowercase_ = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
lowercase_ = min(UpperCAmelCase_ , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ = bigram
lowercase_ = []
lowercase_ = 0
while i < len(UpperCAmelCase_ ):
try:
lowercase_ = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ = tuple(UpperCAmelCase_ )
lowercase_ = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowercase_ = get_pairs(UpperCAmelCase_ )
lowercase_ = " ".join(UpperCAmelCase_ )
lowercase_ = word
return word
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = []
for token in re.findall(self.pat , UpperCAmelCase_ ):
lowercase_ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_ ).split(" " ) )
return bpe_tokens
def A__ ( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase_ )
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = "".join(UpperCAmelCase_ )
lowercase_ = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Dict:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + "\n" )
lowercase_ = 0
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
lowercase_ = token_index
writer.write(" ".join(UpperCAmelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[str]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ = [self.cls_token_id]
lowercase_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> Dict:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , UpperCAmelCase , UpperCAmelCase=False , **UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_ ) > 0 and not text[0].isspace()):
lowercase_ = " " + text
return (text, kwargs)
| 365 |
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> int:
'''simple docstring'''
if return_pvalue:
lowercase_ = pearsonr(UpperCAmelCase , UpperCAmelCase )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(UpperCAmelCase , UpperCAmelCase )[0] )}
| 297 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[int]:
try:
__lowerCamelCase : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase : List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase : Any = strtobool(lowerCamelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
a =parse_flag_from_env("""RUN_SLOW""", default=False)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
return unittest.skip('Test was skipped' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=None , lowerCamelCase__=None ) -> Any:
if test_case is None:
return partial(lowerCamelCase__ , version=lowerCamelCase__ )
return unittest.skipUnless(is_torch_version('>=' , lowerCamelCase__ ) , F"test requires torch version >= {version}" )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase__ )
a =(
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase__ )
class A_ ( unittest.TestCase ):
_UpperCAmelCase : Union[str, Any] = True
@classmethod
def lowerCAmelCase ( cls : int):
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase ( cls : int):
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def lowerCAmelCase ( self : Any):
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('**/*'):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(SCREAMING_SNAKE_CASE__)
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[Any]):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[mock.Mock, List[mock.Mock]]):
__lowerCamelCase : Tuple = mocks if isinstance(SCREAMING_SNAKE_CASE__ ,(tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
__lowerCamelCase : int = AcceleratorState()
__lowerCamelCase : Optional[int] = tensor[None].clone().to(state.device )
__lowerCamelCase : Dict = gather(lowerCamelCase__ ).cpu()
__lowerCamelCase : Union[str, Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase__ ):
return False
return True
class A_ :
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Union[str, Any] = returncode
__lowerCamelCase : List[str] = stdout
__lowerCamelCase : Tuple = stderr
async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
while True:
__lowerCamelCase : str = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=False , lowerCamelCase__=False ) -> _RunOutput:
if echo:
print('\nRunning: ' , ' '.join(lowerCamelCase__ ) )
__lowerCamelCase : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase__ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__lowerCamelCase : int = []
__lowerCamelCase : str = []
def tee(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="" ):
__lowerCamelCase : Any = line.decode('utf-8' ).rstrip()
sink.append(lowerCamelCase__ )
if not quiet:
print(lowerCamelCase__ , lowerCamelCase__ , file=lowerCamelCase__ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase__ : tee(lowerCamelCase__ , lowerCamelCase__ , sys.stderr , label='stderr:' ) ) ),
] , timeout=lowerCamelCase__ , )
return _RunOutput(await p.wait() , lowerCamelCase__ , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=1_8_0 , lowerCamelCase__=False , lowerCamelCase__=True ) -> _RunOutput:
__lowerCamelCase : Union[str, Any] = asyncio.get_event_loop()
__lowerCamelCase : str = loop.run_until_complete(
_stream_subprocess(lowerCamelCase__ , env=lowerCamelCase__ , stdin=lowerCamelCase__ , timeout=lowerCamelCase__ , quiet=lowerCamelCase__ , echo=lowerCamelCase__ ) )
__lowerCamelCase : Union[str, Any] = ' '.join(lowerCamelCase__ )
if result.returncode > 0:
__lowerCamelCase : Optional[int] = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class A_ ( SCREAMING_SNAKE_CASE ):
pass
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> int:
try:
__lowerCamelCase : List[str] = subprocess.check_output(lowerCamelCase__ , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(lowerCamelCase__ , 'decode' ):
__lowerCamelCase : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(lowerCamelCase__ )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 73 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = ''
SCREAMING_SNAKE_CASE__ = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
super().__init__(self , **_lowerCamelCase )
a :Union[str, Any] = repo_info
a :int = token
a :int = None
def SCREAMING_SNAKE_CASE__ ( self ):
if self.dir_cache is None:
a :Dict = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a :List[Any] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(_lowerCamelCase ): {'''name''': str(_lowerCamelCase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = "rb" , **_lowerCamelCase , ):
if not isinstance(self.repo_info , _lowerCamelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
a :Optional[int] = hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , **_lowerCamelCase ):
self._get_dirs()
a :Union[str, Any] = self._strip_protocol(_lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ):
self._get_dirs()
a :str = PurePosixPath(path.strip('''/''' ) )
a :Tuple = {}
for p, f in self.dir_cache.items():
a :Optional[int] = PurePosixPath(p.strip('''/''' ) )
a :str = p.parent
if root == path:
a :List[str] = f
a :Any = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 94 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__UpperCamelCase : Dict = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Dict = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__UpperCamelCase : Any = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Union[str, Any] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = TaTokenizer
UpperCamelCase_ = []
def __init__( self : int , UpperCamelCase__ : str=None , UpperCamelCase__ : int=None , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Union[str, Any]="<pad>" , UpperCamelCase__ : Dict=100 , UpperCamelCase__ : Tuple=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = [f"""<extra_id_{i}>""" for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE : List[Any] = len(set(filter(lambda UpperCamelCase__ : bool('''extra_id_''' in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : List[Any] = vocab_file
SCREAMING_SNAKE_CASE : Any = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : int = extra_ids
@staticmethod
def __A ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE : Optional[Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase__ , )
return max_model_length
def __A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : str = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE : str = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self : Any ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(r'''<extra_id_\d+>''' , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def __A ( self : List[str] ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
| 258 | import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
__UpperCamelCase : Optional[int] = parser.parse_args()
if args.model_type == "bert":
__UpperCamelCase : Optional[int] = BertForMaskedLM.from_pretrained(args.model_name)
__UpperCamelCase : Optional[int] = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
__UpperCamelCase : List[Any] = model.state_dict()
__UpperCamelCase : Union[str, Any] = {}
for w in ["word_embeddings", "position_embeddings"]:
__UpperCamelCase : List[Any] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__UpperCamelCase : Optional[int] = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
__UpperCamelCase : Any = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__UpperCamelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__UpperCamelCase : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__UpperCamelCase : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__UpperCamelCase : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__UpperCamelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__UpperCamelCase : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__UpperCamelCase : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__UpperCamelCase : List[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__UpperCamelCase : List[str] = state_dict['cls.predictions.decoder.weight']
__UpperCamelCase : int = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
__UpperCamelCase : List[str] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
__UpperCamelCase : List[Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 258 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__A : str = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __UpperCamelCase ( _A : Tuple ) ->Dict:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
lowerCamelCase_ =[image]
lowerCamelCase_ =[trans(img.convert("""RGB""" ) ) for img in image]
lowerCamelCase_ =torch.stack(lowerCAmelCase__ )
return image
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> List[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Tuple:
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}' )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> int:
# get the original timestep using init_timestep
lowerCamelCase_ =min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =max(num_inference_steps - init_timestep , 0 )
lowerCamelCase_ =self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )-> Tuple:
if not isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE_ )}' )
lowerCamelCase_ =image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
lowerCamelCase_ =init_latents.shape
lowerCamelCase_ =randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
print("""add noise to latents at timestep""" , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =init_latents
return latents
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0.8 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , )-> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(SCREAMING_SNAKE_CASE_ )
# 2. Preprocess image
lowerCamelCase_ =preprocess(SCREAMING_SNAKE_CASE_ )
# 3. set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device )
lowerCamelCase_ =self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
lowerCamelCase_ =timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# 4. Prepare latent variables
lowerCamelCase_ =self.prepare_latents(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.unet.dtype , self.device , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =latents
# 5. Denoising loop
for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ):
# 1. predict noise model_output
lowerCamelCase_ =self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCamelCase_ =self.scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , use_clipped_model_output=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , ).prev_sample
lowerCamelCase_ =(image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ =self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
| 154 |
from torch import nn
def lowerCAmelCase_ (lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' )
| 147 | 0 |
import mpmath # for roots of unity
import numpy as np
class __magic_name__ :
def __init__( self , _lowercase=None , _lowercase=None )-> Optional[int]:
# Input as list
UpperCamelCase_ = list(poly_a or [0] )[:]
UpperCamelCase_ = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
UpperCamelCase_ = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
UpperCamelCase_ = len(self.polyB )
# Add 0 to make lengths equal a power of 2
UpperCamelCase_ = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
UpperCamelCase_ = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
UpperCamelCase_ = self.__multiply()
def UpperCAmelCase_ ( self , _lowercase )-> str:
UpperCamelCase_ = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(_lowercase ) <= 1:
return dft[0]
#
UpperCamelCase_ = self.c_max_length // 2
while next_ncol > 0:
UpperCamelCase_ = [[] for i in range(_lowercase )]
UpperCamelCase_ = self.root**next_ncol
# First half of next step
UpperCamelCase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
UpperCamelCase_ = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
UpperCamelCase_ = new_dft
UpperCamelCase_ = next_ncol // 2
return dft[0]
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = self.__dft("A" )
UpperCamelCase_ = self.__dft("B" )
UpperCamelCase_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
UpperCamelCase_ = 2
while next_ncol <= self.c_max_length:
UpperCamelCase_ = [[] for i in range(_lowercase )]
UpperCamelCase_ = self.root ** (next_ncol // 2)
UpperCamelCase_ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
UpperCamelCase_ = new_inverse_c
next_ncol *= 2
# Unpack
UpperCamelCase_ = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self )-> List[str]:
UpperCamelCase_ = "A = " + " + ".join(
F"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A] ) )
UpperCamelCase_ = "B = " + " + ".join(
F"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B] ) )
UpperCamelCase_ = "A*B = " + " + ".join(
F"{coef}*x^{i}" for coef, i in enumerate(self.product ) )
return F"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __magic_name__ :
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=4 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.1 , _lowercase=True , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , )-> str:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_multiple_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = weight_tying
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self )-> int:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Dict:
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXJapaneseModel(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase )-> List[str]:
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Dict:
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
# first forward pass
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , output_hidden_states=_lowercase )
UpperCamelCase_ = output_from_no_past["hidden_states"][0]
UpperCamelCase_ = model(
_lowercase , attention_mask=_lowercase , past_key_values=_lowercase , output_hidden_states=_lowercase , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCamelCase_ :str = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ :int = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCamelCase_ :int = False
UpperCamelCase_ :Dict = False
UpperCamelCase_ :List[str] = False
UpperCamelCase_ :int = False
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Any:
# This regression test was failing with PyTorch < 1.3
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowercase )
@slow
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = "abeja/gpt-neox-japanese-2.7b"
UpperCamelCase_ = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
UpperCamelCase_ = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
UpperCamelCase_ = GPTNeoXJapaneseTokenizer.from_pretrained(_lowercase )
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowercase )
UpperCamelCase_ = []
for prompt in prompts:
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="pt" ).input_ids
UpperCamelCase_ = model.generate(_lowercase , max_length=50 )
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
predicted_outputs += generated_string
self.assertListEqual(_lowercase , _lowercase )
| 60 | 1 |
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 68 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> List[str]:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(__a )
def snake_case_ (self ) -> Any:
UpperCamelCase = self._create_example_records()
UpperCamelCase = Dataset.from_list(__a )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(__a ):
self.assertDictEqual(__a , example_records[i] )
def snake_case_ (self ) -> Tuple:
UpperCamelCase = self._create_example_records()
UpperCamelCase = Dataset.from_list(__a )
UpperCamelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def snake_case_ (self ) -> str: # checks what happens with missing columns
UpperCamelCase = [{"col_1": 1}, {"col_2": "x"}]
UpperCamelCase = Dataset.from_list(__a )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def snake_case_ (self ) -> Union[str, Any]: # checks if the type can be inferred from the second record
UpperCamelCase = [{"col_1": []}, {"col_1": [1, 2]}]
UpperCamelCase = Dataset.from_list(__a )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def snake_case_ (self ) -> List[str]:
UpperCamelCase = Dataset.from_list([] )
self.assertEqual(len(__a ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 244 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "visual_bert"
def __init__(self , __a=3_05_22 , __a=7_68 , __a=5_12 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.02 , __a=1e-1_2 , __a=False , __a=True , __a=1 , __a=0 , __a=2 , **__a , ) -> int:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = visual_embedding_dim
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = bypass_transformer
UpperCamelCase = special_visual_initialize
| 244 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger()
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =field(default_factory=__snake_case )
lowerCamelCase__ =field(default_factory=__snake_case )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : str = len(list(m.modules() ) ) == 1 or isinstance(a_ , nn.Convad ) or isinstance(a_ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a_ )
def __call__(self , a_ ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return list(filter(lambda a_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =1
lowerCamelCase__ =field(default_factory=__snake_case )
lowerCamelCase__ =field(default_factory=__snake_case )
lowerCamelCase__ =True
def __call__(self , a_ ):
'''simple docstring'''
__snake_case : int = Tracker(self.dest )(a_ ).parametrized
__snake_case : Optional[int] = Tracker(self.src )(a_ ).parametrized
__snake_case : Union[str, Any] = list(filter(lambda a_ : type(a_ ) not in self.src_skip , a_ ) )
__snake_case : Dict = list(filter(lambda a_ : type(a_ ) not in self.dest_skip , a_ ) )
if len(a_ ) != len(a_ ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(a_ )} operations while"""
f""" destination module has {len(a_ )}.""" )
for dest_m, src_m in zip(a_ , a_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
super().__init__()
__snake_case : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('''conv1''', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('''block''' ), f"""Unexpected layer name {k}"""
__snake_case : Optional[int] = len(a_ ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
__snake_case : List[Any] = nn.ModuleDict(a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return get_trunk_forward_outputs(
a_ , out_feat_keys=a_ , feature_blocks=self._feature_blocks , )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = x.split('''-''' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self , a_ ):
'''simple docstring'''
if x not in self:
__snake_case : Tuple = self.convert_name_to_timm(a_ )
__snake_case : str = partial(lambda: (timm.create_model(a_ , pretrained=a_ ).eval(), None) )
else:
__snake_case : Optional[int] = super().__getitem__(a_ )
return val
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __getitem__(self , a_ ):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
__snake_case : List[Any] = RegNetModel
else:
__snake_case : List[str] = RegNetForImageClassification
return val
def lowercase ( _snake_case : Dict , _snake_case : List[str] , _snake_case : List[Tuple[str, str]] ) ->Optional[Any]:
"""simple docstring"""
for from_key, to_key in keys:
__snake_case : Union[str, Any] = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def lowercase ( _snake_case : str , _snake_case : Callable[[], nn.Module] , _snake_case : Callable[[], nn.Module] , _snake_case : RegNetConfig , _snake_case : Path , _snake_case : bool = True , ) ->Dict:
"""simple docstring"""
print(f"""Converting {name}...""" )
with torch.no_grad():
__snake_case , __snake_case : Optional[int] = from_model_func()
__snake_case : Tuple = our_model_func(_snake_case ).eval()
__snake_case : List[Any] = ModuleTransfer(src=_snake_case , dest=_snake_case , raise_if_mismatch=_snake_case )
__snake_case : Union[str, Any] = torch.randn((1, 3, 224, 224) )
module_transfer(_snake_case )
if from_state_dict is not None:
__snake_case : int = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__snake_case : Tuple = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
__snake_case : Union[str, Any] = manually_copy_vissl_head(_snake_case , our_model.state_dict() , _snake_case )
our_model.load_state_dict(_snake_case )
__snake_case : List[Any] = our_model(_snake_case , output_hidden_states=_snake_case )
__snake_case : Union[str, Any] = (
our_outputs.logits if isinstance(_snake_case , _snake_case ) else our_outputs.last_hidden_state
)
__snake_case : Optional[Any] = from_model(_snake_case )
__snake_case : Optional[Any] = from_output[-1] if type(_snake_case ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
__snake_case : str = our_outputs.hidden_states[-1]
assert torch.allclose(_snake_case , _snake_case ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_snake_case , )
__snake_case : Any = 224 if '''seer''' not in name else 384
# we can use the convnext one
__snake_case : Dict = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_snake_case )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_snake_case , )
print(f"""Pushed {name}""" )
def lowercase ( _snake_case : Path , _snake_case : str = None , _snake_case : bool = True ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = '''imagenet-1k-id2label.json'''
__snake_case : Optional[Any] = 1_000
__snake_case : int = (1, num_labels)
__snake_case : Optional[Any] = '''huggingface/label-files'''
__snake_case : Optional[int] = num_labels
__snake_case : str = json.load(open(cached_download(hf_hub_url(_snake_case , _snake_case , repo_type='''dataset''' ) ) , '''r''' ) )
__snake_case : str = {int(_snake_case ): v for k, v in idalabel.items()}
__snake_case : Union[str, Any] = idalabel
__snake_case : int = {v: k for k, v in idalabel.items()}
__snake_case : int = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case )
__snake_case : int = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
__snake_case : int = NameToOurModelFuncMap()
__snake_case : List[str] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_snake_case : str , _snake_case : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
__snake_case : Tuple = torch.hub.load_state_dict_from_url(_snake_case , model_dir=str(_snake_case ) , map_location='''cpu''' )
__snake_case : Dict = model_func()
# check if we have a head, if yes add it
__snake_case : Any = files['''classy_state_dict''']['''base_model''']['''model''']
__snake_case : Tuple = model_state_dict['''trunk''']
model.load_state_dict(_snake_case )
return model.eval(), model_state_dict["heads"]
# pretrained
__snake_case : List[Any] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : str = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : int = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : Optional[Any] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__snake_case : Union[str, Any] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : List[str] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__snake_case : Optional[int] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__snake_case : Optional[int] = partial(
_snake_case , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_snake_case , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _snake_case , _snake_case , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_snake_case , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _snake_case , _snake_case , _snake_case , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported regnet* architecture,"""
""" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 102 |
import requests
from bsa import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_a = CLIPConfig
_a = ['CLIPEncoderLayer']
def __init__( self : int, lowerCamelCase : CLIPConfig )-> List[Any]:
super().__init__(lowerCamelCase )
lowerCamelCase__ : Dict =CLIPVisionModelWithProjection(config.vision_config )
lowerCamelCase__ : Dict =nn.Linear(config.vision_config.projection_dim, 1 )
lowerCamelCase__ : Union[str, Any] =nn.Linear(config.vision_config.projection_dim, 1 )
@torch.no_grad()
def snake_case ( self : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : List[str], lowerCamelCase : List[Any]=0.5, lowerCamelCase : Optional[Any]=0.5 )-> Optional[int]:
lowerCamelCase__ : Dict =self.vision_model(lowerCamelCase )[0]
lowerCamelCase__ : List[Any] =self.p_head(lowerCamelCase )
lowerCamelCase__ : Optional[Any] =nsfw_detected.flatten()
lowerCamelCase__ : Any =nsfw_detected > p_threshold
lowerCamelCase__ : Optional[Any] =nsfw_detected.tolist()
if any(lowerCamelCase ):
logger.warning(
'''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, nsfw_detected_ in enumerate(lowerCamelCase ):
if nsfw_detected_:
lowerCamelCase__ : Tuple =np.zeros(images[idx].shape )
lowerCamelCase__ : str =self.w_head(lowerCamelCase )
lowerCamelCase__ : List[str] =watermark_detected.flatten()
lowerCamelCase__ : Optional[Any] =watermark_detected > w_threshold
lowerCamelCase__ : int =watermark_detected.tolist()
if any(lowerCamelCase ):
logger.warning(
'''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
''' Try again with a different prompt and/or seed.''' )
for idx, watermark_detected_ in enumerate(lowerCamelCase ):
if watermark_detected_:
lowerCamelCase__ : Optional[int] =np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 352 |
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =[0] * len(__lowerCamelCase )
lowerCamelCase__ : List[Any] =[]
lowerCamelCase__ : List[Any] =[1] * len(__lowerCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__lowerCamelCase ) ):
if indegree[i] == 0:
queue.append(__lowerCamelCase )
while queue:
lowerCamelCase__ : Tuple =queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCamelCase__ : Optional[Any] =long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__lowerCamelCase )
print(max(__lowerCamelCase ) )
# Adjacency list of Graph
_lowercase : Optional[Any] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 272 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 | """simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Optional[int]:
snake_case_ = data
snake_case_ = None
class UpperCamelCase :
def __init__( self) -> Dict:
snake_case_ = None
snake_case_ = None
def __iter__( self) -> Iterator[Any]:
snake_case_ = self.head
while self.head:
yield node.data
snake_case_ = node.next
if node == self.head:
break
def __len__( self) -> int:
return sum(1 for _ in self)
def __repr__( self) -> str:
return "->".join(str(lowerCAmelCase__) for item in iter(self))
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(len(self), lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> None:
self.insert_nth(0, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> None:
if index < 0 or index > len(self):
raise IndexError('list index out of range.')
snake_case_ = Node(lowerCAmelCase__)
if self.head is None:
snake_case_ = new_node # first node points itself
snake_case_ = snake_case_ = new_node
elif index == 0: # insert at head
snake_case_ = self.head
snake_case_ = snake_case_ = new_node
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = new_node
if index == len(self) - 1: # insert at tail
snake_case_ = new_node
def a_ ( self) -> str:
return self.delete_nth(0)
def a_ ( self) -> Any:
return self.delete_nth(len(self) - 1)
def a_ ( self, lowerCAmelCase__ = 0) -> Any:
if not 0 <= index < len(self):
raise IndexError('list index out of range.')
snake_case_ = self.head
if self.head == self.tail: # just one node
snake_case_ = snake_case_ = None
elif index == 0: # delete head node
snake_case_ = self.tail.next.next
snake_case_ = self.head.next
else:
snake_case_ = self.head
for _ in range(index - 1):
snake_case_ = temp.next
snake_case_ = temp.next
snake_case_ = temp.next.next
if index == len(self) - 1: # delete at tail
snake_case_ = temp
return delete_node.data
def a_ ( self) -> bool:
return len(self) == 0
def UpperCAmelCase ( ) -> None:
snake_case_ = CircularLinkedList()
assert len(UpperCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(UpperCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(UpperCAmelCase ) == i
circular_linked_list.insert_nth(UpperCAmelCase , i + 1 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(UpperCAmelCase ) == "->".join(str(UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ : List[str] = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowercase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 352 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __snake_case : int ) -> int:
if not isinstance(__snake_case , __snake_case ):
__A : List[Any] = f'Input value of [number={number}] must be an integer'
raise TypeError(__snake_case )
if number < 1:
__A : Union[str, Any] = f'Input value of [number={number}] must be > 0'
raise ValueError(__snake_case )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__A : Optional[Any] = int(math.log(number // 3 , 2 ) ) + 2
__A : Union[str, Any] = [3, 5]
__A : List[Any] = 2
__A : Optional[Any] = 3
for block in range(1 , __snake_case ):
for _ in range(__snake_case ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
lowercase__ : str = 0
try:
lowercase__ : List[str] = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""") | 190 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = 4_2
_A = 4_2
class lowerCamelCase__ ( __lowercase , __lowercase):
'''simple docstring'''
_A = 1
@register_to_config
def __init__( self :Any , a :Optional[int] = 2_0_0_0 , a :Union[str, Any] = 0.15 , a :int = 0.01 , a :Any = 1_3_4_8.0 , a :Optional[int] = 1E-5 , a :Optional[Any] = 1 , ) -> Optional[Any]:
# standard deviation of the initial noise distribution
__UpperCamelCase : Any = sigma_max
# setable values
__UpperCamelCase : Tuple = None
self.set_sigmas(a , a , a , a )
def _lowerCamelCase ( self :Optional[int] , a :int , a :Tuple = None ) -> torch.FloatTensor:
return sample
def _lowerCamelCase ( self :Union[str, Any] , a :Any , a :Dict = None , a :List[Any] = None ) -> Optional[Any]:
__UpperCamelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__UpperCamelCase : List[Any] = torch.linspace(1 , a , a , device=a )
def _lowerCamelCase ( self :Tuple , a :int , a :Union[str, Any] = None , a :str = None , a :Union[str, Any] = None ) -> Dict:
__UpperCamelCase : Union[str, Any] = sigma_min if sigma_min is not None else self.config.sigma_min
__UpperCamelCase : Tuple = sigma_max if sigma_max is not None else self.config.sigma_max
__UpperCamelCase : Optional[int] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(a , a )
__UpperCamelCase : Dict = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__UpperCamelCase : Any = torch.exp(torch.linspace(math.log(a ) , math.log(a ) , a ) )
__UpperCamelCase : Any = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _lowerCamelCase ( self :Dict , a :Any , a :Any ) -> List[str]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def _lowerCamelCase ( self :Optional[int] , a :List[str] , a :int , a :Optional[int] , a :List[Any] = None , a :Any = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
__UpperCamelCase : str = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__UpperCamelCase : Optional[int] = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__UpperCamelCase : List[str] = timesteps.to(self.discrete_sigmas.device )
__UpperCamelCase : Union[str, Any] = self.discrete_sigmas[timesteps].to(sample.device )
__UpperCamelCase : Optional[int] = self.get_adjacent_sigma(a , a ).to(sample.device )
__UpperCamelCase : Tuple = torch.zeros_like(a )
__UpperCamelCase : Dict = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__UpperCamelCase : Optional[Any] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__UpperCamelCase : Optional[int] = diffusion.unsqueeze(-1 )
__UpperCamelCase : Union[str, Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__UpperCamelCase : Optional[Any] = randn_tensor(
sample.shape , layout=sample.layout , generator=a , device=sample.device , dtype=sample.dtype )
__UpperCamelCase : Any = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__UpperCamelCase : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=a , prev_sample_mean=a )
def _lowerCamelCase ( self :List[Any] , a :List[str] , a :Any , a :Union[str, Any] = None , a :List[str] = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__UpperCamelCase : Any = randn_tensor(sample.shape , layout=sample.layout , generator=a ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__UpperCamelCase : str = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__UpperCamelCase : str = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__UpperCamelCase : Dict = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__UpperCamelCase : int = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__UpperCamelCase : Union[str, Any] = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__UpperCamelCase : List[str] = step_size.unsqueeze(-1 )
__UpperCamelCase : Optional[Any] = sample + step_size * model_output
__UpperCamelCase : str = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a )
def _lowerCamelCase ( self :Union[str, Any] , a :Optional[int] , a :Any , a :Tuple , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCamelCase : int = timesteps.to(original_samples.device )
__UpperCamelCase : Any = self.discrete_sigmas.to(original_samples.device )[timesteps]
__UpperCamelCase : Union[str, Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(a ) * sigmas[:, None, None, None]
)
__UpperCamelCase : Dict = noise + original_samples
return noisy_samples
def __len__( self :Dict ) -> Optional[int]:
return self.config.num_train_timesteps | 232 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowercase( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : Optional[int] = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Optional[int] = """this is a test"""
UpperCAmelCase : Dict = """this is a test"""
return input_text, output_text
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = """<pad>"""
UpperCAmelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(A ) , 30000 )
def _lowercase( self ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : List[str] = self.get_rust_tokenizer()
UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCAmelCase : str = tokenizer.tokenize(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] )
UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = AlbertTokenizer(A )
UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" )
UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowercase( self ) -> Dict:
# fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 265 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
__SCREAMING_SNAKE_CASE = (
"Wrong input data's dimensions... "
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(lowerCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
__SCREAMING_SNAKE_CASE = (
"Wrong input data's shape... "
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(lowerCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
__SCREAMING_SNAKE_CASE = (
"Input data have different datatype... "
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = []
for value in value_array:
__SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , dataset[0] )
__SCREAMING_SNAKE_CASE = dataset[0].tolist()
for dataset_value in dataset[1:]:
__SCREAMING_SNAKE_CASE = euclidean(lowerCAmelCase_ , lowerCAmelCase_ )
if dist > temp_dist:
__SCREAMING_SNAKE_CASE = temp_dist
__SCREAMING_SNAKE_CASE = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) / (norm(lowerCAmelCase_ ) * norm(lowerCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 195 | 0 |
'''simple docstring'''
import math
import unittest
def A_ ( snake_case ):
assert isinstance(snake_case , snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __UpperCamelCase ( self : Optional[int] ):
with self.assertRaises(lowerCamelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) ,"Zero doesn't have any positive factors, primes must have exactly two." ,)
self.assertFalse(
is_prime(1 ) ,"One only has 1 positive factor, primes must have exactly two." ,)
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 139 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = XLMProphetNetTokenizer
__A = False
__A = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a = XLMProphetNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "[PAD]"
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCamelCase_ ) , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = XLMProphetNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
a = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def UpperCamelCase_ (self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "Hello World!"
a = [35389, 6672, 49, 2]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 227 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : List[str] ):
torch.manual_seed(0 )
_A = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def lowerCAmelCase_ ( self : Any ):
torch.manual_seed(0 )
_A = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def lowerCAmelCase_ ( self : int ):
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_A = self.dummy_uncond_unet
_A = DDIMScheduler()
_A = self.dummy_vq_model
_A = LDMPipeline(unet=_UpperCAmelCase , vqvae=_UpperCAmelCase , scheduler=_UpperCAmelCase )
ldm.to(_UpperCAmelCase )
ldm.set_progress_bar_config(disable=_UpperCAmelCase )
_A = torch.manual_seed(0 )
_A = ldm(generator=_UpperCAmelCase , num_inference_steps=2 , output_type='numpy' ).images
_A = torch.manual_seed(0 )
_A = ldm(generator=_UpperCAmelCase , num_inference_steps=2 , output_type='numpy' , return_dict=_UpperCAmelCase )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_A = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
_A = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(_UpperCAmelCase )
ldm.set_progress_bar_config(disable=_UpperCAmelCase )
_A = torch.manual_seed(0 )
_A = ldm(generator=_UpperCAmelCase , num_inference_steps=5 , output_type='numpy' ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_A = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
_A = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 271 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = inspect.getfile(accelerate.test_utils )
UpperCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
UpperCAmelCase : List[Any] = ['''accelerate''', '''launch''']
UpperCAmelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
UpperCAmelCase : Union[str, Any] = '''default_config.yaml'''
UpperCAmelCase : Union[str, Any] = config_folder / config_file
UpperCAmelCase : Union[str, Any] = config_folder / '''_default_config.yaml'''
UpperCAmelCase : List[Any] = Path('''tests/test_configs''' )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCAmelCase_ ( cls : Tuple ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Optional[int] ):
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Any ):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Dict = '''test-tpu'''
UpperCAmelCase : Optional[int] = '''us-central1-a'''
UpperCAmelCase : List[str] = '''ls'''
UpperCAmelCase : str = ['''accelerate''', '''tpu-config''']
UpperCAmelCase : Optional[Any] = '''cd /usr/share'''
UpperCAmelCase : Optional[Any] = '''tests/test_samples/test_command_file.sh'''
UpperCAmelCase : str = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCAmelCase_ ( self : Any ):
_A = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Dict ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[str] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[Any] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : int ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
| 271 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.