code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stand-in so the module imports when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
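
# A minimal end-to-end usage sketch of the pipeline exercised above, assuming
# the facebook/detr-resnet-50 checkpoint that the slow tests load; the labels,
# scores and boxes printed would match the expectations asserted earlier.
def example_object_detection():
    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    results = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    for result in results:
        print(result["label"], result["score"], result["box"])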
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")

        # each call returns a fresh activation instance, so attributes set on
        # one do not leak onto another
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
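
# For reference, a minimal sketch of the exact (erf-based) GELU that
# gelu_python is checked against above: GELU(x) = x * Phi(x), with Phi the
# standard normal CDF. An illustration, not the library's own implementation.
def gelu_reference(x):
    import math

    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))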
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase=1000 ):
"""simple docstring"""
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
A_ : List[Any] = n - 1
A_ : Dict = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
A_ : Optional[Any] = 0
while count < prec:
A_ : Optional[int] = random.randint(2 , n - 1 )
A_ : Any = bin_exp_mod(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if b != 1:
A_ : Dict = True
for _ in range(_UpperCAmelCase ):
if b == n - 1:
A_ : Optional[int] = False
break
A_ : Any = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_lowerCamelCase : List[str] = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
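
# bin_exp_mod is imported from a sibling module that is not shown here. A
# minimal stand-in sketch, assuming it computes (a ** n) % b by binary
# exponentiation; the real module may differ in signature or details.
def _bin_exp_mod_sketch(a, n, b):
    res = 1
    a %= b
    while n > 0:
        if n & 1:  # current exponent bit is set
            res = res * a % b
        a = a * a % b
        n >>= 1
    return res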
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
args = parser.parse_args()

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path=args.checkpoint_path,
    original_config_file=args.original_config_file,
    image_size=args.image_size,
    prediction_type=args.prediction_type,
    model_type=args.pipeline_type,
    extract_ema=args.extract_ema,
    scheduler_type=args.scheduler_type,
    num_in_channels=args.num_in_channels,
    upcast_attention=args.upcast_attention,
    from_safetensors=args.from_safetensors,
    device=args.device,
    stable_unclip=args.stable_unclip,
    stable_unclip_prior=args.stable_unclip_prior,
    clip_stats_path=args.clip_stats_path,
    controlnet=args.controlnet,
    vae_path=args.vae_path,
)

if args.half:
    pipe.to(torch_dtype=torch.float16)

if args.controlnet:
    # only save the controlnet model
    pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
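
# A short programmatic sketch of the same conversion, assuming the keyword
# interface the argparse block above forwards; the checkpoint, config and
# output paths are placeholders, not real files.
def example_convert():
    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path="v1-5-pruned-emaonly.ckpt",  # placeholder checkpoint path
        original_config_file="v1-inference.yaml",  # placeholder YAML config path
        scheduler_type="pndm",
        extract_ema=True,
    )
    pipe.save_pretrained("converted-model")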
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""") | 237 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def snake_case_ ( self ):
a_ : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=a_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=a_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def snake_case_ ( self ):
a_ : Dict = self.tokenizer_class.from_pretrained("roberta-base" )
a_ : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
a_ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
a_ : List[Any] = tokenizer.encode(
"sequence builders" , add_special_tokens=a_ , add_prefix_space=a_ )
a_ : Tuple = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=a_ , add_prefix_space=a_ )
a_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(a_ )
a_ : Tuple = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case_ ( self ):
a_ : List[str] = self.get_tokenizer()
a_ : str = "Encode this sequence."
a_ : str = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
a_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a_ , a_ )
a_ : Dict = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
a_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a_ , a_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a_ : int = tokenizer.encode(a_ , add_special_tokens=a_ )
a_ : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_ , a_ )
# Testing spaces after special tokens
a_ : Optional[int] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(a_ , lstrip=a_ , rstrip=a_ )} ) # mask token has a left space
a_ : str = tokenizer.convert_tokens_to_ids(a_ )
a_ : Union[str, Any] = "Encode <mask> sequence"
a_ : Union[str, Any] = "Encode <mask>sequence"
a_ : int = tokenizer.encode(a_ )
a_ : Union[str, Any] = encoded.index(a_ )
a_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_ , a_ )
a_ : str = tokenizer.encode(a_ )
a_ : List[str] = encoded.index(a_ )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_ , a_ )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a_ : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
a_ : List[Any] = self.tokenizer_class.from_pretrained(a_ , **a_ )
a_ : List[Any] = "A, <mask> AllenNLP sentence."
a_ : Optional[int] = tokenizer_r.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
a_ : int = tokenizer_p.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a_ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
a_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
a_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def snake_case_ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , a_ )
self.assertEqual(post_processor_state["add_prefix_space"] , a_ )
self.assertEqual(post_processor_state["trim_offsets"] , a_ )
def snake_case_ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a_ : List[Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Tuple = F"""{text_of_1_token} {text_of_1_token}"""
a_ : Dict = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
a_ : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : Optional[int] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
a_ : Dict = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : str = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
a_ : str = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
a_ : Union[str, Any] = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : List[str] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
a_ : str = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
a_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
a_ : Optional[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
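
# A quick usage sketch of the byte-level BPE behaviour exercised above,
# assuming the public roberta-base checkpoint; the ids printed for
# "Hello world!" are the ones asserted in the encode test earlier.
def example_roberta_encode():
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    print(tokenizer.tokenize("lower newer"))  # byte-level BPE; "\u0120" marks a leading space
    print(tokenizer.encode("Hello world!"))  # [0, 31414, 232, 328, 2]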
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def __A ( self , A , A , A ) -> Any:
'''simple docstring'''
__magic_name__ = FlaxBeitModel(config=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ = FlaxBeitForMaskedImageModeling(config=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __A ( self , A , A , A ) -> List[str]:
'''simple docstring'''
__magic_name__ = self.type_sequence_label_size
__magic_name__ = FlaxBeitForImageClassification(config=A )
__magic_name__ = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = FlaxBeitForImageClassification(A )
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(A )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
def __A ( self ) -> None:
'''simple docstring'''
__magic_name__ = FlaxBeitModelTester(self )
__magic_name__ = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def __A ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(A )
__magic_name__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def __A ( self ) -> Any:
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__magic_name__ = self._prepare_for_class(A , A )
__magic_name__ = model_class(A )
@jax.jit
def model_jitted(A , **A ):
return model(pixel_values=A , **A )
with self.subTest('''JIT Enabled''' ):
__magic_name__ = model_jitted(**A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__magic_name__ = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> str:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> List[Any]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
__magic_name__ = np.ones((1, 1_96) , dtype=A )
# forward pass
__magic_name__ = model(pixel_values=A , bool_masked_pos=A )
__magic_name__ = outputs.logits
# verify the logits
__magic_name__ = (1, 1_96, 81_92)
self.assertEqual(logits.shape , A )
__magic_name__ = np.array(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , A , atol=1E-2 ) )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
__magic_name__ = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A , return_tensors='''np''' )
# forward pass
__magic_name__ = model(**A )
__magic_name__ = outputs.logits
# verify the logits
__magic_name__ = (1, 10_00)
self.assertEqual(logits.shape , A )
__magic_name__ = np.array([-1.23_85, -1.09_87, -1.01_08] )
self.assertTrue(np.allclose(logits[0, :3] , A , atol=1E-4 ) )
__magic_name__ = 2_81
self.assertEqual(logits.argmax(-1 ).item() , A )
@slow
def __A ( self ) -> List[str]:
'''simple docstring'''
__magic_name__ = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
__magic_name__ = self.default_image_processor
__magic_name__ = prepare_img()
__magic_name__ = image_processor(images=A , return_tensors='''np''' )
# forward pass
__magic_name__ = model(**A )
__magic_name__ = outputs.logits
# verify the logits
__magic_name__ = (1, 2_18_41)
self.assertEqual(logits.shape , A )
__magic_name__ = np.array([1.68_81, -0.27_87, 0.59_01] )
self.assertTrue(np.allclose(logits[0, :3] , A , atol=1E-4 ) )
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the distinct paths from the top-left cell to the bottom-right cell,
    moving one step up/down/left/right and never revisiting a cell; cells equal
    to 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
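
# A small usage sketch: on an open 2x2 grid there are exactly two
# non-revisiting paths from the top-left to the bottom-right cell.
def example_count_paths() -> None:
    grid = [[0, 0], [0, 0]]
    print(depth_first_search(grid, 0, 0, set()))  # 2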
"""simple docstring"""
lowerCamelCase = 9.80_665
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
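
# A quick usage sketch: the buoyant force on 0.5 m^3 fully submerged in fresh
# water (density ~1000 kg/m^3) under standard gravity is 1000 * 9.80665 * 0.5.
def example_buoyant_force() -> None:
    print(archimedes_principle(fluid_density=1000, volume=0.5))  # 4903.325 N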
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
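
# A short usage sketch: build a small directed graph; add_edge returns self,
# so calls can be chained.
def example_graph() -> None:
    graph: GraphAdjacencyList[int] = GraphAdjacencyList()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(0, 2)
    print(graph)  # {0: [1, 2], 1: [2], 2: []}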
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
@require_torch
def _A ( self: Optional[int] ):
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_a = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(__UpperCamelCase , max_length=len(__UpperCamelCase ) , padding=__UpperCamelCase , return_tensors='''pt''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
@require_torch
def _A ( self: Union[str, Any] ):
_a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors='''pt''' )
self.assertIn('''input_ids''' , __UpperCamelCase )
self.assertIn('''attention_mask''' , __UpperCamelCase )
self.assertNotIn('''labels''' , __UpperCamelCase )
self.assertNotIn('''decoder_attention_mask''' , __UpperCamelCase )
@require_torch
def _A ( self: Any ):
_a = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(text_target=__UpperCamelCase , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def _A ( self: Any ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors='''pt''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def _A ( self: int ):
_a = ['''A long paragraph for summarization.''']
_a = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_a = tokenizer(__UpperCamelCase , return_tensors='''pt''' )
_a = tokenizer(text_target=__UpperCamelCase , return_tensors='''pt''' )
_a = inputs['''input_ids''']
_a = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
def _A ( self: Union[str, Any] ):
pass
def _A ( self: Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_a = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_a = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_a = '''A, <mask> AllenNLP sentence.'''
_a = tokenizer_r.encode_plus(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
_a = tokenizer_p.encode_plus(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__UpperCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__UpperCamelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
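
# A minimal sketch of preparing LED inputs with a global attention mask,
# assuming the allenai/led-base-16384 checkpoint used above; LED models
# conventionally receive global attention on the first (<s>) token.
def example_led_global_attention():
    import torch

    tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    batch = tokenizer(["A long paragraph for summarization."], return_tensors="pt")
    global_attention_mask = torch.zeros_like(batch["input_ids"])
    global_attention_mask[:, 0] = 1  # global attention on <s>
    batch["global_attention_mask"] = global_attention_mask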
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def lowerCAmelCase_ ( self : Dict ):
__A : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Optional[Any] = None
if self.use_labels:
__A : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__A : int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : int ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase_ ( self : str , __A : Optional[Any] , __A : Tuple , __A : List[str] ):
__A : List[Any] = TFRegNetModel(config=__A )
__A : int = model(__A , training=__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase_ ( self : Dict , __A : str , __A : Tuple , __A : List[Any] ):
__A : Optional[Any] = self.num_labels
__A : Optional[int] = TFRegNetForImageClassification(__A )
__A : List[Any] = model(__A , labels=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCAmelCase_ ( self : Dict ):
__A : Tuple = TFRegNetModelTester(self )
__A : int = ConfigTester(self , config_class=__A , has_text_modality=__A )
def lowerCAmelCase_ ( self : List[str] ):
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowerCAmelCase_ ( self : Dict ):
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
def lowerCAmelCase_ ( self : str ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Dict = model_class(__A )
__A : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Optional[int] = [*signature.parameters.keys()]
__A : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def lowerCAmelCase_ ( self : Any ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCAmelCase_ ( self : List[str] ):
def check_hidden_states_output(__A : Optional[int] , __A : str , __A : str ):
__A : Tuple = model_class(__A )
__A : List[str] = model(**self._prepare_for_class(__A , __A ) , training=__A )
__A : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__A : int = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__A , __A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__A : List[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__A : List[Any] = layer_type
__A : Optional[int] = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : str = True
check_hidden_states_output(__A , __A , __A )
def lowerCAmelCase_ ( self : Dict ):
__A , __A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__A : int , __A : Any , __A : List[Any] , __A : Any={} ):
__A : List[str] = model(__A , return_dict=__A , **__A )
__A : Tuple = model(__A , return_dict=__A , **__A ).to_tuple()
def recursive_check(__A : Dict , __A : int ):
if isinstance(__A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__A , __A ):
recursive_check(__A , __A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__A , __A ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__A , __A )
for model_class in self.all_model_classes:
__A : Optional[int] = model_class(__A )
__A : Optional[Any] = self._prepare_for_class(__A , __A )
__A : Tuple = self._prepare_for_class(__A , __A )
check_equivalence(__A , __A , __A )
__A : Dict = self._prepare_for_class(__A , __A , return_labels=__A )
__A : List[Any] = self._prepare_for_class(__A , __A , return_labels=__A )
check_equivalence(__A , __A , __A )
__A : int = self._prepare_for_class(__A , __A )
__A : Optional[int] = self._prepare_for_class(__A , __A )
check_equivalence(__A , __A , __A , {"""output_hidden_states""": True} )
__A : int = self._prepare_for_class(__A , __A , return_labels=__A )
__A : List[Any] = self._prepare_for_class(__A , __A , return_labels=__A )
check_equivalence(__A , __A , __A , {"""output_hidden_states""": True} )
def lowerCAmelCase_ ( self : List[str] ):
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def lowerCAmelCase_ ( self : int ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[str] = TFRegNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
__A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self : str ):
__A : Union[str, Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__A : Tuple = self.default_image_processor
__A : str = prepare_img()
__A : Optional[int] = image_processor(images=__A , return_tensors="""tf""" )
# forward pass
__A : Any = model(**__A , training=__A )
# verify the logits
__A : List[str] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
__A : List[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 )
| 17 |
"""simple docstring"""
import numpy as np
def _UpperCAmelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : np.ndarray , __lowerCamelCase : float = 1E-1_2 , __lowerCamelCase : int = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(__lowerCamelCase )[0] == np.shape(__lowerCamelCase )[1]
# Ensure proper dimensionality.
assert np.shape(__lowerCamelCase )[0] == np.shape(__lowerCamelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__lowerCamelCase ) == np.iscomplexobj(__lowerCamelCase )
_snake_case = np.iscomplexobj(__lowerCamelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__lowerCamelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_snake_case = False
_snake_case = 0
_snake_case = 0
_snake_case = 1E1_2
while not convergence:
        # Multiply the matrix by the vector.
_snake_case = np.dot(__lowerCamelCase , __lowerCamelCase )
# Normalize the resulting output vector.
_snake_case = w / np.linalg.norm(__lowerCamelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_snake_case = vector.conj().T if is_complex else vector.T
_snake_case = np.dot(__lowerCamelCase , np.dot(__lowerCamelCase , __lowerCamelCase ) )
# Check convergence.
_snake_case = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_snake_case = True
_snake_case = lambda_
if is_complex:
_snake_case = np.real(lambda_ )
return lambda_, vector
def _UpperCAmelCase ( ) -> None:
_snake_case = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_snake_case = np.array([41, 4, 20] )
    _snake_case = real_input_matrix.astype(np.complex128 )
_snake_case = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
    _snake_case = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_snake_case = real_input_matrix
_snake_case = real_vector
elif problem_type == "complex":
_snake_case = complex_input_matrix
_snake_case = complex_vector
# Our implementation.
_snake_case , _snake_case = power_iteration(__lowerCamelCase , __lowerCamelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
_snake_case , _snake_case = np.linalg.eigh(__lowerCamelCase )
# Last eigenvalue is the maximum one.
_snake_case = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_snake_case = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take element-wise absolute values of each eigenvector,
        # as eigenvectors are only unique up to sign.
assert np.linalg.norm(np.abs(__lowerCamelCase ) - np.abs(__lowerCamelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
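    # Minimal usage sketch (assumes `power_iteration` resolves to the routine
    # defined above, exactly as `test_power_iteration` assumes): the dominant
    # eigenvalue of [[2, 1], [1, 2]] is 3.
    demo_matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
    demo_value, demo_vector = power_iteration(demo_matrix, np.array([1.0, 0.0]))
    assert abs(demo_value - 3.0) <= 1e-6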
| 224 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
UpperCAmelCase_ : Any = get_logger(__name__)
UpperCAmelCase_ : Tuple = R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class lowerCAmelCase__ :
'''simple docstring'''
@add_start_docstrings(A_)
def __call__( self : Any , lowercase_ : Tuple , lowercase_ : List[str]):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class lowerCAmelCase__ :
'''simple docstring'''
@add_start_docstrings(A_)
def __call__( self : Any , lowercase_ : int , lowercase_ : Tuple):
'''simple docstring'''
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(A_)
def __call__( self : Union[str, Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : Union[str, Any]):
'''simple docstring'''
for processor in self:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = inspect.signature(processor.__call__).parameters
if len(A_) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
F'Make sure that all the required parameters: {list(function_args.keys())} for '
F'{processor.__class__} are passed to the logits processor.')
SCREAMING_SNAKE_CASE_ : Any = processor(A_ , A_ , A_ , **A_)
else:
SCREAMING_SNAKE_CASE_ : Dict = processor(A_ , A_ , A_)
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Tuple):
'''simple docstring'''
if not isinstance(A_ , A_) or not (temperature > 0):
raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}')
SCREAMING_SNAKE_CASE_ : int = temperature
def __call__( self : int , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = scores / self.temperature
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : str = -float('''Inf''') , lowercase_ : Union[str, Any] = 1):
'''simple docstring'''
if not isinstance(A_ , A_) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}')
if not isinstance(A_ , A_) or (min_tokens_to_keep < 1):
raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')
SCREAMING_SNAKE_CASE_ : Optional[int] = top_p
SCREAMING_SNAKE_CASE_ : int = filter_value
SCREAMING_SNAKE_CASE_ : List[Any] = min_tokens_to_keep
def __call__( self : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Dict):
'''simple docstring'''
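        # Nucleus (top-p) filtering: sort tokens by score, keep the smallest set
        # whose cumulative probability exceeds top_p, push every other score down
        # to filter_value, then restore the original token order.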
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = lax.top_k(A_ , scores.shape[-1])
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.full_like(A_ , self.filter_value)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jax.nn.softmax(A_ , axis=-1).cumsum(axis=-1)
SCREAMING_SNAKE_CASE_ : Tuple = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.roll(A_ , 1)
score_mask |= score_mask.at[:, 0].set(A_)
# min tokens to keep
SCREAMING_SNAKE_CASE_ : List[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(A_)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.where(A_ , A_ , A_)
SCREAMING_SNAKE_CASE_ : List[Any] = jax.lax.sort_key_val(A_ , A_)[-1]
return next_scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] = -float('''Inf''') , lowercase_ : Tuple = 1):
'''simple docstring'''
if not isinstance(A_ , A_) or top_k <= 0:
raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}')
SCREAMING_SNAKE_CASE_ : int = max(A_ , A_)
SCREAMING_SNAKE_CASE_ : List[str] = filter_value
def __call__( self : Any , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict):
'''simple docstring'''
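        # Top-k filtering: keep the k highest-scoring tokens per batch entry and
        # set every other score to filter_value, preserving the tensor shape.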
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = scores.shape
SCREAMING_SNAKE_CASE_ : Tuple = jnp.full(batch_size * vocab_size , self.filter_value)
SCREAMING_SNAKE_CASE_ : int = min(self.top_k , scores.shape[-1]) # Safety check
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = lax.top_k(A_ , A_)
SCREAMING_SNAKE_CASE_ : Tuple = jnp.broadcast_to((jnp.arange(A_) * vocab_size)[:, None] , (batch_size, topk)).flatten()
SCREAMING_SNAKE_CASE_ : Any = topk_scores.flatten()
SCREAMING_SNAKE_CASE_ : List[Any] = topk_indices.flatten() + shift
SCREAMING_SNAKE_CASE_ : Optional[Any] = next_scores_flat.at[topk_indices_flat].set(A_)
SCREAMING_SNAKE_CASE_ : Dict = next_scores_flat.reshape(A_ , A_)
return next_scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = bos_token_id
def __call__( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.full(scores.shape , -float('''inf'''))
SCREAMING_SNAKE_CASE_ : Optional[int] = 1 - jnp.bool_(cur_len - 1)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.where(A_ , new_scores.at[:, self.bos_token_id].set(0) , A_)
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = max_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token_id
def __call__( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = jnp.full(scores.shape , -float('''inf'''))
SCREAMING_SNAKE_CASE_ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1)
SCREAMING_SNAKE_CASE_ : List[str] = jnp.where(A_ , new_scores.at[:, self.eos_token_id].set(0) , A_)
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Tuple , lowercase_ : int):
'''simple docstring'''
if not isinstance(A_ , A_) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}')
if not isinstance(A_ , A_) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}')
SCREAMING_SNAKE_CASE_ : str = min_length
SCREAMING_SNAKE_CASE_ : Tuple = eos_token_id
def __call__( self : Optional[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1)
SCREAMING_SNAKE_CASE_ : str = jnp.where(A_ , scores.at[:, self.eos_token_id].set(-float('''inf''')) , A_)
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = list(A_)
SCREAMING_SNAKE_CASE_ : Optional[int] = begin_index
def __call__( self : List[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = 1 - jnp.bool_(cur_len - self.begin_index)
SCREAMING_SNAKE_CASE_ : List[str] = jnp.where(A_ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''')) , A_)
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = list(A_)
def __call__( self : List[Any] , lowercase_ : str , lowercase_ : str , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = scores.at[..., self.suppress_tokens].set(-float('''inf'''))
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(A_)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        SCREAMING_SNAKE_CASE_ : int = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.int32) * -1
for index, token in force_token_map.items():
if token is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = force_token_array.at[index].set(A_)
        SCREAMING_SNAKE_CASE_ : str = jnp.int32(A_)
def __call__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : str):
'''simple docstring'''
def _force_token(lowercase_ : Any):
SCREAMING_SNAKE_CASE_ : Dict = scores.shape[0]
SCREAMING_SNAKE_CASE_ : Any = self.force_token_array[generation_idx]
SCREAMING_SNAKE_CASE_ : Dict = jnp.ones_like(A_ , dtype=scores.dtype) * -float('''inf''')
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype)
SCREAMING_SNAKE_CASE_ : List[Any] = lax.dynamic_update_slice(A_ , A_ , (0, current_token))
return new_scores
SCREAMING_SNAKE_CASE_ : str = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(A_) , lambda: scores , ) , )
return scores
class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = generate_config.eos_token_id
SCREAMING_SNAKE_CASE_ : Tuple = generate_config.no_timestamps_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = generate_config.no_timestamps_token_id + 1
SCREAMING_SNAKE_CASE_ : Tuple = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(A_ , '''max_initial_timestamp_index'''):
SCREAMING_SNAKE_CASE_ : int = generate_config.max_initial_timestamp_index
else:
SCREAMING_SNAKE_CASE_ : Tuple = model_config.vocab_size
if self.max_initial_timestamp_index is None:
SCREAMING_SNAKE_CASE_ : List[Any] = model_config.vocab_size
def __call__( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[int]):
'''simple docstring'''
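        # Whisper timestamp rules: the <|notimestamps|> token is always suppressed,
        # timestamp tokens must appear in valid begin/end pairs, and once their
        # total probability mass beats every text token, only timestamps may be sampled.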
SCREAMING_SNAKE_CASE_ : Dict = scores.at[:, self.no_timestamps_token_id].set(-float('''inf'''))
def handle_pairs(lowercase_ : Tuple , lowercase_ : Optional[int]):
SCREAMING_SNAKE_CASE_ : List[str] = jnp.where((cur_len - self.begin_index) >= 1 , A_ , A_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , A_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.where((cur_len - self.begin_index) < 2 , A_ , A_)
SCREAMING_SNAKE_CASE_ : Dict = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , A_ , A_ , )
return jnp.where(
A_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''')) , scores_k.at[: self.eos_token_id].set(-float('''inf''')) , ) , A_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = jax.vmap(A_)(A_ , A_)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.where(cur_len == self.begin_index , A_ , A_)
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , A_ , )
SCREAMING_SNAKE_CASE_ : Any = self.timestamp_begin + self.max_initial_timestamp_index
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.where(
A_ , scores.at[:, last_allowed + 1 :].set(-float('''inf''')) , A_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
SCREAMING_SNAKE_CASE_ : Dict = jax.nn.log_softmax(A_ , axis=-1)
def handle_cumulative_probs(lowercase_ : Optional[int] , lowercase_ : int):
SCREAMING_SNAKE_CASE_ : Any = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1)
SCREAMING_SNAKE_CASE_ : Optional[int] = jnp.max(logprobs_k[: self.timestamp_begin])
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''')) , A_ , )
SCREAMING_SNAKE_CASE_ : Dict = jax.vmap(A_)(A_ , A_)
return scores
| 703 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {}
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(example['''content'''] , truncation=__a )['''input_ids''']
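    # Characters per token: a rough measure of how well the tokenizer
    # compresses this example.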
SCREAMING_SNAKE_CASE_ : List[Any] = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
UpperCAmelCase_ : Tuple = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase_ : Tuple = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : Tuple = multiprocessing.cpu_count()
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : List[Any] = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Dict = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase_ : str = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 176 | 0 |
def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
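    # Jaccard similarity: |A ∩ B| / |A ∪ B|. With alternative_union=True the
    # denominator is |A| + |B| instead of the size of the true union.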
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase__ : Optional[Any] = len(set_a.intersection(UpperCamelCase__ ) )
if alternative_union:
UpperCamelCase__ : Optional[int] = len(UpperCamelCase__ ) + len(UpperCamelCase__ )
else:
UpperCamelCase__ : int = len(set_a.union(UpperCamelCase__ ) )
return intersection / union
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(UpperCamelCase__ , (list, tuple) ):
UpperCamelCase__ : Optional[Any] = [element for element in set_a if element in set_b]
if alternative_union:
UpperCamelCase__ : Union[str, Any] = len(UpperCamelCase__ ) + len(UpperCamelCase__ )
return len(UpperCamelCase__ ) / union
else:
UpperCamelCase__ : Tuple = set_a + [element for element in set_b if element not in set_a]
return len(UpperCamelCase__ ) / len(UpperCamelCase__ )
return len(UpperCamelCase__ ) / len(UpperCamelCase__ )
return None
if __name__ == "__main__":
lowerCamelCase ={"a", "b", "c", "d", "e"}
lowerCamelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 285 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase =logging.get_logger(__name__)
lowerCamelCase ={
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''distilbert'''
SCREAMING_SNAKE_CASE_ = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , __SCREAMING_SNAKE_CASE=3_0_5_2_2 , __SCREAMING_SNAKE_CASE=5_1_2 , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=1_2 , __SCREAMING_SNAKE_CASE=7_6_8 , __SCREAMING_SNAKE_CASE=4 * 7_6_8 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.2 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = vocab_size
UpperCamelCase__ : Dict = max_position_embeddings
UpperCamelCase__ : Any = sinusoidal_pos_embds
UpperCamelCase__ : Dict = n_layers
UpperCamelCase__ : List[Any] = n_heads
UpperCamelCase__ : Dict = dim
UpperCamelCase__ : Dict = hidden_dim
UpperCamelCase__ : Optional[int] = dropout
UpperCamelCase__ : Optional[Any] = attention_dropout
UpperCamelCase__ : Tuple = activation
UpperCamelCase__ : Optional[int] = initializer_range
UpperCamelCase__ : Optional[int] = qa_dropout
UpperCamelCase__ : str = seq_classif_dropout
super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( UpperCamelCase_ ):
"""simple docstring"""
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase__ : Union[str, Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ : int = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 285 | 1 |
def _A ( __magic_name__ = 1000 ):
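    # Sum of the decimal digits of 2**power (cf. Project Euler problem 16,
    # which asks for power=1000).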
lowercase__ = 2**power
lowercase__ = 0
while n:
lowercase__ , lowercase__ = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 611 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1024 ):
lowercase__ , lowercase__ = [], []
lowercase__ = list(zip(__magic_name__ , __magic_name__ ) )
lowercase__ , lowercase__ = sorted_examples[0]
def is_too_big(__magic_name__ ):
return tok(__magic_name__ , return_tensors="pt" ).input_ids.shape[1] > max_tokens
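    # Greedy packing: keep concatenating consecutive examples onto the current
    # pair until adding one more would exceed max_tokens, then emit the pair.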
for src, tgt in tqdm(sorted_examples[1:] ):
lowercase__ = new_src + " " + src
lowercase__ = new_tgt + " " + tgt
if is_too_big(__magic_name__ ) or is_too_big(__magic_name__ ): # cant fit, finalize example
finished_src.append(__magic_name__ )
finished_tgt.append(__magic_name__ )
lowercase__ , lowercase__ = src, tgt
else: # can fit, keep adding
lowercase__ , lowercase__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__magic_name__ )
finished_tgt.append(__magic_name__ )
return finished_src, finished_tgt
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowercase__ = Path(__magic_name__ )
save_path.mkdir(exist_ok=__magic_name__ )
for split in ["train"]:
lowercase__ , lowercase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
lowercase__ = [x.rstrip() for x in Path(__magic_name__ ).open().readlines()]
lowercase__ = [x.rstrip() for x in Path(__magic_name__ ).open().readlines()]
lowercase__ , lowercase__ = pack_examples(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
print(f'''packed {split} split from {len(__magic_name__ )} examples -> {len(__magic_name__ )}.''' )
Path(save_path / f'''{split}.source''' ).open("w" ).write("\n".join(__magic_name__ ) )
Path(save_path / f'''{split}.target''' ).open("w" ).write("\n".join(__magic_name__ ) )
for split in ["val", "test"]:
lowercase__ , lowercase__ = data_dir / f'''{split}.source''', data_dir / f'''{split}.target'''
shutil.copyfile(__magic_name__ , save_path / f'''{split}.source''' )
shutil.copyfile(__magic_name__ , save_path / f'''{split}.target''' )
def _A ( ):
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=__magic_name__ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=__magic_name__ , default=128 )
parser.add_argument("--data_dir" , type=__magic_name__ )
parser.add_argument("--save_path" , type=__magic_name__ )
lowercase__ = parser.parse_args()
lowercase__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__magic_name__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 611 | 1 |
'''simple docstring'''
UpperCamelCase__: Any = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 127 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['''vqvae''']
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ,mel=SCREAMING_SNAKE_CASE__ ,vqvae=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler ,SCREAMING_SNAKE_CASE__ ) else 10_00
@torch.no_grad()
def __call__( self ,SCREAMING_SNAKE_CASE__ = 1 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__ = None ,SCREAMING_SNAKE_CASE__=True ,) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = steps or self.get_default_steps()
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__SCREAMING_SNAKE_CASE :Any = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__SCREAMING_SNAKE_CASE :int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=SCREAMING_SNAKE_CASE__ ,device=self.device ,)
__SCREAMING_SNAKE_CASE :int = noise
__SCREAMING_SNAKE_CASE :str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = self.mel.audio_slice_to_image(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = np.frombuffer(input_image.tobytes() ,dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__SCREAMING_SNAKE_CASE :Dict = (input_image / 2_55) * 2 - 1
__SCREAMING_SNAKE_CASE :Optional[int] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__SCREAMING_SNAKE_CASE :Optional[Any] = self.vqvae.encode(torch.unsqueeze(SCREAMING_SNAKE_CASE__ ,0 ) ).latent_dist.sample(
generator=SCREAMING_SNAKE_CASE__ )[0]
__SCREAMING_SNAKE_CASE :Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__SCREAMING_SNAKE_CASE :Optional[int] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.scheduler.timesteps[start_step - 1] )
__SCREAMING_SNAKE_CASE :List[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__SCREAMING_SNAKE_CASE :List[Any] = int(mask_start_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE :List[str] = int(mask_end_secs * pixels_per_second )
__SCREAMING_SNAKE_CASE :List[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Dict = self.unet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )['''sample''']
else:
__SCREAMING_SNAKE_CASE :Dict = self.unet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )['''sample''']
if isinstance(self.scheduler ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,sample=SCREAMING_SNAKE_CASE__ ,eta=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,)['''prev_sample''']
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = self.scheduler.step(
model_output=SCREAMING_SNAKE_CASE__ ,timestep=SCREAMING_SNAKE_CASE__ ,sample=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,)['''prev_sample''']
if mask is not None:
if mask_start > 0:
__SCREAMING_SNAKE_CASE :Any = mask[:, step, :, :mask_start]
if mask_end > 0:
__SCREAMING_SNAKE_CASE :int = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__SCREAMING_SNAKE_CASE :Any = 1 / self.vqvae.config.scaling_factor * images
__SCREAMING_SNAKE_CASE :Any = self.vqvae.decode(SCREAMING_SNAKE_CASE__ )['''sample''']
__SCREAMING_SNAKE_CASE :Dict = (images / 2 + 0.5).clamp(0 ,1 )
__SCREAMING_SNAKE_CASE :Union[str, Any] = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
__SCREAMING_SNAKE_CASE :Dict = (images * 2_55).round().astype('''uint8''' )
__SCREAMING_SNAKE_CASE :str = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(SCREAMING_SNAKE_CASE__ ,mode='''RGB''' ).convert('''L''' ) for _ in images) )
__SCREAMING_SNAKE_CASE :Union[str, Any] = [self.mel.image_to_audio(SCREAMING_SNAKE_CASE__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(SCREAMING_SNAKE_CASE__ )[:, np.newaxis, :] ) ,**ImagePipelineOutput(SCREAMING_SNAKE_CASE__ ) )
@torch.no_grad()
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler ,SCREAMING_SNAKE_CASE__ )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = np.array(
[np.frombuffer(image.tobytes() ,dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
__SCREAMING_SNAKE_CASE :List[Any] = (sample / 2_55) * 2 - 1
__SCREAMING_SNAKE_CASE :Optional[int] = torch.Tensor(SCREAMING_SNAKE_CASE__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
__SCREAMING_SNAKE_CASE :Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.scheduler.alphas_cumprod[t]
__SCREAMING_SNAKE_CASE :Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__SCREAMING_SNAKE_CASE :int = 1 - alpha_prod_t
__SCREAMING_SNAKE_CASE :str = self.unet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )['''sample''']
__SCREAMING_SNAKE_CASE :Any = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__SCREAMING_SNAKE_CASE :Optional[int] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__SCREAMING_SNAKE_CASE :str = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> torch.Tensor:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = acos(torch.dot(torch.flatten(SCREAMING_SNAKE_CASE__ ) ,torch.flatten(SCREAMING_SNAKE_CASE__ ) ) / torch.norm(SCREAMING_SNAKE_CASE__ ) / torch.norm(SCREAMING_SNAKE_CASE__ ) )
        return sin((1 - alpha) * theta ) * xa / sin(SCREAMING_SNAKE_CASE__ ) + sin(alpha * theta ) * xa / sin(SCREAMING_SNAKE_CASE__ )
| 498 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Optional[int]):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : int = mock.Mock()
__lowerCamelCase : Dict = 5_0_0
__lowerCamelCase : List[str] = {}
__lowerCamelCase : Any = HTTPError
__lowerCamelCase : List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=SCREAMING_SNAKE_CASE__) as mock_head:
__lowerCamelCase : List[Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase ( self : Dict):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : Union[str, Any] = mock.Mock()
__lowerCamelCase : Dict = 5_0_0
__lowerCamelCase : Any = {}
__lowerCamelCase : Dict = HTTPError
__lowerCamelCase : Optional[int] = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Optional[int] = GPTaTokenizerFast.from_pretrained('gpt2')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=SCREAMING_SNAKE_CASE__) as mock_head:
__lowerCamelCase : Dict = GPTaTokenizerFast.from_pretrained('gpt2')
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Optional[Any]):
# This test is for deprecated behavior and can be removed in v5
try:
__lowerCamelCase : str = tempfile.mktemp()
with open(SCREAMING_SNAKE_CASE__ ,'wb') as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = AlbertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__)
finally:
os.remove(SCREAMING_SNAKE_CASE__)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb') as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_0_0_0)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def lowerCAmelCase ( self : str):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase : Optional[int] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class A_ ( unittest.TestCase ):
_UpperCAmelCase : List[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCAmelCase ( cls : Any):
__lowerCamelCase : List[str] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE__)
@classmethod
def lowerCAmelCase ( cls : int):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any]):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Optional[int] = BertTokenizer(SCREAMING_SNAKE_CASE__)
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token)
__lowerCamelCase : Any = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ,repo_id='test-tokenizer' ,push_to_hub=SCREAMING_SNAKE_CASE__ ,use_auth_token=self._token)
__lowerCamelCase : List[str] = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
def lowerCAmelCase ( self : Dict):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Tuple = BertTokenizer(SCREAMING_SNAKE_CASE__)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token)
__lowerCamelCase : str = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE__ ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=SCREAMING_SNAKE_CASE__ ,use_auth_token=self._token)
__lowerCamelCase : Any = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
@require_tokenizers
def lowerCAmelCase ( self : Optional[Any]):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : str = CustomTokenizer(SCREAMING_SNAKE_CASE__)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token)
__lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" ,trust_remote_code=SCREAMING_SNAKE_CASE__)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Optional[Any] = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__)
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__)
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token)
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" ,trust_remote_code=SCREAMING_SNAKE_CASE__)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast')
__lowerCamelCase : int = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" ,use_fast=SCREAMING_SNAKE_CASE__ ,trust_remote_code=SCREAMING_SNAKE_CASE__)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer')
class A_ ( unittest.TestCase ):
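    # Tests for the tokenizer Trie: `split` segments text on the longest added
    # tokens without dropping any characters.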
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Optional[int] = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Optional[int] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') ,['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') ,['[CLS]', ' This is a ', 'extra_id_100'])
def lowerCAmelCase ( self : int):
__lowerCamelCase : Any = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') ,['A', 'BC'])
self.assertEqual(trie.split('BCA') ,['BC', 'A'])
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Optional[Any] = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') ,['This is something ', '[SPECIAL_TOKEN]'])
def lowerCAmelCase ( self : Any):
__lowerCamelCase : int = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') ,['This is something ', '[SPECIAL_TOKEN]'])
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : str = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') ,['AB', 'C'])
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Tuple = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') ,['ABC', 'D'])
def lowerCAmelCase ( self : str):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowerCamelCase : Optional[Any] = Trie()
__lowerCamelCase : List[Any] = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3])
self.assertEqual(SCREAMING_SNAKE_CASE__ ,['AB', 'C'])
| 337 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a ={
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 337 | 1 |
import requests
snake_case_ : int = """YOUR API KEY"""
def __a ( __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] = giphy_api_key ) -> Any:
"""simple docstring"""
lowerCamelCase_ : List[str] = "+".join(query.split() )
lowerCamelCase_ : Optional[int] = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
lowerCamelCase_ : List[str] = requests.get(SCREAMING_SNAKE_CASE__ ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 488 |
'''simple docstring'''
import math
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return math.pow(SCREAMING_SNAKE_CASE__ , 2 ) - a
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return 2 * x
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = 2.0
while start <= a:
_snake_case = math.pow(SCREAMING_SNAKE_CASE__ , 2 )
return start
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 99_99 , SCREAMING_SNAKE_CASE__ = 0.00000000000001 ):
'''simple docstring'''
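    # Newton-Raphson for sqrt(a): iterate x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n)
    # until successive estimates differ by less than the tolerance.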
if a < 0:
raise ValueError("math domain error" )
_snake_case = get_initial_point(SCREAMING_SNAKE_CASE__ )
for _ in range(SCREAMING_SNAKE_CASE__ ):
_snake_case = value
_snake_case = value - fx(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / fx_derivative(SCREAMING_SNAKE_CASE__ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 672 | 0 |
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( lowercase__ , lowercase__ = None , lowercase__ = None ) ->Optional[int]:
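    # Slowsort ("multiply and surrender"): recursively sort both halves, swap the
    # maximum to the end, then sort everything before it again.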
if start is None:
_snake_case: Union[str, Any] = 0
if end is None:
_snake_case: Optional[int] = len(_lowerCamelCase ) - 1
if start >= end:
return
_snake_case: str = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 717 |
'''simple docstring'''
A : List[str] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : Dict = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 273 | 0 |
'''simple docstring'''
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_a : int = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : List[Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__A )
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
from transformers.testing_utils import pytest_terminal_summary_main
UpperCAmelCase = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(__A , id=__A )
| 447 |
'''simple docstring'''
import functools
def _UpperCAmelCase ( __A : list[int] , __A : list[int] ):
# Validation
if not isinstance(__A , __A ) or not all(isinstance(__A , __A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(__A ) != 3 or not all(isinstance(__A , __A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(__A ) == 0:
return 0
if min(__A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(__A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
a_ : List[Any] = set(__A )
@functools.cache
def dynamic_programming(__A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
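        # Choose the cheapest option: a 1-day, 7-day, or 30-day pass starting today.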
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
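# Worked example: with days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15], the
# optimum is 2 + 7 + 2 = 11 (a 1-day pass on day 1, a 7-day pass covering days
# 4-8, then another 1-day pass on day 20).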
if __name__ == "__main__":
import doctest
doctest.testmod()
| 466 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__lowercase :Optional[Any] = ShapEPipeline
__lowercase :Optional[int] = ["prompt"]
__lowercase :str = ["prompt"]
__lowercase :Tuple = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__lowercase :Tuple = False
@property
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
return 8
@property
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ = PriorTransformer(**UpperCamelCase__ )
return model
@property
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ = ShapERenderer(**UpperCamelCase__ )
return model
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.dummy_prior
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = self.dummy_tokenizer
lowerCamelCase_ = self.dummy_renderer
lowerCamelCase_ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase__ , clip_sample=UpperCamelCase__ , clip_sample_range=1.0 , )
lowerCamelCase_ = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Dict:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowerCamelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCamelCase_ = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = '''cpu'''
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
lowerCamelCase_ = output.images[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = torch_device == '''cpu'''
lowerCamelCase_ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , )
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = self.get_dummy_inputs(UpperCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ = batch_size * [inputs[key]]
lowerCamelCase_ = pipe(**UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=True)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image) | 66 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with an id, a key, a parent pointer and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the cheapest vertex: O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm driven by a binary heap: O((V + E) log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
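

# An added, illustrative doctest (not part of the original file): the triangle
# graph 1-2-3 with edge weights 1, 2 and 5 has the minimum spanning tree {1-2, 2-3}.
def _example_prim() -> None:
    """
    >>> g = [Vertex(n) for n in range(3)]
    >>> connect(g, 1, 2, 1)
    >>> connect(g, 2, 3, 2)
    >>> connect(g, 1, 3, 5)
    >>> prim(g, g[0])
    [(2, 1), (3, 2)]
    >>> sorted(prim_heap(g, g[0]))
    [(2, 1), (3, 2)]
    """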
if __name__ == "__main__":
import doctest
doctest.testmod() | 66 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    """Construct a Reformer tokenizer, backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
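

# Illustrative usage (added; "spiece.model" is a placeholder path, not a file that
# ships with this module):
#     tokenizer = ReformerTokenizer("spiece.model")
#     tokenizer.tokenize("Crime and Punishment")  # -> list of SentencePiece pieces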
| 486 | 0 |
"""simple docstring"""
def capitalize_variants(txt: str) -> list:
    """Return every variant of ``txt`` in which exactly one alphabetic character is upper-cased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
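

# An added, illustrative doctest (not part of the original file): each alphabetic
# position in the input yields exactly one variant.
def _example_capitalize() -> None:
    """
    >>> capitalize_variants("abc")
    ['Abc', 'aBc', 'abC']
    """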
if __name__ == "__main__":
__import__('doctest').testmod()
| 215 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
'''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : str=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : int=False, _lowerCamelCase : str=99, _lowerCamelCase : Union[str, Any]=32, _lowerCamelCase : str=2, _lowerCamelCase : List[Any]=4, _lowerCamelCase : Optional[Any]=37, _lowerCamelCase : Union[str, Any]=0.1, _lowerCamelCase : Optional[int]=0.1, _lowerCamelCase : Optional[Any]=40, _lowerCamelCase : List[str]=2, _lowerCamelCase : Dict=1, _lowerCamelCase : Any=0, ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
__A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
__A = tf.concat([input_ids, eos_tensor], axis=1 )
__A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__A = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
__A = prepare_pegasus_inputs_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = TFPegasusModel(config=_lowerCamelCase ).get_decoder()
__A = inputs_dict['''input_ids''']
__A = input_ids[:1, :]
__A = inputs_dict['''attention_mask'''][:1, :]
__A = inputs_dict['''head_mask''']
__A = 1
# first forward pass
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, head_mask=_lowerCamelCase, use_cache=_lowerCamelCase )
__A , __A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__A = ids_tensor((self.batch_size, 3), config.vocab_size )
__A = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
__A = tf.concat([input_ids, next_tokens], axis=-1 )
__A = tf.concat([attention_mask, next_attn_mask], axis=-1 )
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase )[0]
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
__A = int(ids_tensor((1,), output_from_past.shape[-1] ) )
__A = output_from_no_past[:, -3:, random_slice_idx]
__A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase, _lowerCamelCase, rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
'''simple docstring'''
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
A_ : Union[str, Any] = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
| 215 | 1 |
'''simple docstring'''
def pancake_sort(arr: list) -> list:
    """Sort ``arr`` into ascending order using only prefix reversals."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole prefix of length cur
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
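

# A small added illustration (not in the original file): pancake_sort orders the
# list without using Python's built-in sort.
def _example_pancake_sort() -> None:
    """
    >>> pancake_sort([3, 6, 1, 10, 2])
    [1, 2, 3, 6, 10]
    >>> pancake_sort([-2, 0, -1])
    [-2, -1, 0]
    """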
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 460 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 460 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Cooley-Tukey butterflies, halving the number of columns each round
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B and return the coefficients of A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
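

# An added, illustrative doctest (not in the original file): multiplying
# A(x) = 1 + 2x + 3x^2 by B(x) = 3 + 4x should give 3 + 10x + 17x^2 + 12x^3.
def _example_fft() -> None:
    """
    >>> fft = FFT([1, 2, 3], [3, 4])
    >>> [round(c.real) for c in fft.product]
    [3, 10, 17, 12]
    """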
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
def solution() -> int:
    """
    Return the product a * b * c of the unique Pythagorean triplet for which
    a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
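

# For reference (added note): the unique triplet is (a, b, c) = (200, 375, 425),
# so solution() evaluates to 200 * 375 * 425 = 31875000.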
if __name__ == "__main__":
print(f"""{solution() = }""")
| 474 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCamelCase : Dict = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str=7 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Optional[int]=18 , lowerCamelCase__ : str=30 , lowerCamelCase__ : List[str]=400 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[Any]=None , ):
a__ : Dict = size if size is not None else {"height": 20, "width": 20}
a__ : Optional[int] = parent
a__ : Tuple = batch_size
a__ : int = num_channels
a__ : Any = image_size
a__ : Tuple = min_resolution
a__ : Tuple = max_resolution
a__ : Union[str, Any] = size
a__ : str = do_normalize
a__ : List[Any] = do_convert_rgb
a__ : str = [512, 1_024, 2_048, 4_096]
a__ : str = patch_size if patch_size is not None else {"height": 16, "width": 16}
def _UpperCamelCase( self : Dict ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _UpperCamelCase( self : int ):
a__ : Union[str, Any] = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
a__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = PixaStructImageProcessor if is_vision_available() else None
def _UpperCamelCase( self : Dict ):
a__ : Optional[int] = PixaStructImageProcessingTester(self )
@property
def _UpperCamelCase( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase( self : int ):
a__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_convert_rgb" ) )
def _UpperCamelCase( self : List[Any] ):
a__ : List[str] = self.image_processor_tester.prepare_dummy_image()
a__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
a__ : Union[str, Any] = 2_048
a__ : int = image_processor(lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
def _UpperCamelCase( self : List[Any] ):
# Initialize image_processor
a__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
a__ : Optional[int] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a__ : Any = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a__ : Tuple = image_processor(
lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _UpperCamelCase( self : Any ):
# Initialize image_processor
a__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
a__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
a__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowerCamelCase__ ):
a__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
a__ : List[Any] = "Hello"
a__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ , header_text=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a__ : str = image_processor(
lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ , header_text=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _UpperCamelCase( self : Optional[Any] ):
# Initialize image_processor
a__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
a__ : Any = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a__ : Optional[int] = image_processor(
lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def _UpperCamelCase( self : Tuple ):
# Initialize image_processor
a__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a__ : str = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a__ : List[str] = image_processor(
lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
"""simple docstring"""
_lowercase = PixaStructImageProcessor if is_vision_available() else None
def _UpperCamelCase( self : Union[str, Any] ):
a__ : List[str] = PixaStructImageProcessingTester(self , num_channels=4 )
a__ : List[str] = 3
@property
def _UpperCamelCase( self : List[str] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase( self : Optional[Any] ):
a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_convert_rgb" ) )
def _UpperCamelCase( self : str ):
# Initialize image_processor
a__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
a__ : Optional[Any] = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a__ : Any = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a__ : str = image_processor(
lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 37 |
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
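
# For reference (added note): the squared Mahalanobis distance of a point x from a
# distribution with mean mu and covariance Sigma is
#     D^2(x) = (x - mu)^T Sigma^{-1} (x - mu)
# `_compute` below evaluates this quadratic form for every row of X, using the mean
# and covariance of `reference_distribution` (and the pseudo-inverse when the
# covariance matrix is singular).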
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 243 | 0 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Numerator factor of the p-th forward-difference term: u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
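

# Added note: the code below evaluates Newton's forward-difference formula
#     f(x0 + u*h) ~= sum_{i >= 0} [u * (u - 1) * ... * (u - i + 1) / i!] * (delta^i f)(x0)
# where ucal(u, i) supplies the falling product u * (u - 1) * ... * (u - i + 1) and
# y[0][i] holds the i-th forward difference delta^i f(x0).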
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 520 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = old_name
if "patch_embed" in old_name:
A , A , A : Optional[Any] = old_name.split(""".""" )
if layer == "0":
A : Optional[int] = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
A : List[str] = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
A : Optional[int] = old_name.replace("""3""" , """convolution2""" )
else:
A : List[Any] = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(R"""\d\.\d""" , _lowerCAmelCase ):
A : Union[str, Any] = R"""\b\d{2}\b"""
if bool(re.search(_lowerCAmelCase , _lowerCAmelCase ) ):
A : int = re.search(R"""\d\.\d\d.""" , _lowerCAmelCase ).group()
else:
A : int = re.search(R"""\d\.\d.""" , _lowerCAmelCase ).group()
if int(match[0] ) < 6:
A : Any = old_name.replace(_lowerCAmelCase , """""" )
A : Dict = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
A : int = """intermediate_stages.""" + trimmed_name
else:
A : Tuple = old_name.replace(_lowerCAmelCase , """""" )
if int(match[2] ) < num_meta4D_last_stage:
A : Optional[Any] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
A : int = str(int(match[2] ) - num_meta4D_last_stage )
A : Optional[Any] = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
A : Optional[Any] = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
A : Optional[int] = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
A : Dict = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
A : Optional[Any] = trimmed_name.replace("""fc2""" , """linear_out""" )
A : Dict = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(R""".\d.""" , _lowerCAmelCase ):
A : Union[str, Any] = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
A : Any = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A : Union[str, Any] = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A : Tuple = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
A : Tuple = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
A : int = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
A : Dict = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
A : int = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A : Any = new_name.replace("""norm""" , """layernorm""" )
A : int = """efficientformer.""" + new_name
else:
A : int = """efficientformer.encoder.""" + new_name
return new_name
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
"""simple docstring"""
for key in checkpoint.copy().keys():
A : Union[str, Any] = checkpoint.pop(_lowerCAmelCase )
A : Optional[Any] = val
return checkpoint
def __UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A : int = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return image
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
A : int = torch.load(_lowerCAmelCase , map_location="""cpu""" )["""model"""]
A : Tuple = EfficientFormerConfig.from_json_file(_lowerCAmelCase )
A : Optional[int] = EfficientFormerForImageClassificationWithTeacher(_lowerCAmelCase )
A : int = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
A : Optional[int] = config.depths[-1] - config.num_metaad_blocks + 1
A : Dict = convert_torch_checkpoint(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
A : Dict = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
A : str = prepare_img()
A : int = 256
A : Any = 224
A : List[str] = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
A : int = processor(images=_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
# original processing pipeline
A : Dict = Compose(
[
Resize(_lowerCAmelCase , interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(_lowerCAmelCase ),
ToTensor(),
Normalize(_lowerCAmelCase , _lowerCAmelCase ),
] )
A : Any = image_transforms(_lowerCAmelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
A : Any = model(_lowerCAmelCase )
A : Any = outputs.logits
A : Any = (1, 1000)
if "l1" in model_name:
A : Tuple = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , _lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A : List[Any] = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , _lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A : str = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print(f'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCAmelCase )
print(f'''Processor successfuly saved at {pytorch_dump_path}''' )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add model""" , use_temp_dir=_lowerCAmelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add image processor""" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_:Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
SCREAMING_SNAKE_CASE_:Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 520 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Evaluate Gamma(num) as the integral of x**(num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
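

# An added, illustrative doctest (not in the original file): Gamma(n) == (n - 1)!
# for positive integers, so the numeric integral should land very close to 24.
def _example_gamma() -> None:
    """
    >>> from math import isclose
    >>> isclose(gamma(5), 24.0, rel_tol=1e-6)
    True
    """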
if __name__ == "__main__":
from doctest import testmod
testmod()
| 605 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
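

# Typical invocation (illustrative; the flags map to fields of
# TensorFlowBenchmarkArguments):
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128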
if __name__ == "__main__":
main()
| 605 | 1 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
_UpperCamelCase : Dict ="1"
_UpperCamelCase : Any ="0"
_UpperCamelCase : int ="1"
_UpperCamelCase : Optional[int] =ort.SessionOptions()
_UpperCamelCase : List[str] =ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
_UpperCamelCase : List[str] =["TensorrtExecutionProvider", "CUDAExecutionProvider"]
_UpperCamelCase : List[str] =ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
_UpperCamelCase : int =ort.RunOptions()
_UpperCamelCase : int =1_28
_UpperCamelCase : Optional[Any] =1
_UpperCamelCase : Optional[int] =np.ones((batch, sequence), dtype=np.intaa)
_UpperCamelCase : Dict =np.ones((batch, sequence), dtype=np.intaa)
_UpperCamelCase : Union[str, Any] =np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
_UpperCamelCase : Union[str, Any] =time.time()
_UpperCamelCase : Optional[int] =20_00
_UpperCamelCase : List[str] ={}
for iter in range(max_iters):
_UpperCamelCase : Dict =sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 10_00 / max_iters))
| 575 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
_UpperCamelCase : Any =parse(importlib.metadata.version("torch"))
def lowerCamelCase_ ( A_ , A_ , A_ ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
__lowerCamelCase = STR_OPERATION_TO_FUNC[operation]
if isinstance(A_ , A_ ):
__lowerCamelCase = parse(importlib.metadata.version(A_ ) )
return operation(A_ , parse(A_ ) )
def lowerCamelCase_ ( A_ , A_ ):
return compare_versions(A_ , A_ , A_ )
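

# Example (illustrative): gate a code path on the installed torch version.
#     if is_torch_version(">=", "1.12.0"):
#         ...  # safe to use features introduced in torch 1.12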
| 575 | 1 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
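

# An added illustration (not in the original file): there are three ways to make
# 4 pence (1+1+1+1, 2+1+1 and 2+2).
def _example_solution() -> None:
    """
    >>> solution(4)
    3
    """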
if __name__ == "__main__":
print(solution(int(input().strip())))
| 318 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 318 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    """Configuration class for the DeiT model."""

    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeiT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
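
# Quick sanity sketch (assumes transformers is importable and the classes above
# are available under these names):
#
#     config = DeiTConfig()
#     onnx_config = DeiTOnnxConfig(config)
#     print(onnx_config.inputs)
#     # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])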
| 718 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
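
# Toy illustration (assumed, not part of the original module): the CoNLL-style
# reader above treats blank lines as sentence boundaries, takes the first
# space-separated column as the word and the column at label_idx as the tag.
# The same loop, stripped of the InputExample wrapper:
#
#     lines = ["EU B-ORG\n", "rejects O\n", "\n", "German B-MISC\n"]
#     sentences, words, labels = [], [], []
#     for line in lines:
#         if line == "\n":
#             if words:
#                 sentences.append((words, labels))
#                 words, labels = [], []
#         else:
#             splits = line.split(" ")
#             words.append(splits[0])
#             labels.append(splits[-1].strip())
#     if words:
#         sentences.append((words, labels))
#     # sentences == [(["EU", "rejects"], ["B-ORG", "O"]), (["German"], ["B-MISC"])]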
| 125 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
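
# Worked check (illustrative): with the defaults above, embed_dim = 96 and
# four stages, so hidden_size = int(96 * 2 ** (4 - 1)) = 768:
#
#     config = Swinv2Config()
#     assert config.hidden_size == 768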
| 530 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
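
# Default-construction sketch (illustrative): with no arguments the backbone
# falls back to a Swin config and the decoder to a DetrConfig, so this runs
# offline without downloading weights:
#
#     config = MaskFormerConfig()
#     print(config.backbone_config.model_type, config.decoder_config.model_type)  # swin detr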
| 570 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
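
# Cache-key sketch (illustrative): for a BertTokenizer with max_seq_length=384
# on SQuAD v1 dev data, the features are cached at
#
#     <data_dir>/cached_dev_BertTokenizer_384_v1
#
# so changing the tokenizer class, sequence length or SQuAD version triggers a
# rebuild rather than a stale cache hit.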
| 341 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Create the rename entries for the patch-embedding weights of one stage."""
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """Create the rename entries for the attention/MLP/norm weights of one block."""
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Create the rename entry for the class token of one stage."""
    token = []
token.append((F"cvt.encoder.stages.{idx}.cls_token", '''stage2.cls_token''') )
return token
def final():
    """Create the rename entries for the final layernorm and classifier head."""
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """
    Fetch the original CvT weights, rename them to the Hugging Face layout and save the converted model.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for entry in list_of_state_dict:
        print(entry)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_8_4,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCamelCase : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
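
# Example invocation (paths and the script file name are assumptions; the flag
# names come from the argparse definition above):
#
#     python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#         --cvt_model cvt-13 \
#         --image_size 384 \
#         --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth \
#         --pytorch_dump_folder_path ./cvt-13-384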
| 341 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for [`ShapEImg2ImgPipeline`].
    """

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating latent representations of a 3D asset from an image, using Shap-E.
    """
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
if latents is None:
lowerCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
lowerCAmelCase = latents.to(__lowerCAmelCase)
lowerCAmelCase = latents * scheduler.init_noise_sigma
return latents
def a_ ( self , __lowerCAmelCase=0):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""")
lowerCAmelCase = torch.device(f"cuda:{gpu_id}")
lowerCAmelCase = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase)
@property
def a_ ( self):
"""simple docstring"""
if self.device != torch.device("""meta""") or not hasattr(self.image_encoder , """_hf_hook"""):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__lowerCAmelCase , """_hf_hook""")
and hasattr(module._hf_hook , """execution_device""")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase) and isinstance(image[0] , torch.Tensor):
lowerCAmelCase = torch.cat(__lowerCAmelCase , axis=0) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0)
if not isinstance(__lowerCAmelCase , torch.Tensor):
lowerCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors="""pt""").pixel_values[0].unsqueeze(0)
lowerCAmelCase = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase)
lowerCAmelCase = self.image_encoder(__lowerCAmelCase)["""last_hidden_state"""]
lowerCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCAmelCase = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0)
if do_classifier_free_guidance:
lowerCAmelCase = torch.zeros_like(__lowerCAmelCase)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase = torch.cat([negative_image_embeds, image_embeds])
return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 25 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 4.0 , __lowerCAmelCase = 64 , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , PIL.Image.Image):
lowerCAmelCase = 1
elif isinstance(__lowerCAmelCase , torch.Tensor):
lowerCAmelCase = image.shape[0]
elif isinstance(__lowerCAmelCase , __lowerCAmelCase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
lowerCAmelCase = len(__lowerCAmelCase)
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase)}")
lowerCAmelCase = self._execution_device
lowerCAmelCase = batch_size * num_images_per_prompt
lowerCAmelCase = guidance_scale > 1.0
lowerCAmelCase = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase)
# prior
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase)
lowerCAmelCase = self.scheduler.timesteps
lowerCAmelCase = self.prior.config.num_embeddings
lowerCAmelCase = self.prior.config.embedding_dim
lowerCAmelCase = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCAmelCase = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase)
for i, t in enumerate(self.progress_bar(__lowerCAmelCase)):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowerCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase)
lowerCAmelCase = self.prior(
__lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding
# remove the variance
lowerCAmelCase , lowerCAmelCase = noise_pred.split(
scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCAmelCase , lowerCAmelCase = noise_pred.chunk(2)
lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCAmelCase = self.scheduler.step(
__lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__lowerCAmelCase)
lowerCAmelCase = []
for i, latent in enumerate(__lowerCAmelCase):
lowerCAmelCase = self.renderer.decode(
latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(__lowerCAmelCase)
lowerCAmelCase = torch.stack(__lowerCAmelCase)
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
lowerCAmelCase = images.cpu().numpy()
if output_type == "pil":
lowerCAmelCase = [self.numpy_to_pil(__lowerCAmelCase) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__lowerCAmelCase)
| 370 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 183 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="pt" , ):
if input_points is not None:
if len(UpperCamelCase ) != len(UpperCamelCase ):
_SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , UpperCamelCase , original_sizes[0] ) for point in input_points
]
else:
_SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , UpperCamelCase , UpperCamelCase )
for point, original_size in zip(UpperCamelCase , UpperCamelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self._pad_points_and_labels(UpperCamelCase , UpperCamelCase )
_SCREAMING_SNAKE_CASE = np.array(UpperCamelCase )
if input_labels is not None:
_SCREAMING_SNAKE_CASE = np.array(UpperCamelCase )
if input_boxes is not None:
if len(UpperCamelCase ) != len(UpperCamelCase ):
_SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , UpperCamelCase , original_sizes[0] , is_bounding_box=UpperCamelCase )
for box in input_boxes
]
else:
_SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , UpperCamelCase , UpperCamelCase , is_bounding_box=UpperCamelCase )
for box, original_size in zip(UpperCamelCase , UpperCamelCase )
]
_SCREAMING_SNAKE_CASE = np.array(UpperCamelCase )
if input_boxes is not None:
if return_tensors == "pt":
_SCREAMING_SNAKE_CASE = torch.from_numpy(UpperCamelCase )
# boxes batch size of 1 by default
_SCREAMING_SNAKE_CASE = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(UpperCamelCase )
# boxes batch size of 1 by default
_SCREAMING_SNAKE_CASE = tf.expand_dims(UpperCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_SCREAMING_SNAKE_CASE = torch.from_numpy(UpperCamelCase )
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(UpperCamelCase )
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE = tf.expand_dims(UpperCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_SCREAMING_SNAKE_CASE = torch.from_numpy(UpperCamelCase )
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_SCREAMING_SNAKE_CASE = tf.convert_to_tensor(UpperCamelCase )
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE = tf.expand_dims(UpperCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def lowercase ( self , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = max([point.shape[0] for point in input_points] )
_SCREAMING_SNAKE_CASE = []
for i, point in enumerate(UpperCamelCase ):
if point.shape[0] != expected_nb_points:
_SCREAMING_SNAKE_CASE = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_SCREAMING_SNAKE_CASE = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCamelCase )
_SCREAMING_SNAKE_CASE = processed_input_points
return input_points, input_labels
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = original_size
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor._get_preprocess_shape(UpperCamelCase , longest_edge=UpperCamelCase )
_SCREAMING_SNAKE_CASE = deepcopy(UpperCamelCase ).astype(UpperCamelCase )
if is_bounding_box:
_SCREAMING_SNAKE_CASE = coords.reshape(-1 , 2 , 2 )
_SCREAMING_SNAKE_CASE = coords[..., 0] * (new_w / old_w)
_SCREAMING_SNAKE_CASE = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_SCREAMING_SNAKE_CASE = coords.reshape(-1 , 4 )
return coords
def lowercase ( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , ):
if input_points is not None:
if hasattr(UpperCamelCase , "numpy" ): # Checks for TF or Torch tensor
_SCREAMING_SNAKE_CASE = input_points.numpy().tolist()
if not isinstance(UpperCamelCase , UpperCamelCase ) or not isinstance(input_points[0] , UpperCamelCase ):
raise ValueError("Input points must be a list of list of floating points." )
_SCREAMING_SNAKE_CASE = [np.array(UpperCamelCase ) for input_point in input_points]
else:
_SCREAMING_SNAKE_CASE = None
if input_labels is not None:
if hasattr(UpperCamelCase , "numpy" ):
_SCREAMING_SNAKE_CASE = input_labels.numpy().tolist()
if not isinstance(UpperCamelCase , UpperCamelCase ) or not isinstance(input_labels[0] , UpperCamelCase ):
raise ValueError("Input labels must be a list of list integers." )
_SCREAMING_SNAKE_CASE = [np.array(UpperCamelCase ) for label in input_labels]
else:
_SCREAMING_SNAKE_CASE = None
if input_boxes is not None:
if hasattr(UpperCamelCase , "numpy" ):
_SCREAMING_SNAKE_CASE = input_boxes.numpy().tolist()
if (
not isinstance(UpperCamelCase , UpperCamelCase )
or not isinstance(input_boxes[0] , UpperCamelCase )
or not isinstance(input_boxes[0][0] , UpperCamelCase )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_SCREAMING_SNAKE_CASE = [np.array(UpperCamelCase ).astype(np.floataa ) for box in input_boxes]
else:
_SCREAMING_SNAKE_CASE = None
return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
| 493 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solve a * x + b * y = c for two equations given as [a, b, c] using Cramer's rule.

    >>> cramers_rule_2x2([2, 1, 7], [1, -1, 2])
    (3.0, 1.0)
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 493 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
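
# Standalone illustration (assumption: not part of the original file) of the
# truncation idea used above — cut a generated completion at the second
# top-level `def`, keeping only the first function:
#
#     import re
#     completion = "def f():\n    return 1\ndef g():\n    return 2\n"
#     defs = list(re.finditer("^def", completion, re.MULTILINE))
#     if len(defs) > 1:
#         completion = completion[: defs[1].start()]
#     # completion == "def f():\n    return 1\n"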
| 655 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
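# For context, a hedged sketch of the helper under test: it builds a student by
# copying every k-th layer of the teacher. The starred unpacking above only
# guarantees that the first return value is the student model; the remaining
# values (layer-id lists) are an assumption about the full signature:
#
#   student, *layer_ids = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
#   assert student.config.encoder_layers == 1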
| 569 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
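# Why the loop works (Project Euler 135): write the progression as x = a + d,
# y = a, z = a - d, so x**2 - y**2 - z**2 = a * (4d - a) = n. With a = first_term
# and n a multiple of a, 4d = a + n / a, which is exactly `common_difference`
# before the division by 4, hence the divisibility check. z > 0 forces a > d,
# and n > 0 forces a < 4d, the two conditions tested above.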
if __name__ == "__main__":
print(f"""{solution() = }""")
| 255 |
"""simple docstring"""
def hubble_parameter(hubble_constant: float, radiation_density: float, matter_density: float, dark_energy: float, redshift: float) -> float:
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
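# Sanity check for the demo below: at redshift 0 the bracket reduces to
# radiation + matter + curvature + dark_energy
#   = 1e-4 + 0.3 + (1 - 0.3 - 1e-4 - 0.7) + 0.7 = 1.0,
# so hubble_parameter(...) returns the input Hubble constant of 68.3 unchanged.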
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 255 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(self, preprocessor: "FeatureExtractionMixin", batch_size: int = 1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 32, image_height: int = 32) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
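# Hedged usage sketch for the ONNX config above (the constructor follows the
# generic OnnxConfig(config) pattern; treat the exact signature as an assumption):
#
#   config = ImageGPTConfig()
#   onnx_config = ImageGPTOnnxConfig(config)
#   list(onnx_config.inputs.keys())  # -> ["input_ids"]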
| 12 | import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    """simple docstring"""

    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
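# Hedged usage sketch: encode() strips spaces and folds "j" into "i", so the
# round trip returns the normalized plaintext:
#
#   cipher = BifidCipher()
#   ciphertext = cipher.encode("testmessage")
#   assert cipher.decode(ciphertext) == "testmessage"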
| 312 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align PATH_TO_DIFFUSERS in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 577 |
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
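# Quick check: the proper divisors of 6 are 1, 2 and 3, so sum_of_divisors(6) == 6;
# likewise sum_of_divisors(28) == 28. Both are perfect numbers.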
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577 | 1 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
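# Migration sketch (hedged): new code should use the processor class directly,
# e.g. GLPNImageProcessor() or GLPNImageProcessor.from_pretrained(<checkpoint>),
# instead of instantiating the deprecated feature extractor above.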
| 246 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2)

        # instantiate trainer
        trainer = Seq2SeqTrainer(model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer)

        # start training
        trainer.train()
| 246 | 1 |
"""simple docstring"""
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
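# Worked example: 145 is a Krishnamurthy number because 1! + 4! + 5! =
# 1 + 24 + 120 = 145, so krishnamurthy(145) is True; krishnamurthy(144) is
# False since 1! + 4! + 4! = 49 != 144.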
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
    number = int(input("Enter number: ").strip())
print(
f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
) | 251 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs)
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
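# Hedged usage sketch (requires the vocab and sentencepiece files of a real
# checkpoint such as the public "facebook/m2m100_418M"):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")
#   # input_ids start with the __en__ language token and end with </s>.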
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2) | 251 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
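# Hedged usage sketch: AudioFolder is normally reached through the packaged
# "audiofolder" loader rather than instantiated directly:
#
#   from datasets import load_dataset
#   dataset = load_dataset("audiofolder", data_dir="/path/to/folder")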
| 60 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__snake_case : List[Any] = get_logger(__name__)
class MockDownloadManager:
    '''simple docstring'''

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False  # name assumed: this boolean constant is not referenced in this excerpt

    def __init__(self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None) -> None:
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 215 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
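# Hedged usage note: with the _LazyModule registered in sys.modules, a statement
# like `from transformers.models.deit import DeiTModel` only imports the heavy
# torch-backed submodule on first attribute access, keeping base imports cheap.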
| 716 |
"""simple docstring"""
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
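# Worked example: find_max([3, 9, 4], 0, 2) splits at mid=1 into [3, 9] and [4];
# the left half resolves to max(3, 9) = 9 and 9 >= 4, so the call returns 9.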
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 176 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim)
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_mask2former_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False  # the names of these four boolean flags are partly assumed; the source only shows four False values
    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ = inputs['pixel_values'].to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['mask_labels']]
SCREAMING_SNAKE_CASE_ = [el.to(_lowerCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None ) | 31 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
    @property
    def num_hidden_layers( self ):
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )

    @property
    def num_blocks( self ):
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks( self , value ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' ) | 31 | 1 |
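
# Hedged usage sketch for the Funnel config sample above (not part of the
# original file): both derived properties come straight from `block_sizes`,
# so three blocks of four layers report 12 hidden layers and 3 blocks.
#
#     config = lowerCamelCase_(block_sizes=[4, 4, 4])
#     config.num_hidden_layers  # -> 12 (sum of block_sizes)
#     config.num_blocks         # -> 3  (len of block_sizes)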
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A : Any = 'pt'
elif is_tf_available():
__A : List[str] = 'tf'
else:
__A : Union[str, Any] = 'jax'
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:str = PerceiverTokenizer
SCREAMING_SNAKE_CASE:Optional[int] = False
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase__ ( self , **_a ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def lowercase__ ( self , _a , _a=False , _a=20 , _a=5 ):
"""simple docstring"""
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
a__ = []
for i in range(len(_a ) ):
try:
a__ = tokenizer.decode([i] , clean_up_tokenization_spaces=_a )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
a__ = list(filter(lambda _a : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _a ) )
a__ = list(filter(lambda _a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_a ) , _a ) )
if max_length is not None and len(_a ) > max_length:
a__ = toks[:max_length]
if min_length is not None and len(_a ) < min_length and len(_a ) > 0:
while len(_a ) < min_length:
a__ = toks + toks
# toks_str = [t[1] for t in toks]
a__ = [t[0] for t in toks]
# Ensure consistency
a__ = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
if " " not in output_txt and len(_a ) > 1:
a__ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_a )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_a )
)
if with_prefix_space:
a__ = ' ' + output_txt
a__ = tokenizer.encode(_a , add_special_tokens=_a )
return output_txt, output_ids
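
    # Hedged note (not in the original file): the Perceiver tokenizer maps raw UTF-8
    # bytes shifted by its 6 special tokens, so 'U' (byte 85) encodes to 85 + 6 = 91,
    # which is exactly what the expected ids in the test below assert.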
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = 'Unicode €.'
a__ = tokenizer(_a )
a__ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _a )
# decoding
a__ = tokenizer.decode(_a )
self.assertEqual(_a , '[CLS]Unicode €.[SEP]' )
a__ = tokenizer('e è é ê ë' )
a__ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _a )
# decoding
a__ = tokenizer.decode(_a )
self.assertEqual(_a , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
a__ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
a__ = tokenizer(_a , padding=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
if FRAMEWORK != "jax":
a__ = list(batch.input_ids.numpy()[0] )
else:
a__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_a , _a )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a__ = tokenizer(_a , padding=_a , return_tensors=_a )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _a )
self.assertIn('attention_mask' , _a )
self.assertNotIn('decoder_input_ids' , _a )
self.assertNotIn('decoder_attention_mask' , _a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
a__ = [
'Summary of the text.',
'Another summary.',
]
a__ = tokenizer(
text_target=_a , max_length=32 , padding='max_length' , truncation=_a , return_tensors=_a )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase__ ( self ):
"""simple docstring"""
# safety check on max_len default value so we are sure the test works
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
a__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
a__ = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
a__ = tokenizer.__class__.from_pretrained(_a )
a__ = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
shutil.rmtree(_a )
a__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
a__ = tempfile.mkdtemp()
a__ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
a__ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
a__ = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
a__ = tokenizer.__class__.from_pretrained(_a )
a__ = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
a__ = tokenizer.__class__.from_pretrained(_a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_a )
def lowercase__ ( self ):
"""simple docstring"""
a__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
with open(os.path.join(_a , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(_a )
with open(os.path.join(_a , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
a__ = json.load(_a )
a__ = [F'''<extra_id_{i}>''' for i in range(125 )]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
a__ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_a , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_a , _a )
with open(os.path.join(_a , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_a , _a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a__ = tokenizer_class.from_pretrained(
_a , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_a )]
a__ = tokenizer_class.from_pretrained(
_a , additional_special_tokens=_a , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
a__ = self.get_tokenizers(fast=_a , do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a__ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
a__ = tokenizer.convert_tokens_to_string(_a )
self.assertIsInstance(_a , _a )
| 126 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__( self , vocab , merges , max_length = None , pad_token_id = None ):
        """simple docstring"""
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer , *args , **kwargs ):
        """simple docstring"""
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , *init_inputs , **kwargs ):
        """simple docstring"""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ):
        """simple docstring"""
        return cls(**config )
    def get_config( self ):
        """simple docstring"""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length = None ):
        """simple docstring"""
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 126 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int; true division would make it a float
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
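
# Hedged sketch (not in the original module): `bin_exp_mod` is imported from a
# sibling file that is not shown here. It is assumed to compute
# (base ** exponent) % modulus by binary exponentiation, roughly:
#
#     def bin_exp_mod(a, n, b):
#         res = 1
#         a %= b
#         while n > 0:
#             if n & 1:
#                 res = res * a % b
#             a = a * a % b
#             n >>= 1
#         return res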
if __name__ == "__main__":
lowercase_ = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 562 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCamelCase ( lowerCamelCase__ ):
'''simple docstring'''
_A = "naver-clova-ix/donut-base-finetuned-docvqa"
_A = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
_A = "document_qa"
_A = AutoProcessor
_A = VisionEncoderDecoderModel
_A = ["image", "text"]
_A = ["text"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : str ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE_ : "Image" , SCREAMING_SNAKE_CASE_ : str ):
_a = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
_a = task_prompt.replace('{user_input}' , SCREAMING_SNAKE_CASE_ )
_a = self.pre_processor.tokenizer(
SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).input_ids
_a = self.pre_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=SCREAMING_SNAKE_CASE_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=SCREAMING_SNAKE_CASE_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=SCREAMING_SNAKE_CASE_ , ).sequences
def _UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
_a = self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE_ )[0]
_a = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
_a = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
_a = re.sub(R'<.*?>' , '' , SCREAMING_SNAKE_CASE_ , count=1 ).strip() # remove first task start token
_a = self.pre_processor.tokenajson(SCREAMING_SNAKE_CASE_ )
return sequence["answer"]
| 562 | 1 |
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values')
return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values')
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values')
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values')
return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values')
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values')
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values')
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi , 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values')
return length * width
def area_square(side_length: float) -> float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values')
return side_length**2
def area_triangle(base: float, height: float) -> float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values')
return (base * height) / 2
def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area
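# (The block above is Heron's formula: with semi-perimeter s = (a + b + c) / 2,
# the triangle area is sqrt(s * (s - a) * (s - b) * (s - c)).)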
def area_parallelogram(base: float, height: float) -> float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values')
return base * height
def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height
def area_circle(radius: float) -> float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values')
return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values')
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides , int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \nequal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \nlength of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 701 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( SchedulerCommonTest ):
_lowerCAmelCase = (DPMSolverSinglestepScheduler,)
_lowerCAmelCase = (('''num_inference_steps''', 25),)
def UpperCAmelCase_ ( self, **A ):
"""simple docstring"""
lowerCamelCase : List[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
config.update(**A )
return config
def UpperCAmelCase_ ( self, A=0, **A ):
"""simple docstring"""
lowerCamelCase : List[str] = dict(self.forward_default_kwargs )
lowerCamelCase : Optional[Any] = kwargs.pop('num_inference_steps', A )
lowerCamelCase : Union[str, Any] = self.dummy_sample
lowerCamelCase : Dict = 0.1 * sample
lowerCamelCase : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase : Optional[Any] = self.get_scheduler_config(**A )
lowerCamelCase : Dict = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals
lowerCamelCase : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
lowerCamelCase : List[Any] = scheduler_class.from_pretrained(A )
new_scheduler.set_timesteps(A )
# copy over dummy past residuals
lowerCamelCase : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase , lowerCamelCase : Optional[int] = sample, sample
for t in range(A, time_step + scheduler.config.solver_order + 1 ):
lowerCamelCase : Dict = scheduler.step(A, A, A, **A ).prev_sample
lowerCamelCase : Optional[int] = new_scheduler.step(A, A, A, **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
def UpperCAmelCase_ ( self, A=0, **A ):
"""simple docstring"""
lowerCamelCase : List[str] = dict(self.forward_default_kwargs )
lowerCamelCase : str = kwargs.pop('num_inference_steps', A )
lowerCamelCase : Union[str, Any] = self.dummy_sample
lowerCamelCase : List[str] = 0.1 * sample
lowerCamelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCamelCase : Tuple = self.get_scheduler_config()
lowerCamelCase : Optional[Any] = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals (must be after setting timesteps)
lowerCamelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
lowerCamelCase : Tuple = scheduler_class.from_pretrained(A )
# copy over dummy past residuals
new_scheduler.set_timesteps(A )
# copy over dummy past residual (must be after setting timesteps)
lowerCamelCase : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCamelCase : int = scheduler.step(A, A, A, **A ).prev_sample
lowerCamelCase : Dict = new_scheduler.step(A, A, A, **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self, A=None, **A ):
"""simple docstring"""
if scheduler is None:
lowerCamelCase : Any = self.scheduler_classes[0]
lowerCamelCase : Optional[Any] = self.get_scheduler_config(**A )
lowerCamelCase : Optional[int] = scheduler_class(**A )
lowerCamelCase : List[Any] = self.scheduler_classes[0]
lowerCamelCase : Optional[Any] = self.get_scheduler_config(**A )
lowerCamelCase : Optional[int] = scheduler_class(**A )
lowerCamelCase : Any = 10
lowerCamelCase : Dict = self.dummy_model()
lowerCamelCase : Any = self.dummy_sample_deter
scheduler.set_timesteps(A )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : Dict = model(A, A )
lowerCamelCase : List[str] = scheduler.step(A, A, A ).prev_sample
return sample
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCamelCase : Dict = 50
lowerCamelCase : Tuple = self.dummy_model()
lowerCamelCase : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(A )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCamelCase : Any = model(A, A )
lowerCamelCase : Optional[int] = scheduler.step(A, A, A ).prev_sample
lowerCamelCase : Any = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCamelCase : str = self.full_loop(scheduler=A )
lowerCamelCase : Optional[int] = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
lowerCamelCase : Dict = DEISMultistepScheduler.from_config(scheduler.config )
lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCamelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCamelCase : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCamelCase : str = self.full_loop(scheduler=A )
lowerCamelCase : Optional[int] = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
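        # (Descriptive note, not in the original test: the `from_config` chain above
        # round-trips the config through DEIS, DPMSolverMultistep, and UniPC before
        # returning to DPMSolverSinglestep, checking that compatible schedulers can
        # be swapped without changing the sampled result mean.)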
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=A )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A, prediction_type=A, sample_max_value=A, algorithm_type='dpmsolver++', solver_order=A, solver_type=A, )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A, solver_type=A, prediction_type=A, algorithm_type=A, )
lowerCamelCase : Optional[Any] = self.full_loop(
solver_order=A, solver_type=A, prediction_type=A, algorithm_type=A, )
assert not torch.isnan(A ).any(), "Samples have nan numbers"
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=A )
self.check_over_configs(lower_order_final=A )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(variance_type=A )
self.check_over_configs(variance_type='learned_range' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=A, time_step=0 )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.full_loop()
lowerCamelCase : str = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.full_loop(use_karras_sigmas=A )
lowerCamelCase : Tuple = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.full_loop(prediction_type='v_prediction' )
lowerCamelCase : Dict = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.full_loop(prediction_type='v_prediction', use_karras_sigmas=A )
lowerCamelCase : Optional[Any] = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase : Dict = self.get_scheduler_config(thresholding=A, dynamic_thresholding_ratio=0 )
lowerCamelCase : str = scheduler_class(**A )
lowerCamelCase : List[Any] = 10
lowerCamelCase : List[str] = self.dummy_model()
lowerCamelCase : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(A )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase : str = model(A, A )
lowerCamelCase : Tuple = scheduler.step(A, A, A ).prev_sample
assert sample.dtype == torch.floataa
| 449 | 0 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )


def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
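
# Hedged usage sketch (not in the original module): graphs are adjacency dicts
# mapping each vertex to its successors.
#
#     >>> check_cycle({0: [1], 1: [2], 2: [0]})  # 2 -> 0 closes a cycle
#     True
#     >>> check_cycle({0: [1], 1: [2], 2: []})
#     False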
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig ( PretrainedConfig ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class OwlViTVisionConfig ( PretrainedConfig ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class OwlViTConfig ( PretrainedConfig ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class OwlViTOnnxConfig ( OnnxConfig ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
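
# Hedged usage sketch (not part of the original file), written against the
# upstream class names restored above: the joint config nests the two
# sub-configs as plain dicts.
#
#     text_cfg = OwlViTTextConfig()
#     vision_cfg = OwlViTVisionConfig()
#     cfg = OwlViTConfig(text_config=text_cfg.to_dict(), vision_config=vision_cfg.to_dict())
#     cfg.projection_dim  # -> 512 by default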
| 60 | 0 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """simple docstring"""
    # Walk the square spiral ring by ring (side lengths j = 3, 5, 7, ...); a
    # spiral of side length j has 2*j - 1 numbers on its diagonals. The corner
    # (j + 2)**2 is a perfect square and never prime, so only three corners per
    # new ring can contribute to the prime count.
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
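
# Hedged note (not in the original file): this is Project Euler problem 58
# (spiral primes); with the default ratio of 0.1 the first qualifying side
# length is the published answer 26241:
#
#     >>> solution()
#     26241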
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 702 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 304 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
snake_case = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
snake_case = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
snake_case = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
snake_case = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare( self , dl_manager ):
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        if NLTK_VERSION >= version.Version("3.6.5" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]

        return {"meteor": np.mean(scores )}
| 62 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[str] = KandinskyVaaPipeline
UpperCamelCase_ : List[Any] = [
'''image_embeds''',
'''negative_image_embeds''',
]
UpperCamelCase_ : Tuple = ['''image_embeds''', '''negative_image_embeds''']
UpperCamelCase_ : Any = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase_ : List[str] = False
@property
def _A ( self : List[Any] ):
return 32
@property
def _A ( self : List[Any] ):
return 32
@property
def _A ( self : Any ):
return self.time_input_dim
@property
def _A ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def _A ( self : Tuple ):
return 100
@property
def _A ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def _A ( self : int ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _A ( self : Any ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = self.dummy_unet
SCREAMING_SNAKE_CASE : str = self.dummy_movq
SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type="epsilon" , thresholding=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _A ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]=0 ):
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("mps" ):
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : Any = "cpu"
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = self.pipeline_class(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Tuple = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array(
[0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_text2img( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=True )
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        image_embeds , negative_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        generator = torch.Generator(device="cuda" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=100 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 62 | 1 |
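
# A minimal sketch of the two-stage flow the slow test above exercises: the prior
# maps text to CLIP image embeddings, the decoder maps embeddings to pixels.
# Class and checkpoint names follow current diffusers releases; treat them as
# assumptions if your version differs.
import torch
from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
image_embeds, negative_image_embeds = pipe_prior(
    "red cat, 4k photo", negative_prompt="", num_inference_steps=25
).to_tuple()

pipe = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=512,
    width=512,
    num_inference_steps=50,
).images[0]
image.save("red_cat.png")
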
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` (Sieve of Eratosthenes)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 713 |
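
# The sieve above runs in O(n log log n). A quick sanity check against naive
# trial division, assuming `prime_sieve` from the sample above is in scope
# (illustrative only):
def is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

assert prime_sieve(50) == [p for p in range(2, 51) if is_prime(p)]
print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
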
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowercase :
def __init__(self , A , ):
lowerCamelCase_ : int = parent
lowerCamelCase_ : Dict = 1_3
lowerCamelCase_ : Any = 7
lowerCamelCase_ : Dict = 3_0
lowerCamelCase_ : Optional[Any] = self.seq_length + self.mem_len
lowerCamelCase_ : Tuple = 1_5
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Dict = 9_9
lowerCamelCase_ : Any = [1_0, 5_0, 8_0]
lowerCamelCase_ : List[str] = 3_2
lowerCamelCase_ : Tuple = 3_2
lowerCamelCase_ : Optional[Any] = 4
lowerCamelCase_ : Union[str, Any] = 8
lowerCamelCase_ : Dict = 1_2_8
lowerCamelCase_ : Dict = 2
lowerCamelCase_ : Tuple = 2
lowerCamelCase_ : Any = None
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = self.vocab_size - 1
lowerCamelCase_ : Dict = 0.01
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : str = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCAmelCase__ (self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : int = TFTransfoXLModel(A )
lowerCamelCase_, lowerCamelCase_ : Tuple = model(A ).to_tuple()
lowerCamelCase_ : Union[str, Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowerCamelCase_, lowerCamelCase_ : Any = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : List[Any] = TFTransfoXLLMHeadModel(A )
lowerCamelCase_, lowerCamelCase_ : List[Any] = model(A ).to_tuple()
lowerCamelCase_ : Optional[int] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = model(A ).to_tuple()
lowerCamelCase_, lowerCamelCase_ : Tuple = model([input_ids_a, mems_a] ).to_tuple()
lowerCamelCase_ : List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowerCamelCase_, lowerCamelCase_ : str = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : Dict = TFTransfoXLForSequenceClassification(A )
lowerCamelCase_ : Optional[int] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = self.prepare_config_and_inputs()
((lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_)) : List[Any] = config_and_inputs
lowerCamelCase_ : Tuple = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase : Tuple = () if is_tf_available() else ()
lowerCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase : str = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Dict = False
lowerCamelCase : Optional[int] = False
def UpperCAmelCase__ (self , A , A , A , A , A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = TFTransfoXLModelTester(self )
lowerCamelCase_ : Optional[Any] = ConfigTester(self , config_class=A , d_embed=3_7 )
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ (self ):
self.model_tester.set_seed()
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def UpperCAmelCase__ (self ):
self.model_tester.set_seed()
lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class(A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCamelCase_ : str = model.get_output_embeddings()
assert isinstance(A , tf.keras.layers.Layer )
lowerCamelCase_ : int = model.get_bias()
assert name is None
else:
lowerCamelCase_ : Tuple = model.get_output_embeddings()
assert x is None
lowerCamelCase_ : int = model.get_bias()
assert name is None
def UpperCAmelCase__ (self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCAmelCase__ (self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Optional[Any] = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCAmelCase__ (self ):
pass
@require_tf
class __lowercase ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowerCamelCase_ : Optional[Any] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCamelCase_ : Union[str, Any] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCamelCase_ : Optional[Any] = model.generate(A , max_length=2_0_0 , do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() , A )
| 357 | 0 |
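
# Driving the same checkpoint outside the test harness; the tokenizer class and
# generate arguments here are assumptions, not taken from the tests above.
from transformers import TFTransfoXLLMHeadModel, TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")

inputs = tokenizer("In 1991 , the remains of", return_tensors="tf")
output_ids = model.generate(inputs["input_ids"], max_length=40, do_sample=False)
print(tokenizer.decode(output_ids[0]))
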
def apply_table(inp, table):
    """Apply a permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 63 |
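
# The primitives above can be wrapped into reusable helpers (hypothetical names;
# they assume the tables and s-boxes from the sample above are in scope).
# Decryption is encryption with the two subkeys swapped -- the Feistel property.
def sdes_keys(key: str) -> tuple[str, str]:
    temp = apply_table(key, p10_table)
    left, right = left_shift(temp[:5]), left_shift(temp[5:])
    key1 = apply_table(left + right, p8_table)
    for _ in range(2):
        left, right = left_shift(left), left_shift(right)
    key2 = apply_table(left + right, p8_table)
    return key1, key2


def sdes_encrypt(message: str, key1: str, key2: str) -> str:
    temp = function(expansion, s0, s1, key1, apply_table(message, IP))
    temp = temp[4:] + temp[:4]  # swap halves between the two rounds
    return apply_table(function(expansion, s0, s1, key2, temp), IP_inv)


def sdes_decrypt(ct: str, key1: str, key2: str) -> str:
    return sdes_encrypt(ct, key2, key1)
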
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):

    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feature_extract_tester = TvltFeatureExtractionTester(self)
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__magic_name__, '''spectrogram_length''' ) )
self.assertTrue(hasattr(__magic_name__, '''feature_size''' ) )
self.assertTrue(hasattr(__magic_name__, '''num_audio_channels''' ) )
self.assertTrue(hasattr(__magic_name__, '''hop_length''' ) )
self.assertTrue(hasattr(__magic_name__, '''chunk_length''' ) )
self.assertTrue(hasattr(__magic_name__, '''sampling_rate''' ) )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : str = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(__magic_name__ )
UpperCamelCase__ : List[str] = feat_extract_first.to_dict()
UpperCamelCase__ : Union[str, Any] = feat_extract_second.to_dict()
UpperCamelCase__ : str = dict_first.pop('''mel_filters''' )
UpperCamelCase__ : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__magic_name__, __magic_name__ ) )
self.assertEqual(__magic_name__, __magic_name__ )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ : Tuple = os.path.join(__magic_name__, '''feat_extract.json''' )
feat_extract_first.to_json_file(__magic_name__ )
UpperCamelCase__ : List[Any] = self.feature_extraction_class.from_json_file(__magic_name__ )
UpperCamelCase__ : List[Any] = feat_extract_first.to_dict()
UpperCamelCase__ : Tuple = feat_extract_second.to_dict()
UpperCamelCase__ : Any = dict_first.pop('''mel_filters''' )
UpperCamelCase__ : Union[str, Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__magic_name__, __magic_name__ ) )
self.assertEqual(__magic_name__, __magic_name__ )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
# Initialize feature_extractor
UpperCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ : int = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ : List[str] = [np.asarray(__magic_name__ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase__ : int = feature_extractor(np_speech_inputs[0], return_tensors='''np''', sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
UpperCamelCase__ : Union[str, Any] = feature_extractor(__magic_name__, return_tensors='''np''', sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
UpperCamelCase__ : Optional[int] = feature_extractor(
__magic_name__, return_tensors='''np''', sampling_rate=44100, mask_audio=__magic_name__ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ : Tuple = np.asarray(__magic_name__ )
UpperCamelCase__ : int = feature_extractor(__magic_name__, return_tensors='''np''', sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase__ ( self, __magic_name__ ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
# automatic decoding with librispeech
UpperCamelCase__ : Optional[int] = ds.sort('''id''' ).select(range(__magic_name__ ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : str = self._load_datasamples(1 )
UpperCamelCase__ : Any = TvltFeatureExtractor()
UpperCamelCase__ : Tuple = feature_extractor(__magic_name__, return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape, (1, 1, 192, 128) )
UpperCamelCase__ : Any = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], __magic_name__, atol=1E-4 ) )
| 253 | 0 |
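
# Minimal standalone usage of the feature extractor tested above; random audio
# stands in for real samples.
import numpy as np
from transformers import TvltFeatureExtractor

extractor = TvltFeatureExtractor()
audio = [np.random.rand(44100).astype(np.float32)]  # ~1 second at 44.1 kHz
features = extractor(audio, sampling_rate=44100, return_tensors="np")
print(features.audio_values.shape)  # (batch, num_audio_channels, time, feature_size)
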
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
| 720 |
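
# The scraper above fails silently on HTTP errors and missing tags; a slightly
# hardened variant (the User-Agent value and timeout are arbitrary choices):
import requests
from bs4 import BeautifulSoup


def fetch_og_image_url(page_url: str) -> str:
    response = requests.get(page_url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10)
    response.raise_for_status()  # fail loudly on HTTP errors
    meta = BeautifulSoup(response.content, "html.parser").find("meta", {"property": "og:image"})
    if meta is None or not meta.get("content"):
        raise ValueError(f"No og:image meta tag found on {page_url}")
    return meta["content"]
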
"""simple docstring"""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current conditions for a location; `locals()` supplies the q/appid query params."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Five-day forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Current weather plus minutely/hourly/daily data for given coordinates."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 579 | 0 |
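
# The `locals()` trick above is terse but fragile: introducing any extra local
# variable would silently change the query string. An explicit params dict is
# the safer pattern:
import requests


def current_weather_explicit(city: str, appid: str) -> dict:
    params = {"q": city, "appid": appid}  # exactly the fields the API expects
    response = requests.get("https://api.openweathermap.org/data/2.5/weather", params=params, timeout=10)
    response.raise_for_status()
    return response.json()
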
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # Replace the last `occurrence` occurrences of `old` with `new`.
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')

        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')

        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 170 |
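
# Why `rreplace` instead of `str.replace`: checkpoint keys may contain the
# substring being rewritten elsewhere, and only the final suffix must change.
assert rreplace("encoder.blocks.w.1.w", ".w", ".weight", 1) == "encoder.blocks.w.1.weight"
assert "encoder.blocks.w.1.w".replace(".w", ".weight") == "encoder.blocks.weight.1.weight"  # too greedy
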
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase : Dict = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase ='esm'
def __init__( self : str , _lowerCamelCase : Any=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : List[str]=7_6_8 , _lowerCamelCase : Optional[int]=1_2 , _lowerCamelCase : Optional[int]=1_2 , _lowerCamelCase : Union[str, Any]=3_0_7_2 , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : str=0.1 , _lowerCamelCase : List[Any]=1_0_2_6 , _lowerCamelCase : Tuple=0.02 , _lowerCamelCase : int=1E-12 , _lowerCamelCase : Union[str, Any]="absolute" , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : str=False , _lowerCamelCase : Any=False , _lowerCamelCase : str=None , _lowerCamelCase : Union[str, Any]=None , **_lowerCamelCase : Dict , ):
super().__init__(pad_token_id=_lowerCamelCase , mask_token_id=_lowerCamelCase , **_lowerCamelCase )
snake_case__ : Any = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : int = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : str = max_position_embeddings
snake_case__ : Dict = initializer_range
snake_case__ : int = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Union[str, Any] = use_cache
snake_case__ : int = emb_layer_norm_before
snake_case__ : Dict = token_dropout
snake_case__ : List[Any] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
snake_case__ : Optional[int] = EsmFoldConfig()
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
snake_case__ : List[str] = EsmFoldConfig(**_lowerCamelCase )
snake_case__ : int = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
snake_case__ : Tuple = get_default_vocab_list()
else:
snake_case__ : List[Any] = vocab_list
else:
snake_case__ : List[str] = None
snake_case__ : int = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , _lowerCamelCase ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def UpperCAmelCase__ ( self : int ):
snake_case__ : int = super().to_dict()
if isinstance(self.esmfold_config , _lowerCamelCase ):
snake_case__ : List[str] = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__ :
_lowerCAmelCase =None
_lowerCAmelCase =True
_lowerCAmelCase =False
_lowerCAmelCase =False
_lowerCAmelCase =False
_lowerCAmelCase =0
_lowerCAmelCase =True
_lowerCAmelCase =False
_lowerCAmelCase =128
_lowerCAmelCase =None
def UpperCAmelCase__ ( self : Optional[Any] ):
if self.trunk is None:
snake_case__ : int = TrunkConfig()
elif isinstance(self.trunk , _lowerCamelCase ):
snake_case__ : Tuple = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = asdict(self )
snake_case__ : Union[str, Any] = self.trunk.to_dict()
return output
@dataclass
class snake_case__ :
_lowerCAmelCase =48
_lowerCAmelCase =1024
_lowerCAmelCase =128
_lowerCAmelCase =32
_lowerCAmelCase =32
_lowerCAmelCase =32
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =False
_lowerCAmelCase =4
_lowerCAmelCase =128
_lowerCAmelCase =None
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self.structure_module is None:
snake_case__ : Optional[Any] = StructureModuleConfig()
elif isinstance(self.structure_module , _lowerCamelCase ):
snake_case__ : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
snake_case__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
snake_case__ : Union[str, Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCAmelCase__ ( self : Optional[Any] ):
snake_case__ : Optional[Any] = asdict(self )
snake_case__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__ :
_lowerCAmelCase =384
_lowerCAmelCase =128
_lowerCAmelCase =16
_lowerCAmelCase =128
_lowerCAmelCase =12
_lowerCAmelCase =4
_lowerCAmelCase =8
_lowerCAmelCase =0.1
_lowerCAmelCase =8
_lowerCAmelCase =1
_lowerCAmelCase =2
_lowerCAmelCase =7
_lowerCAmelCase =10
_lowerCAmelCase =1E-8
_lowerCAmelCase =1E5
def UpperCAmelCase__ ( self : Union[str, Any] ):
return asdict(self )
def lowercase__( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 170 | 1 |
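
# Sketch of composing these configs (attribute names assumed from current
# transformers releases):
from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
print(config.position_embedding_type)  # 'absolute' by default
round_tripped = EsmConfig.from_dict(config.to_dict())
assert round_tripped.hidden_size == config.hidden_size
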
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 715 |
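
# The s2/c2 terms in next_point are sin(2*theta) and cos(2*theta) written in
# terms of the normal's gradient tan(theta); reflecting a gradient across the
# normal amounts to a rotation by twice that angle. A numeric spot check:
from math import atan, cos, isclose, sin

t = 0.75  # any normal gradient
theta = atan(t)
assert isclose(2 * t / (1 + t * t), sin(2 * theta))
assert isclose((1 - t * t) / (1 + t * t), cos(2 * theta))
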
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Any = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "codegen"
a__ : Tuple = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , _lowercase : Optional[int]=5_04_00 , _lowercase : List[str]=20_48 , _lowercase : Optional[int]=20_48 , _lowercase : Tuple=40_96 , _lowercase : Optional[Any]=28 , _lowercase : Tuple=16 , _lowercase : str=64 , _lowercase : Dict=None , _lowercase : Any="gelu_new" , _lowercase : Any=0.0 , _lowercase : Dict=0.0 , _lowercase : Dict=0.0 , _lowercase : str=1E-5 , _lowercase : Union[str, Any]=0.02 , _lowercase : List[str]=True , _lowercase : Dict=5_02_56 , _lowercase : str=5_02_56 , _lowercase : Any=False , **_lowercase : Optional[Any] , ):
__UpperCAmelCase = vocab_size
__UpperCAmelCase = n_ctx
__UpperCAmelCase = n_positions
__UpperCAmelCase = n_embd
__UpperCAmelCase = n_layer
__UpperCAmelCase = n_head
__UpperCAmelCase = n_inner
__UpperCAmelCase = rotary_dim
__UpperCAmelCase = activation_function
__UpperCAmelCase = resid_pdrop
__UpperCAmelCase = embd_pdrop
__UpperCAmelCase = attn_pdrop
__UpperCAmelCase = layer_norm_epsilon
__UpperCAmelCase = initializer_range
__UpperCAmelCase = use_cache
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = eos_token_id
super().__init__(
bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase )
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : int , _lowercase : PretrainedConfig , _lowercase : str = "default" , _lowercase : List[PatchingSpec] = None , _lowercase : bool = False , ):
super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
def a ( self : Optional[Any] ):
__UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
__UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def a ( self : Dict ):
return self._config.n_layer
@property
def a ( self : Tuple ):
return self._config.n_head
def a ( self : Tuple , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
__UpperCAmelCase = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the input in the way they appears in the forward()
__UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__UpperCAmelCase , __UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__UpperCAmelCase = seqlen + 2
__UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCAmelCase = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
__UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
__UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
__UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def a ( self : Any ):
return 13
| 397 | 0 |
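
# The attribute_map above aliases GPT-style names onto the standard config
# attributes; a quick sketch (assuming the released CodeGenConfig):
from transformers import CodeGenConfig

config = CodeGenConfig(n_embd=1024, n_layer=20, n_head=16)
assert config.hidden_size == config.n_embd == 1024
assert config.num_hidden_layers == config.n_layer == 20
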
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1).

        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """True if any contiguous subarray sums to target_sum.

        >>> PrefixSum([1, 2, 3]).contains_sum(5)
        True
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 174 |
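
# Usage of the PrefixSum class above: O(n) preprocessing buys O(1) range-sum
# queries, and contains_sum is the classic hash-set subarray-sum check.
ps = PrefixSum([1, 2, 3, 4, 5])
assert ps.get_sum(0, 4) == 15  # whole array
assert ps.get_sum(1, 3) == 9   # 2 + 3 + 4
assert ps.contains_sum(7)      # 3 + 4
assert not ps.contains_sum(100)
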
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase = 16
lowerCAmelCase = 32
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : DatasetDict , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int = 1_6 ) ->List[str]:
lowerCamelCase__ : Union[str, Any] =AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Dict =DatasetDict(
{
'train': dataset['train'].select(snake_case_ ),
'validation': dataset['train'].select(snake_case_ ),
'test': dataset['validation'],
} )
def tokenize_function(snake_case_ : str ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ : Tuple =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase__ : List[str] =datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase__ : Dict =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case_ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase__ : int =1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase__ : str =1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase__ : Union[str, Any] =8
else:
lowerCamelCase__ : int =None
return tokenizer.pad(
snake_case_ , padding='longest' , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCamelCase__ : Optional[int] =DataLoader(
tokenized_datasets['train'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
lowerCamelCase__ : int =DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
lowerCamelCase__ : List[Any] =DataLoader(
tokenized_datasets['test'] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader, test_dataloader
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[Any] ) ->Union[str, Any]:
# New Code #
lowerCamelCase__ : Optional[int] =[]
# Download the dataset
lowerCamelCase__ : Optional[int] =load_dataset('glue' , 'mrpc' )
# Create our splits
lowerCamelCase__ : List[Any] =StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowerCamelCase__ : List[str] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ : Any =config['lr']
lowerCamelCase__ : List[str] =int(config['num_epochs'] )
lowerCamelCase__ : List[Any] =int(config['seed'] )
lowerCamelCase__ : Tuple =int(config['batch_size'] )
lowerCamelCase__ : List[str] =evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCamelCase__ : int =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase__ : Union[str, Any] =batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase__ : Any =MAX_GPU_BATCH_SIZE
set_seed(snake_case_ )
# New Code #
# Create our folds:
lowerCamelCase__ : Tuple =kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
lowerCamelCase__ : List[str] =[]
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(snake_case_ ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =get_fold_dataloaders(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Optional[int] =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : int =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : Tuple =AdamW(params=model.parameters() , lr=snake_case_ )
# Instantiate scheduler
lowerCamelCase__ : str =get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=1_0_0 , num_training_steps=(len(snake_case_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Now we train the model
for epoch in range(snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase__ : Dict =model(**snake_case_ )
lowerCamelCase__ : str =outputs.loss
lowerCamelCase__ : Optional[int] =loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : List[Any] =model(**snake_case_ )
lowerCamelCase__ : Dict =outputs.logits.argmax(dim=-1 )
lowerCamelCase__ , lowerCamelCase__ : Tuple =accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
lowerCamelCase__ : Dict =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , snake_case_ )
# New Code #
# We also run predictions on the test set at the very end
lowerCamelCase__ : Any =[]
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(**snake_case_ )
lowerCamelCase__ : List[Any] =outputs.logits
lowerCamelCase__ , lowerCamelCase__ : str =accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(snake_case_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowerCamelCase__ : Dict =torch.cat(snake_case_ , dim=0 )
lowerCamelCase__ : str =torch.stack(snake_case_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowerCamelCase__ : int =metric.compute(predictions=snake_case_ , references=snake_case_ )
accelerator.print('Average test metrics from all folds:' , snake_case_ )
def lowerCAmelCase_ ( ) ->str:
lowerCamelCase__ : Tuple =argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=snake_case_ , default=snake_case_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=snake_case_ , default=3 , help='The number of splits to perform across the dataset' )
lowerCamelCase__ : Tuple =parser.parse_args()
lowerCamelCase__ : Optional[Any] ={'lr': 2E-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main() | 174 | 1 |
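
# The fold logic from the script above, in isolation: StratifiedKFold only
# yields index arrays, and the label vector drives the stratification
# (illustrative, standalone):
import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])
kfold = StratifiedKFold(n_splits=2)
for train_idxs, valid_idxs in kfold.split(np.zeros(len(labels)), labels):
    print(train_idxs, valid_idxs, labels[valid_idxs].mean())  # each fold keeps the 50/50 balance
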
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2017, 10, 24)
    'Tuesday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295 |
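# An independent sanity check of the doomsday idea above, using only the
# standard library: in any year, the easy-to-remember dates 4/4, 6/6, 8/8,
# 10/10 and 12/12 all fall on the same weekday (the year's "doomsday").
# The helper name below is an illustrative assumption.
import datetime

def year_doomsday(year: int) -> int:
    return datetime.date(year, 4, 4).weekday()

for sample_year in (1999, 2000, 2024):
    assert all(
        datetime.date(sample_year, m, m).weekday() == year_doomsday(sample_year)
        for m in (6, 8, 10, 12)
    )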
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( lowerCamelCase__ : str ) -> Optional[int]:
def decorator(lowerCamelCase__ : int ):
_SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowerCamelCase__, "handle_key", [] )
handle += [key]
setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ )
return func
return decorator
def _lowerCAmelCase ( *lowerCamelCase__ : List[str] ) -> Tuple:
def decorator(lowerCamelCase__ : Dict ):
_SCREAMING_SNAKE_CASE : List[Any] = getattr(lowerCamelCase__, "handle_key", [] )
handle += keys
setattr(lowerCamelCase__, "handle_key", lowerCamelCase__ )
return func
return decorator
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __new__( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = super().__new__(cls , snake_case__ , snake_case__ , snake_case__ )
if not hasattr(snake_case__ , "key_handler" ):
setattr(snake_case__ , "key_handler" , {} )
setattr(snake_case__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
_SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(snake_case__ , "handle_key" , [] )
for key in handled_keys:
_SCREAMING_SNAKE_CASE : Tuple = value
return new_cls
@staticmethod
def __SCREAMING_SNAKE_CASE ( cls ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = get_character()
if char != KEYMAP["undefined"]:
_SCREAMING_SNAKE_CASE : Dict = ord(snake_case__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = cls.key_handler.get(snake_case__ )
if handler:
_SCREAMING_SNAKE_CASE : Optional[int] = char
return handler(cls )
else:
return None
def _lowerCAmelCase ( cls : List[Any] ) -> str:
return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
| 295 | 1 |
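# A minimal sketch of the decorator-plus-metaclass dispatch pattern implemented
# above: methods are tagged with the keys they handle and the metaclass collects
# them into a lookup table. The names mark/Dispatcher/Menu are illustrative.
def mark(key):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class Dispatcher(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        new_cls.key_handler = {
            k: attr for attr in attrs.values() for k in getattr(attr, "handle_key", [])
        }
        return new_cls

class Menu(metaclass=Dispatcher):
    @mark("q")
    def quit(self):
        return "bye"

assert Menu.key_handler["q"](Menu()) == "bye"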
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__SCREAMING_SNAKE_CASE = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : str ,lowerCAmelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict =state_dict.pop(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =val
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] =OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
SCREAMING_SNAKE_CASE_ : Dict =key.replace('backbone.0.body' ,'backbone.conv_encoder.model' )
SCREAMING_SNAKE_CASE_ : List[str] =value
else:
SCREAMING_SNAKE_CASE_ : List[str] =value
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] =''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ : str =state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE_ : Any =state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ : Optional[Any] =in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE_ : str =in_proj_bias[:256]
SCREAMING_SNAKE_CASE_ : Dict =in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE_ : List[Any] =in_proj_bias[256:512]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE_ : List[str] =in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE_ : str =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ : Tuple =in_proj_weight[:256, :]
SCREAMING_SNAKE_CASE_ : Tuple =in_proj_bias[:256]
SCREAMING_SNAKE_CASE_ : Optional[Any] =in_proj_weight[256:512, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] =in_proj_bias[256:512]
SCREAMING_SNAKE_CASE_ : Union[str, Any] =in_proj_weight[-256:, :]
SCREAMING_SNAKE_CASE_ : str =in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE_ : int =state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE_ : int =state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE_ : List[str] =in_proj_weight_cross_attn[:256, :]
SCREAMING_SNAKE_CASE_ : Optional[int] =in_proj_bias_cross_attn[:256]
SCREAMING_SNAKE_CASE_ : Optional[int] =in_proj_weight_cross_attn[256:512, :]
SCREAMING_SNAKE_CASE_ : int =in_proj_bias_cross_attn[256:512]
SCREAMING_SNAKE_CASE_ : List[str] =in_proj_weight_cross_attn[-256:, :]
SCREAMING_SNAKE_CASE_ : Any =in_proj_bias_cross_attn[-256:]
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str =image.size
SCREAMING_SNAKE_CASE_ : List[Any] =max(lowerCAmelCase_ ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] =800 if 'detection' in checkpoint_url else 1000
SCREAMING_SNAKE_CASE_ : Any =target_max_size / current_max_size
SCREAMING_SNAKE_CASE_ : List[Any] =image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] =F.to_tensor(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =F.normalize(lowerCAmelCase_ ,mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.hub.load_state_dict_from_url(lowerCAmelCase_ ,map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : str =rename_backbone_keys(lowerCAmelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCAmelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE_ : Tuple ='model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
SCREAMING_SNAKE_CASE_ : List[str] =state_dict.pop(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =val
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_ : Union[str, Any] =TableTransformerConfig(
backbone='resnet18' ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,ce_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.4 ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,)
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : Optional[int] =15
SCREAMING_SNAKE_CASE_ : Tuple =2
SCREAMING_SNAKE_CASE_ : Tuple ={0: 'table', 1: 'table rotated'}
SCREAMING_SNAKE_CASE_ : Any =idalabel
SCREAMING_SNAKE_CASE_ : List[str] ={v: k for k, v in idalabel.items()}
else:
SCREAMING_SNAKE_CASE_ : Dict =125
SCREAMING_SNAKE_CASE_ : Union[str, Any] =6
SCREAMING_SNAKE_CASE_ : Optional[int] ={
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
SCREAMING_SNAKE_CASE_ : List[str] =idalabel
SCREAMING_SNAKE_CASE_ : Optional[Any] ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Tuple =DetrImageProcessor(
format='coco_detection' ,max_size=800 if 'detection' in checkpoint_url else 1000 )
SCREAMING_SNAKE_CASE_ : Optional[Any] =TableTransformerForObjectDetection(lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
model.eval()
# verify our conversion
SCREAMING_SNAKE_CASE_ : List[Any] ='example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
SCREAMING_SNAKE_CASE_ : Any =hf_hub_download(repo_id='nielsr/example-pdf' ,repo_type='dataset' ,filename=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Any =Image.open(lowerCAmelCase_ ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : List[Any] =normalize(resize(lowerCAmelCase_ ,lowerCAmelCase_ ) ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ : str =model(lowerCAmelCase_ )
if "detection" in checkpoint_url:
SCREAMING_SNAKE_CASE_ : List[str] =(1, 15, 3)
SCREAMING_SNAKE_CASE_ : Tuple =torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
SCREAMING_SNAKE_CASE_ : Any =(1, 125, 7)
SCREAMING_SNAKE_CASE_ : Tuple =torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
SCREAMING_SNAKE_CASE_ : int =torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] ,lowerCAmelCase_ ,atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] ,lowerCAmelCase_ ,atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
SCREAMING_SNAKE_CASE_ : Optional[int] =(
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(lowerCAmelCase_ )
image_processor.push_to_hub(lowerCAmelCase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 220 |
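# A self-contained illustration of the q/k/v splitting done by read_in_q_k_v
# above: PyTorch's fused attention projection stores a single (3*d, d) weight,
# which is sliced into query/key/value blocks. d=256 mirrors the model above;
# the tensor itself is toy data.
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)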
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__SCREAMING_SNAKE_CASE = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = 'ernie_m'
_lowercase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __UpperCAmelCase = 250_002 , __UpperCAmelCase = 768 , __UpperCAmelCase = 12 , __UpperCAmelCase = 12 , __UpperCAmelCase = 3_072 , __UpperCAmelCase = "gelu" , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = 514 , __UpperCAmelCase = 0.02 , __UpperCAmelCase = 1 , __UpperCAmelCase = 1E-05 , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=0.0 , **__UpperCAmelCase , ):
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =vocab_size
SCREAMING_SNAKE_CASE_ : str =hidden_size
SCREAMING_SNAKE_CASE_ : Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_ : int =num_attention_heads
SCREAMING_SNAKE_CASE_ : str =intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] =hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] =max_position_embeddings
SCREAMING_SNAKE_CASE_ : int =initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_ : List[Any] =classifier_dropout
SCREAMING_SNAKE_CASE_ : List[str] =is_decoder
SCREAMING_SNAKE_CASE_ : Tuple =act_dropout
| 220 | 1 |
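# A hedged usage sketch for a PretrainedConfig subclass like the one above:
# configs round-trip through plain dicts, which is how they are persisted as
# config.json. The TinyConfig class and its fields are illustrative assumptions.
from transformers import PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = "tiny"

    def __init__(self, hidden_size=768, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

restored = TinyConfig.from_dict(TinyConfig(hidden_size=128).to_dict())
assert restored.hidden_size == 128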
"""simple docstring"""
import argparse
import datetime
def snake_case_ ( A_ : str ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = {
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
_lowerCamelCase : List[Any] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(A_ ) < 11:
raise ValueError('''Must be 10 characters long''' )
# Get month
_lowerCamelCase : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('''Month must be between 1 - 12''' )
_lowerCamelCase : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get day
_lowerCamelCase : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('''Date must be between 1 - 31''' )
# Get second separator
_lowerCamelCase : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get year
_lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 85_00:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
_lowerCamelCase : Dict = datetime.date(int(A_ ), int(A_ ), int(A_ ) )
# Start math
if m <= 2:
_lowerCamelCase : Tuple = y - 1
_lowerCamelCase : Dict = m + 12
# maths var
_lowerCamelCase : int = int(str(A_ )[:2] )
_lowerCamelCase : int = int(str(A_ )[2:] )
_lowerCamelCase : int = int(2.6 * m - 5.39 )
_lowerCamelCase : int = int(c / 4 )
_lowerCamelCase : int = int(k / 4 )
_lowerCamelCase : int = int(d + k )
_lowerCamelCase : int = int(t + u + v + x )
_lowerCamelCase : int = int(z - (2 * c) )
_lowerCamelCase : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
_lowerCamelCase : str = F'''Your date {date_input}, is a {days[str(A_ )]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
lowerCAmelCase__ = parser.parse_args()
zeller(args.date_input)
| 706 |
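# An independent spot check of Zeller's congruence as used above, compared with
# the standard library. The helper restates the same arithmetic with plain
# names; it is a verification sketch, not the script's public API.
import datetime

def zeller_weekday(m: int, d: int, y: int) -> int:  # 0 = Sunday ... 6 = Saturday
    if m <= 2:
        y, m = y - 1, m + 12
    c, k = y // 100, y % 100
    return (d + int(2.6 * m - 5.39) + k + k // 4 + c // 4 - 2 * c) % 7

for m, d, y in [(1, 1, 2000), (7, 4, 1776), (2, 29, 2024)]:
    assert zeller_weekday(m, d, y) == (datetime.date(y, m, d).weekday() + 1) % 7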
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : dict[int, int] = {}
_lowerCamelCase : Optional[Any] = 2
while True:
_lowerCamelCase : Union[str, Any] = factor_map.pop(A_, A_ )
if factor:
_lowerCamelCase : Tuple = factor + prime
while x in factor_map:
x += factor
_lowerCamelCase : Union[str, Any] = factor
else:
_lowerCamelCase : Any = prime
yield prime
prime += 1
def snake_case_ ( A_ : float = 1E10 ):
'''simple docstring'''
_lowerCamelCase : List[str] = sieve()
_lowerCamelCase : Dict = 1
while True:
_lowerCamelCase : Union[str, Any] = next(A_ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(A_ )
n += 2
if __name__ == "__main__":
print(solution())
| 598 | 0 |
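# A readable restatement of the incremental sieve generator above, with a usage
# example: composites are crossed off lazily through a factor map, so the
# generator needs no upper bound. Variable names are illustrative.
def primes():
    factor_map: dict[int, int] = {}
    candidate = 2
    while True:
        factor = factor_map.pop(candidate, None)
        if factor:
            nxt = candidate + factor
            while nxt in factor_map:
                nxt += factor
            factor_map[nxt] = factor
        else:
            factor_map[candidate * candidate] = candidate
            yield candidate
        candidate += 1

gen = primes()
assert [next(gen) for _ in range(6)] == [2, 3, 5, 7, 11, 13]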
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=0.9_9_9 ,__UpperCamelCase : Optional[int]="cosine" ,):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Dict ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowerCAmelCase_ : Tuple = []
for i in range(__UpperCamelCase ):
lowerCAmelCase_ : List[Any] = i / num_diffusion_timesteps
lowerCAmelCase_ : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ):
_a = [e.name for e in KarrasDiffusionSchedulers]
_a = 2
@register_to_config
def __init__( self : Optional[int] , A_ : int = 1_0_0_0 , A_ : float = 0.0_0085 , A_ : float = 0.012 , A_ : str = "linear" , A_ : Optional[Union[np.ndarray, List[float]]] = None , A_ : str = "epsilon" , A_ : str = "linspace" , A_ : int = 0 , ):
if trained_betas is not None:
lowerCAmelCase_ : Any = torch.tensor(A_ , dtype=torch.floataa)
elif beta_schedule == "linear":
lowerCAmelCase_ : Optional[int] = torch.linspace(A_ , A_ , A_ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase_ : int = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A_ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase_ : Dict = betas_for_alpha_bar(A_)
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""")
lowerCAmelCase_ : List[Any] = 1.0 - self.betas
lowerCAmelCase_ : str = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(A_ , A_ , A_)
def UpperCAmelCase__ ( self : List[str] , A_ : List[Any] , A_ : List[Any]=None):
if schedule_timesteps is None:
lowerCAmelCase_ : Optional[int] = self.timesteps
lowerCAmelCase_ : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
lowerCAmelCase_ : Union[str, Any] = 1 if len(A_) > 1 else 0
else:
lowerCAmelCase_ : Optional[int] = timestep.cpu().item() if torch.is_tensor(A_) else timestep
lowerCAmelCase_ : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCAmelCase__ ( self : Any):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase__ ( self : Any , A_ : torch.FloatTensor , A_ : Union[float, torch.FloatTensor] , ):
lowerCAmelCase_ : Tuple = self.index_for_timestep(A_)
if self.state_in_first_order:
lowerCAmelCase_ : Union[str, Any] = self.sigmas[step_index]
else:
lowerCAmelCase_ : int = self.sigmas_interpol[step_index]
lowerCAmelCase_ : Dict = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase__ ( self : List[str] , A_ : int , A_ : Union[str, torch.device] = None , A_ : Optional[int] = None , ):
lowerCAmelCase_ : Any = num_inference_steps
lowerCAmelCase_ : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase_ : str = np.linspace(0 , num_train_timesteps - 1 , A_ , dtype=A_)[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase_ : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase_ : str = (np.arange(0 , A_) * step_ratio).round()[::-1].copy().astype(A_)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase_ : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase_ : Dict = (np.arange(A_ , 0 , -step_ratio)).round().copy().astype(A_)
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
lowerCAmelCase_ : Any = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
lowerCAmelCase_ : List[Any] = torch.from_numpy(np.log(A_)).to(A_)
lowerCAmelCase_ : Any = np.interp(A_ , np.arange(0 , len(A_)) , A_)
lowerCAmelCase_ : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
lowerCAmelCase_ : str = torch.from_numpy(A_).to(device=A_)
# interpolate sigmas
lowerCAmelCase_ : Optional[Any] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp()
lowerCAmelCase_ : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
lowerCAmelCase_ : Dict = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
if str(A_).startswith('''mps'''):
# mps does not support float64
lowerCAmelCase_ : List[str] = torch.from_numpy(A_).to(A_ , dtype=torch.floataa)
else:
lowerCAmelCase_ : List[str] = torch.from_numpy(A_).to(A_)
# interpolate timesteps
lowerCAmelCase_ : Dict = self.sigma_to_t(A_).to(A_ , dtype=timesteps.dtype)
lowerCAmelCase_ : Dict = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten()
lowerCAmelCase_ : Dict = torch.cat([timesteps[:1], interleaved_timesteps])
lowerCAmelCase_ : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase_ : Union[str, Any] = defaultdict(A_)
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Any):
# get log sigma
lowerCAmelCase_ : List[str] = sigma.log()
# get distribution
lowerCAmelCase_ : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowerCAmelCase_ : int = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
lowerCAmelCase_ : str = low_idx + 1
lowerCAmelCase_ : int = self.log_sigmas[low_idx]
lowerCAmelCase_ : Optional[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase_ : List[str] = (low - log_sigma) / (low - high)
lowerCAmelCase_ : Any = w.clamp(0 , 1)
# transform interpolation to time range
lowerCAmelCase_ : Tuple = (1 - w) * low_idx + w * high_idx
lowerCAmelCase_ : Tuple = t.view(sigma.shape)
return t
@property
def UpperCAmelCase__ ( self : int):
return self.sample is None
def UpperCAmelCase__ ( self : Dict , A_ : Union[torch.FloatTensor, np.ndarray] , A_ : Union[float, torch.FloatTensor] , A_ : Union[torch.FloatTensor, np.ndarray] , A_ : bool = True , ):
lowerCAmelCase_ : Tuple = self.index_for_timestep(A_)
# advance index counter by 1
lowerCAmelCase_ : Any = timestep.cpu().item() if torch.is_tensor(A_) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase_ : str = self.sigmas[step_index]
lowerCAmelCase_ : str = self.sigmas_interpol[step_index + 1]
lowerCAmelCase_ : Optional[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowerCAmelCase_ : Optional[int] = self.sigmas[step_index - 1]
lowerCAmelCase_ : List[str] = self.sigmas_interpol[step_index]
lowerCAmelCase_ : Any = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Optional[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase_ : str = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCAmelCase_ : Union[str, Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase_ : Optional[int] = sigma_hat if self.state_in_first_order else sigma_interpol
lowerCAmelCase_ : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''')
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase_ : Dict = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase_ : Any = sigma_interpol - sigma_hat
# store for 2nd order step
lowerCAmelCase_ : Any = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowerCAmelCase_ : Dict = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowerCAmelCase_ : List[str] = sigma_next - sigma_hat
lowerCAmelCase_ : Optional[Any] = self.sample
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_)
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : torch.FloatTensor , A_ : torch.FloatTensor , A_ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase_ : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(A_):
# mps does not support float64
lowerCAmelCase_ : str = self.timesteps.to(original_samples.device , dtype=torch.floataa)
lowerCAmelCase_ : str = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
lowerCAmelCase_ : Tuple = self.timesteps.to(original_samples.device)
lowerCAmelCase_ : List[str] = timesteps.to(original_samples.device)
lowerCAmelCase_ : Any = [self.index_for_timestep(A_ , A_) for t in timesteps]
lowerCAmelCase_ : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
lowerCAmelCase_ : Tuple = sigma.unsqueeze(-1)
lowerCAmelCase_ : int = original_samples + noise * sigma
return noisy_samples
def __len__( self : Union[str, Any]):
return self.config.num_train_timesteps
| 171 |
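# A toy illustration of the sigma interpolation performed in set_timesteps above:
# consecutive sigmas are lerped in log space, so each interpolated value is the
# geometric mean of a sigma and its predecessor. The values below are arbitrary.
import torch

sigmas = torch.tensor([8.0, 4.0, 2.0, 1.0])
sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
assert torch.allclose(sigmas_interpol[1:], (sigmas[1:] * sigmas[:-1]).sqrt())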
def UpperCamelCase( __UpperCamelCase : int = 10**12 ):
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : Dict = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
| 171 | 1 |
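# A short demonstration of the recurrence iterated by solution() above: each
# step produces the next (total, blue) disc counts for which the probability of
# drawing two blue discs is exactly 1/2, i.e. 2*blue*(blue-1) == total*(total-1).
prev_numerator, numerator = 1, 1
prev_denominator, denominator = 0, 1
for _ in range(4):
    prev_numerator += 2 * numerator
    numerator += 2 * prev_numerator
    prev_denominator += 2 * denominator
    denominator += 2 * prev_denominator
    total, blue = (numerator + 1) // 2, (denominator + 1) // 2
    assert 2 * blue * (blue - 1) == total * (total - 1)  # e.g. 15 blue of 21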
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = 42
__snake_case = 42
def __init__( self , _lowercase , _lowercase ) -> List[Any]:
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase )
@torch.no_grad()
def __call__( self , _lowercase = 1 , _lowercase = 50 , _lowercase = None , _lowercase = "pil" , _lowercase = True , **_lowercase , ) -> Union[Tuple, ImagePipelineOutput]:
_lowerCamelCase : List[str] = self.unet.config.sample_size
_lowerCamelCase : int = (batch_size, 3, img_size, img_size)
_lowerCamelCase : Optional[int] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCamelCase : Any = randn_tensor(_lowercase , generator=_lowercase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_lowerCamelCase : List[Any] = self.scheduler.schedule[t]
_lowerCamelCase : str = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCamelCase, _lowerCamelCase : List[Any] = self.scheduler.add_noise_to_input(_lowercase , _lowercase , generator=_lowercase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCamelCase : int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCamelCase : Union[str, Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCamelCase : Optional[int] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_lowerCamelCase : List[str] = self.scheduler.step_correct(
_lowercase , _lowercase , _lowercase , _lowercase , step_output.prev_sample , step_output['''derivative'''] , )
_lowerCamelCase : Tuple = step_output.prev_sample
_lowerCamelCase : str = (sample / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase : Any = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase : Dict = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 558 | """simple docstring"""
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->str:
_lowerCamelCase : int = len(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = len(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
_lowerCamelCase : list = []
for char_count in range(SCREAMING_SNAKE_CASE_ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 558 | 1 |
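# A quick check of the interleaving above: characters are taken alternately from
# each string and the tail of the longer string is appended in order. This is a
# plain-named restatement used only to document the expected output.
def interleave(a: str, b: str) -> str:
    out = []
    for i in range(max(len(a), len(b))):
        if i < len(a):
            out.append(a[i])
        if i < len(b):
            out.append(b[i])
    return "".join(out)

assert interleave("AB", "XYZ") == "AXBYZ"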
'''simple docstring'''
from collections import defaultdict
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: Optional[int] ) -> int:
"""simple docstring"""
__a = 1
__a = True
for v in tree[start]:
if v not in visited:
ret += dfs(SCREAMING_SNAKE_CASE__ )
if ret % 2 == 0:
cuts.append(SCREAMING_SNAKE_CASE__ )
return ret
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__UpperCamelCase : int = 10, 9
__UpperCamelCase : List[str] = defaultdict(list)
__UpperCamelCase : dict[int, bool] = {}
__UpperCamelCase : list[int] = []
__UpperCamelCase : Dict = 0
__UpperCamelCase : List[Any] = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1) | 448 |
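# A self-contained restatement of the even-tree count above with explicit names:
# an edge can be removed when the subtree hanging below it has an even number of
# nodes; the root's own even count is not a removable edge, hence the -1.
from collections import defaultdict

def count_even_cuts(edge_list):
    tree = defaultdict(list)
    for u, v in edge_list:
        tree[u].append(v)
        tree[v].append(u)
    seen: dict[int, bool] = {}
    cuts: list[int] = []

    def dfs(node: int) -> int:
        seen[node] = True
        size = 1
        for child in tree[node]:
            if child not in seen:
                size += dfs(child)
        if size % 2 == 0:
            cuts.append(node)
        return size

    dfs(1)
    return len(cuts) - 1

sample_edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
assert count_even_cuts(sample_edges) == 2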
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase : List[str] = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(snake_case ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : Optional[int] = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowercase : Optional[Any] = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowercase : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
lowercase : int = black.format_str(snake_case ,mode=snake_case )
lowercase : int = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(snake_case ,"""w""" ,newline="""\n""" ) as f:
f.write(snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=snake_case )
with open(snake_case ,"""r""" ) as f:
self.assertTrue(f.read() ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,snake_case ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,snake_case ) ,)
# Copy consistency with a really long name
lowercase : Dict = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" ,f"{long_class_name}SchedulerOutput" ,re.sub("""Bert""" ,snake_case ,snake_case ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,snake_case ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,snake_case ) ,)
| 336 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Any =logging.get_logger(__name__)
_UpperCAmelCase : List[Any] ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = """speech_to_text_2"""
SCREAMING_SNAKE_CASE__ : int = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Any = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __lowercase=1_0_0_0_0 , __lowercase=6 , __lowercase=2_0_4_8 , __lowercase=4 , __lowercase=0.0 , __lowercase=True , __lowercase="relu" , __lowercase=2_5_6 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=2 , __lowercase=True , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase=1_0_2_4 , **__lowercase , ) -> str:
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : List[Any] = d_model
lowerCAmelCase_ : Any = decoder_ffn_dim
lowerCAmelCase_ : Any = decoder_layers
lowerCAmelCase_ : str = decoder_attention_heads
lowerCAmelCase_ : Any = dropout
lowerCAmelCase_ : Union[str, Any] = attention_dropout
lowerCAmelCase_ : Optional[int] = activation_dropout
lowerCAmelCase_ : Dict = activation_function
lowerCAmelCase_ : str = init_std
lowerCAmelCase_ : Tuple = decoder_layerdrop
lowerCAmelCase_ : Union[str, Any] = use_cache
lowerCAmelCase_ : Tuple = decoder_layers
lowerCAmelCase_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : List[str] = max_target_positions
super().__init__(
pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , decoder_start_token_id=__lowercase , **__lowercase , ) | 619 |
import math
import qiskit
def lowerCAmelCase ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 )-> qiskit.result.counts.Counts:
if (
isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
or isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != input_a)
or (math.floor(lowerCAmelCase_ ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowerCAmelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
lowerCAmelCase_ : str = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowerCAmelCase_ : Any = [input_a, input_a, carry_in]
lowerCAmelCase_ : int = qiskit.QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCAmelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCAmelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCAmelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCAmelCase_ ) # measure the last two qbits
lowerCAmelCase_ : Tuple = qiskit.Aer.get_backend('''aer_simulator''' )
lowerCAmelCase_ : Union[str, Any] = qiskit.execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_000 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : List[Any] = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
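# A minimal sketch of the lazy-import idea behind _LazyModule above: exported
# names are mapped to submodules, and the real import happens on first access.
# LazyNamespace and its mapping are illustrative, not the transformers API.
import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        self._by_name = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        module = self._by_name.get(name)
        if module is None:
            raise AttributeError(name)
        return getattr(importlib.import_module(module), name)

lazy = LazyNamespace({"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0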
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A_ : Dict = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def UpperCamelCase__ ( __magic_name__ : List[Any] , __magic_name__ : List[Any]=None , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = True
while ask_again:
snake_case__ : Optional[Any] = input(__magic_name__ )
try:
if default is not None and len(__magic_name__ ) == 0:
return default
return convert_value(__magic_name__ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__magic_name__ )
def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Any=[] , __magic_name__ : Optional[int]=None , __magic_name__ : int=0 ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Union[str, Any] = BulletMenu(__magic_name__ , __magic_name__ )
snake_case__ : Optional[Any] = menu.run(default_choice=__magic_name__ )
return convert_value(__magic_name__ ) if convert_value is not None else result
def UpperCamelCase__ ( __magic_name__ : Any ) -> int:
'''simple docstring'''
snake_case__ : Tuple = int(__magic_name__ )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def UpperCamelCase__ ( __magic_name__ : str ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = int(__magic_name__ )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = int(__magic_name__ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def UpperCamelCase__ ( __magic_name__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[Any] = int(__magic_name__ )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def UpperCamelCase__ ( __magic_name__ : Dict ) -> Tuple:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class __snake_case ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : str = super()._format_usage(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : str = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 38 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__UpperCAmelCase = _symbol_database.Default()
__UpperCAmelCase = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
__UpperCAmelCase = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__UpperCAmelCase = None
__UpperCAmelCase = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__UpperCAmelCase = 45
__UpperCAmelCase = 15_81
__UpperCAmelCase = 15_17
__UpperCAmelCase = 15_70
__UpperCAmelCase = 15_84
__UpperCAmelCase = 17_93
__UpperCAmelCase = 17_95
__UpperCAmelCase = 19_16
__UpperCAmelCase = 18_64
__UpperCAmelCase = 19_05
__UpperCAmelCase = 19_19
__UpperCAmelCase = 24_29
__UpperCAmelCase = 22_08
__UpperCAmelCase = 24_18
__UpperCAmelCase = 23_23
__UpperCAmelCase = 24_07
# @@protoc_insertion_point(module_scope)
| 705 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCAmelCase = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)
        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
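# For illustration (hypothetical checkpoint key): rename_keys turns the T5X parameter
# "encoder/layers_0/attention/key/kernel" first into
# "encoder/block/0/layer/attention/key/kernel" via the layers_(\d+) rewrite; then
# MOE_LAYER_NAME_MAPPING maps "/attention/" to "/0/SelfAttention/" and "key" to "k",
# giving "encoder/block/0/layer/0/SelfAttention/k/kernel".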
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
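# For illustration (hypothetical gin contents): a line such as
#     NUM_HEADS = 12
# matches the first regex and, through GIN_TO_CONFIG_MAPPING, becomes the config
# kwarg num_heads=12, while
#     dense.MlpBlock.activations = ('gelu',)
# is caught by the second regex and becomes feed_forward_proj="gelu".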
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the T5X checkpoint of the pre-trained SwitchTransformers model. \nIf `--config_name` is not"
            " provided, a `gin_file` has to be provided."
        ),
    )
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
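# Example invocation (a sketch; the script filename and all paths are illustrative):
#   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#       --switch_t5x_checkpoint_path /tmp/switch_base_8/checkpoint_500000 \
#       --gin_file /tmp/switch_base_8/config.gin \
#       --pytorch_dump_folder_path /tmp/switch_base_8_pt \
#       --num_experts 8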
| 597 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=64, num_channels=3, kernel_size=3, stride=2, padding=1,
        patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4],
        key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2],
        initializer_range=0.02, is_training=True, use_labels=True, num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )
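    # Worked example with the defaults above: image_size=64, kernel_size=3, stride=2,
    # padding=1. Each of the 4 patch-embedding convolutions halves the resolution,
    # 64 -> 32 -> 16 -> 8 -> 4, leaving a 4x4 token grid. The two "Subsample" stages
    # in down_ops each halve the grid again, so the expected final sequence length is
    # ceil(4 / 4) * ceil(4 / 4) = 1 token of width hidden_sizes[-1] = 384.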
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Levit does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
    def test_model_common_attributes(self):
        pass
@unittest.skip(reason="Levit does not output attentions" )
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}"""):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"""Something is going wrong in the regression problem: intercepted {w.message}""")
                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 82 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 606 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_matching_py_dict():
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
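# For illustration: each parametrized case above is a sequence of (fun, *args) tuples;
# _set("key_a", "val_a") is simply (setitem, "key_a", "val_a"). _run_operation applies
# the same operation to the HashMap under test and to a plain dict, and the test
# asserts that both containers agree after every step.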
| 705 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
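# With this lazy pattern, `import transformers.models.roc_bert` stays cheap: an
# attribute lookup such as `roc_bert.RoCBertModel` makes _LazyModule import
# modeling_roc_bert on first use, and only when torch is available (per the
# _import_structure built above).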
| 346 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
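# Note: the __main__ block above is what test_ddp_kwargs executes under torchrun; an
# equivalent manual run (file name illustrative, assuming two visible GPUs) would be:
#   torchrun --nproc_per_node=2 test_kwargs_handlers.py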
| 19 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types()), reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ], )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 677 | 0 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
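# Minimal usage sketch (checkpoint name illustrative; any image-classification
# checkpoint works):
#
#     from transformers import pipeline
#     classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#     preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2)
#     # preds is a list of {"score": float, "label": str} dicts, as built in postprocess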
| 545 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
    def test_small_model_tf(self):
        pass
| 545 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError("The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
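# Worked example: bwt_transform("^BANANA") sorts the 7 rotations of the input and keeps
# the last column, giving {"bwt_string": "BNN^AAA", "idx_original_string": 6}; passing
# those two values to reverse_bwt rebuilds "^BANANA".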
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 98 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))
        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
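# Illustrative sketch (not part of the original test suite): the plugin the tests
# above configure through FSDP_* environment variables can also be constructed
# directly and handed to `Accelerator`. Kept as a comment so this module does not
# execute it at import time; values shown are assumptions for the example.
#
#     from accelerate import Accelerator
#     from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
#
#     fsdp_plugin = FullyShardedDataParallelPlugin()   # reads FSDP_* env vars if set
#     accelerator = Accelerator(fsdp_plugin=fsdp_plugin)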
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy()) | 6 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
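# Illustrative usage sketch (assumption: this file is the GroupViT `__init__.py`
# inside transformers). Because of `_LazyModule` above, heavy torch/TF submodules
# are imported only when an attribute is first touched:
#
#     from transformers import GroupViTConfig   # cheap: configuration only
#     from transformers import GroupViTModel    # lazily triggers the torch import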
| 706 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
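# Minimal sketch of the replicate/shard pattern exercised above (assumptions: a
# single host with N local devices and arrays whose leading dimension is a
# multiple of N; shapes below are placeholders):
#
#     import jax
#     import jax.numpy as jnp
#     from flax.jax_utils import replicate
#     from flax.training.common_utils import shard
#
#     params = {"w": jnp.zeros((4, 4))}           # pytree of weights
#     p_params = replicate(params)                # copy params onto every device
#     batch = jnp.zeros((jax.device_count() * 2, 77), jnp.int32)
#     sharded = shard(batch)                      # -> (num_devices, 2, 77) for pmap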
| 613 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_SCREAMING_SNAKE_CASE = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_SCREAMING_SNAKE_CASE = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_SCREAMING_SNAKE_CASE = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
'''simple docstring'''
    def _info(self):
if version.parse(scb.__version__) < version.parse('1.4.12'):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Sequence(datasets.Value('string' , id='sequence') , id='references'),
}) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 366 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.')

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids')
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask')

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
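# Usage sketch (illustrative; the checkpoint id below is only an example):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#
# `inputs` then carries input_ids/attention_mask from the main tokenizer,
# qformer_input_ids/qformer_attention_mask from the QFormer tokenizer, and
# pixel_values from the image processor; `save_pretrained` writes the QFormer
# tokenizer into a "qformer_tokenizer" subfolder so `from_pretrained` can restore it.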
| 366 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
"""simple docstring"""
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, F'''Difference between torch and flax is {diff} (>= {tol}).''')
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->Optional[int]:
'''simple docstring'''
A_ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A_ : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
A_ , A_ : List[str] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = {'''vision_model''': vision_model, '''text_model''': text_model}
A_ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
A_ : int = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->List[str]:
'''simple docstring'''
A_ , A_ : List[str] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Tuple = {'''vision_model''': vision_model, '''text_model''': text_model}
A_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
A_ : Any = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A_ : Dict = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A_ : Tuple = after_output[0]
A_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
A_ , A_ : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
A_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
A_ : List[str] = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
A_ : Any = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : List[str] = to_atuple(vision_model.config.image_size )
A_ : Any = to_atuple(vision_model.config.patch_size )
A_ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A_ : Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A_ : Any = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->int:
'''simple docstring'''
pt_model.to(_SCREAMING_SNAKE_CASE )
pt_model.eval()
# prepare inputs
A_ : str = inputs_dict
A_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
A_ : List[Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A_ : List[str] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ : int = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
A_ : str = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A_ : str = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
pt_model_loaded.to(_SCREAMING_SNAKE_CASE )
pt_model_loaded.eval()
with torch.no_grad():
A_ : Optional[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2 )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
A_ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : List[Any] = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A_ : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
A_ : int = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
A_ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Dict = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
@is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config')
        text_config = config_inputs_dict.pop('text_config')

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
@slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
'''simple docstring'''
A_ : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
A_ : Optional[int] = 13
A_ : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A_ : Optional[int] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A_ : str = random_attention_mask([batch_size, 4] )
A_ : Tuple = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
'''simple docstring'''
A_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
A_ : Dict = 13
A_ : Tuple = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A_ : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A_ : Any = random_attention_mask([batch_size, 4] )
A_ : Optional[int] = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np')

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
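# Sketch of the dual-encoder construction the tests above exercise (model ids are
# examples, not the checkpoints pinned by the tests):
#
#     model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
#         "google/vit-base-patch16-224", "bert-base-uncased"
#     )
#
# The vision and text towers are loaded separately and then trained jointly
# through the contrastive logit_scale, CLIP-style.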
| 152 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn='gelu-approximate', num_embeds_ada_norm=1000, norm_type='ada_norm_zero', norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    def test_inference(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256')
        pipe.to('cuda')

        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type='np').images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''')
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to('cuda')

        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type='np').images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                F'''/dit/{word}_512.npy''')
            assert np.abs((expected_image - image).max()) < 1e-1
| 152 | 1 |
'''simple docstring'''
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal):
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
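# Quick sanity check (illustrative; these examples could also live in the
# function's docstring so that `doctest.testmod()` below exercises them):
#     decimal_to_hexadecimal(5)    -> '0x5'
#     decimal_to_hexadecimal(16)   -> '0x10'
#     decimal_to_hexadecimal(-256) -> '-0x100'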
if __name__ == "__main__":
import doctest
doctest.testmod()
| 325 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False
    def __init__( self , dataset_name , config , version , cache_dir = None , use_local_dummy_data = False , load_existing_dummy_data = True , download_callbacks = None , ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
@property
    def dummy_data_folder(self):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
    def dummy_zip_file(self):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir , cache_dir=self.cache_dir , extract_compressed_file=True , force_extract=True )
        return os.path.join(local_path , self.dummy_file_name )
@property
    def local_path_to_dummy_data(self):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
    def github_path_to_dummy_data(self):
if self._bucket_url is None:
lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
    def manual_dir(self):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)
    def download(self, data_url, *args):
        return self.download_and_extract(data_url)
    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)
    def extract(self, path, *args, **kwargs):
        return path
    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split('/')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split('/')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('*')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__')):
                yield file_path.relative_to(path).as_posix(), file_path.open('rb')
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('.', '__')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('.', '__')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('.', '__')):
                            continue
                        yield os.path.join(dirpath, filename)
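# Usage sketch (illustrative; the dataset name and URL are placeholders): tests
# construct this manager in place of the real DownloadManager so that
# `download_and_extract` resolves URLs to files inside the checked-in dummy_data.zip:
#
#     dl_manager = MockDownloadManager("my_dataset", config=None, version="1.0.0")
#     local_path = dl_manager.download_and_extract("https://example.com/data.csv")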
| 84 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MaMaaaModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
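    # Illustrative note (not part of the original test): with pad_token_id = 1, the
    # clamp(self.pad_token_id + 1) calls above lift every sampled 0 or 1 up to 2, so
    # the random batches never contain interior padding and the position-id
    # bookkeeping described in the comments stays valid.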
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_tokenizer(self):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"), )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 25 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the scikit-learn dataset bunch into (features, targets).
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset
    data = load_iris()

    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )

    names = data["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
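# A minimal follow-on sketch (illustrative, not part of the original script):
# the same split could also be scored numerically inside main(), reusing the
# `xgboost_classifier`, `x_test` and `y_test` names defined there:
#
#   predictions = xgboost_classifier.predict(x_test)
#   print(f"Test accuracy: {(predictions == y_test).mean():.3f}")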
| 25 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
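# For reference, a minimal end-to-end sketch of the pipeline exercised above
# (it mirrors the checkpoint and prompt used in these tests, but is not itself
# part of the original file):
#
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base", safety_checker=None
#   )
#   image = pipe("a photo of the dolomites", num_inference_steps=3).images[0]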
| 101 |
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
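    # Illustrative expectation (not part of the original file): for "hello" vs
    # "world" there is one matched character ("l"), no transpositions and no
    # common prefix, so jaro_winkler returns (1/5 + 1/5 + 1/1) / 3 ≈ 0.4667.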
| 101 | 1 |
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Create and measure an n-qubit GHZ (entangled) state on the Aer simulator."""
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
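    # Illustrative expectation (not part of the original file): measuring a GHZ
    # state collapses all qubits together, so quantum_entanglement(3) with 1000
    # shots should report only the '000' and '111' bitstrings, each near 500.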
| 424 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
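    # Worked example (illustrative, not in the original file): at a 60 degree
    # polariser angle the transmitted intensity is I0 * cos^2(60°) = 0.25 * I0,
    # so malus_law(100.0, 60.0) returns 25.0 up to float rounding.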
| 424 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
'''simple docstring'''
    def __init__(self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
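        # Illustrative note (not part of the original test): assuming five recorded
        # stage names, the timm wrapper reports out_indices == (-1,) while the
        # transformers port reports [4]; both resolve to the final stage.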
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowercase (self ) -> Dict:
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowercase (self ) -> Tuple:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowercase (self ) -> Union[str, Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowercase (self ) -> Tuple:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowercase (self ) -> List[Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase (self ) -> Any:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase (self ) -> Dict:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase (self ) -> Union[str, Any]:
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowercase (self ) -> List[Any]:
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowercase (self ) -> Union[str, Any]:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> Optional[Any]:
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 585 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCAmelCase = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.", )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.", )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"""Limiting the dataset to {args.limit} entries.""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
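    # Illustrative note (not part of the original script): with max_length = 512 and
    # a batch whose concatenated token list has total_length = 1300, group_texts
    # keeps (1300 // 512) * 512 = 1024 tokens and emits two 512-token samples,
    # dropping the trailing 276 tokens.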
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"""dataset-{shard_count}-{records_containing}.tfrecord""")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("""Wrote file {} containing {} records""".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"""split-{args.split}-records-count.txt""", """w""") as f:
        print(f"""Total {args.split} records: {total_records}""", file=f)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 585 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 50 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", projection_dim: int = 0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
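# Minimal usage sketch (illustrative, relying only on the standard
# PretrainedConfig API):
#
#   config = DPRConfig(projection_dim=128)
#   assert config.model_type == "dpr"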
| 50 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("""cpu""")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old, new):
    # Move the entry stored under `old` to the key `new`.
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
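# Illustrative note (not part of the original script), tracing the rules above:
# a checkpoint key "patch_embed.0.weight" becomes
# "swiftformer.patch_embed.patch_embedding.0.weight", and
# "network.1.0.dwconv.weight" becomes
# "swiftformer.encoder.network.1.blocks.0.depth_wise_conv.weight".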
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''')
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__a : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
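    # Example invocation (illustrative; the script name and checkpoint path are assumptions):
    #   python convert_swiftformer.py --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth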
| 606 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 43 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __A :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ):
lowerCamelCase__ : Any = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : Optional[int] = seq_length
lowerCamelCase__ : Union[str, Any] = is_training
lowerCamelCase__ : List[Any] = use_token_type_ids
lowerCamelCase__ : str = use_labels
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Optional[int] = type_vocab_size
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : str = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : Optional[Any] = scope
lowerCamelCase__ : Any = self.vocab_size - 1
def _snake_case (self ):
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : Any = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : Tuple = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase__ : List[Any] = OpenAIGPTModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : Optional[Any] = model(__magic_name__ , token_type_ids=__magic_name__ , head_mask=__magic_name__ )
lowerCamelCase__ : Dict = model(__magic_name__ , token_type_ids=__magic_name__ )
lowerCamelCase__ : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase__ : Optional[Any] = OpenAIGPTLMHeadModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : Dict = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase__ : Optional[int] = OpenAIGPTDoubleHeadsModel(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : List[str] = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , *__magic_name__ ):
lowerCamelCase__ : Optional[int] = self.num_labels
lowerCamelCase__ : Union[str, Any] = OpenAIGPTForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = model(__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case (self ):
lowerCamelCase__ : Optional[int] = self.prepare_config_and_inputs()
        (
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
            lowerCamelCase__,
        ) = config_and_inputs
lowerCamelCase__ : str = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase :Dict = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCamelCase :Dict = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCamelCase :List[Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__=False ):
lowerCamelCase__ : Tuple = super()._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=__magic_name__ , )
lowerCamelCase__ : str = inputs_dict["""labels"""]
lowerCamelCase__ : List[str] = inputs_dict["""labels"""]
lowerCamelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=__magic_name__ , )
lowerCamelCase__ : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def _snake_case (self ):
lowerCamelCase__ : str = OpenAIGPTModelTester(self )
lowerCamelCase__ : Tuple = ConfigTester(self , config_class=__magic_name__ , n_embd=37 )
def _snake_case (self ):
self.config_tester.run_common_tests()
def _snake_case (self ):
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__magic_name__ )
def _snake_case (self ):
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__magic_name__ )
@slow
def _snake_case (self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = OpenAIGPTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
class __A ( unittest.TestCase ):
@slow
def _snake_case (self ):
lowerCamelCase__ : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(__magic_name__ )
lowerCamelCase__ : Optional[Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=__magic_name__ ) # the president is
lowerCamelCase__ : Tuple = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : Tuple = model.generate(__magic_name__ , do_sample=__magic_name__ )
self.assertListEqual(output_ids[0].tolist() , __magic_name__ )
| 96 |
import itertools
import string
from collections.abc import Generator, Iterable
def _A (UpperCamelCase : Iterable[str] , UpperCamelCase : int ) ->Generator[tuple[str, ...], None, None]:
'''simple docstring'''
lowerCamelCase__ : Any = iter(UpperCamelCase )
while True:
lowerCamelCase__ : Union[str, Any] = tuple(itertools.islice(UpperCamelCase , UpperCamelCase ) )
if not chunk:
return
yield chunk
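# e.g. chunking "ABCDE" with size 2 yields ("A", "B"), ("C", "D"), ("E",);
# the final chunk may be shorter than the requested size.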
def _A (UpperCamelCase : str ) ->str:
'''simple docstring'''
lowerCamelCase__ : Tuple = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
lowerCamelCase__ : Any = """"""
if len(UpperCamelCase ) < 2:
return dirty
for i in range(len(UpperCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCamelCase ) & 1:
clean += "X"
return clean
def _A (UpperCamelCase : str ) ->list[str]:
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowerCamelCase__ : List[Any] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCamelCase )
return table
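# e.g. the key "monarchy" yields the classic 5x5 table M O N A R / C H Y B D /
# E F G I K / L P Q S T / U V W X Z (there is no J; it is folded into I, per the alphabet above).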
def _A (UpperCamelCase : str , UpperCamelCase : str ) ->str:
'''simple docstring'''
lowerCamelCase__ : Tuple = generate_table(UpperCamelCase )
lowerCamelCase__ : int = prepare_input(UpperCamelCase )
lowerCamelCase__ : List[str] = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase , 2 ):
        lowerCamelCase__, lowerCamelCase__ = divmod(table.index(UpperCamelCase ) , 5 )
        lowerCamelCase__, lowerCamelCase__ = divmod(table.index(UpperCamelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _A (UpperCamelCase : str , UpperCamelCase : str ) ->str:
'''simple docstring'''
lowerCamelCase__ : Any = generate_table(UpperCamelCase )
lowerCamelCase__ : Any = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase , 2 ):
        lowerCamelCase__, lowerCamelCase__ = divmod(table.index(UpperCamelCase ) , 5 )
        lowerCamelCase__, lowerCamelCase__ = divmod(table.index(UpperCamelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
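# Usage sketch (the two pair-substitution helpers above are `encode` and `decode`
# in the un-obfuscated original; the renaming here collapsed their names):
#   ciphertext = encode("hide the gold", "monarchy")
#   assert decode(ciphertext, "monarchy") == "HIDETHEGOLDX"  # input padded to even length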
| 96 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=0.999 , SCREAMING_SNAKE_CASE_ : List[str]="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ : str ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_lowerCAmelCase = []
for i in range(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = i / num_diffusion_timesteps
_lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
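# Each beta_i above equals min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T), max_beta),
# so the cumulative product of (1 - beta_i) tracks the chosen alpha_bar curve.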
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ):
__lowerCamelCase : Optional[Any] = [e.name for e in KarrasDiffusionSchedulers]
__lowerCamelCase : Any = 2
@register_to_config
def __init__( self , _lowerCAmelCase = 1000 , _lowerCAmelCase = 0.00085 , _lowerCAmelCase = 0.012 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = 1.0 , _lowerCAmelCase = "linspace" , _lowerCAmelCase = 0 , ) -> Union[str, Any]:
if trained_betas is not None:
_lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
_lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="exp" )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
_lowerCAmelCase = 1.0 - self.betas
_lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = use_karras_sigmas
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
if schedule_timesteps is None:
_lowerCAmelCase = self.timesteps
_lowerCAmelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_lowerCAmelCase = 1 if len(_lowerCAmelCase ) > 1 else 0
else:
_lowerCAmelCase = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
_lowerCAmelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _snake_case ( self ) -> Union[str, Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , ) -> torch.FloatTensor:
_lowerCAmelCase = self.index_for_timestep(_lowerCAmelCase )
_lowerCAmelCase = self.sigmas[step_index]
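        # Scale inputs by 1 / sqrt(sigma^2 + 1) so the model sees roughly unit-variance
        # samples regardless of the current noise level (standard k-diffusion input scaling).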
_lowerCAmelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , ) -> Any:
_lowerCAmelCase = num_inference_steps
_lowerCAmelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_lowerCAmelCase = np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_lowerCAmelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_lowerCAmelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase = (np.arange(_lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(_lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
_lowerCAmelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_lowerCAmelCase = np.log(_lowerCAmelCase )
_lowerCAmelCase = np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase ) ) , _lowerCAmelCase )
if self.config.use_karras_sigmas:
_lowerCAmelCase = self._convert_to_karras(in_sigmas=_lowerCAmelCase , num_inference_steps=self.num_inference_steps )
_lowerCAmelCase = np.array([self._sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) for sigma in sigmas] )
_lowerCAmelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase )
_lowerCAmelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
_lowerCAmelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCAmelCase ).startswith("mps" ):
# mps does not support float64
_lowerCAmelCase = timesteps.to(_lowerCAmelCase , dtype=torch.floataa )
else:
_lowerCAmelCase = timesteps.to(device=_lowerCAmelCase )
# empty dt and derivative
_lowerCAmelCase = None
_lowerCAmelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_lowerCAmelCase = defaultdict(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
# get log sigma
_lowerCAmelCase = np.log(_lowerCAmelCase )
# get distribution
_lowerCAmelCase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_lowerCAmelCase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_lowerCAmelCase = low_idx + 1
_lowerCAmelCase = log_sigmas[low_idx]
_lowerCAmelCase = log_sigmas[high_idx]
# interpolate sigmas
_lowerCAmelCase = (low - log_sigma) / (low - high)
_lowerCAmelCase = np.clip(_lowerCAmelCase , 0 , 1 )
# transform interpolation to time range
_lowerCAmelCase = (1 - w) * low_idx + w * high_idx
_lowerCAmelCase = t.reshape(sigma.shape )
return t
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> torch.FloatTensor:
_lowerCAmelCase = in_sigmas[-1].item()
_lowerCAmelCase = in_sigmas[0].item()
_lowerCAmelCase = 7.0 # 7.0 is the value used in the paper
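        # Karras et al. (2022) schedule: sigmas are spaced uniformly in sigma**(1/rho) space,
        # i.e. sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho.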
_lowerCAmelCase = np.linspace(0 , 1 , _lowerCAmelCase )
_lowerCAmelCase = sigma_min ** (1 / rho)
_lowerCAmelCase = sigma_max ** (1 / rho)
_lowerCAmelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _snake_case ( self ) -> Tuple:
return self.dt is None
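    # `self.dt is None` marks the first (Euler) half of a Heun step; the second half
    # averages the stored derivative with the fresh one for a second-order update.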
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
_lowerCAmelCase = self.index_for_timestep(_lowerCAmelCase )
# advance index counter by 1
_lowerCAmelCase = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_lowerCAmelCase = self.sigmas[step_index]
_lowerCAmelCase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_lowerCAmelCase = self.sigmas[step_index - 1]
_lowerCAmelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_lowerCAmelCase = 0
_lowerCAmelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_lowerCAmelCase = sigma_hat if self.state_in_first_order else sigma_next
_lowerCAmelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase = sigma_hat if self.state_in_first_order else sigma_next
_lowerCAmelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_lowerCAmelCase = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
_lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_lowerCAmelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_lowerCAmelCase = sigma_next - sigma_hat
# store for 2nd order step
_lowerCAmelCase = derivative
_lowerCAmelCase = dt
_lowerCAmelCase = sample
else:
# 2. 2nd order / Heun's method
_lowerCAmelCase = (sample - pred_original_sample) / sigma_next
_lowerCAmelCase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_lowerCAmelCase = self.dt
_lowerCAmelCase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_lowerCAmelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ):
# mps does not support float64
_lowerCAmelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_lowerCAmelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_lowerCAmelCase = self.timesteps.to(original_samples.device )
_lowerCAmelCase = timesteps.to(original_samples.device )
_lowerCAmelCase = [self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase ) for t in timesteps]
_lowerCAmelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_lowerCAmelCase = sigma.unsqueeze(-1 )
_lowerCAmelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
| 18 | """simple docstring"""
import mpmath # for roots of unity
import numpy as np
class _snake_case :
"""simple docstring"""
def __init__( self : Any , _A : Optional[int]=None , _A : int=None):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = list(poly_a or [0])[:]
_SCREAMING_SNAKE_CASE : Optional[Any] = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_SCREAMING_SNAKE_CASE : Optional[int] = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
_SCREAMING_SNAKE_CASE : str = len(self.polyB)
# Add 0 to make lengths equal a power of 2
_SCREAMING_SNAKE_CASE : Optional[Any] = int(
2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
_SCREAMING_SNAKE_CASE : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1))
# The product
_SCREAMING_SNAKE_CASE : List[Any] = self.__multiply()
def _lowerCAmelCase ( self : List[str] , _A : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(_A) <= 1:
return dft[0]
#
_SCREAMING_SNAKE_CASE : int = self.c_max_length // 2
while next_ncol > 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [[] for i in range(_A)]
_SCREAMING_SNAKE_CASE : Any = self.root**next_ncol
# First half of next step
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_A):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
_SCREAMING_SNAKE_CASE : Optional[int] = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(_A):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
_SCREAMING_SNAKE_CASE : Optional[int] = new_dft
_SCREAMING_SNAKE_CASE : List[str] = next_ncol // 2
return dft[0]
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.__dft("""A""")
_SCREAMING_SNAKE_CASE : Any = self.__dft("""B""")
_SCREAMING_SNAKE_CASE : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
_SCREAMING_SNAKE_CASE : Any = 2
while next_ncol <= self.c_max_length:
_SCREAMING_SNAKE_CASE : int = [[] for i in range(_A)]
_SCREAMING_SNAKE_CASE : List[str] = self.root ** (next_ncol // 2)
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
_SCREAMING_SNAKE_CASE : str = new_inverse_c
next_ncol *= 2
# Unpack
_SCREAMING_SNAKE_CASE : List[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """A = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A]))
_SCREAMING_SNAKE_CASE : Optional[int] = """B = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B]))
_SCREAMING_SNAKE_CASE : Tuple = """A*B = """ + """ + """.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product))
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
UpperCAmelCase = '''xmod'''
def __init__( self :Union[str, Any] ,__UpperCAmelCase :Tuple=3_05_22 ,__UpperCAmelCase :int=7_68 ,__UpperCAmelCase :Tuple=12 ,__UpperCAmelCase :Tuple=12 ,__UpperCAmelCase :Tuple=30_72 ,__UpperCAmelCase :str="gelu" ,__UpperCAmelCase :str=0.1 ,__UpperCAmelCase :str=0.1 ,__UpperCAmelCase :str=5_12 ,__UpperCAmelCase :Optional[Any]=2 ,__UpperCAmelCase :Optional[int]=0.02 ,__UpperCAmelCase :Dict=1E-12 ,__UpperCAmelCase :List[str]=1 ,__UpperCAmelCase :str=0 ,__UpperCAmelCase :Any=2 ,__UpperCAmelCase :List[Any]="absolute" ,__UpperCAmelCase :Union[str, Any]=True ,__UpperCAmelCase :Optional[int]=None ,__UpperCAmelCase :int=False ,__UpperCAmelCase :Tuple=2 ,__UpperCAmelCase :Tuple=False ,__UpperCAmelCase :Dict=True ,__UpperCAmelCase :Tuple=True ,__UpperCAmelCase :str=("en_XX",) ,__UpperCAmelCase :int=None ,**__UpperCAmelCase :str ,) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,**__UpperCAmelCase )
lowerCamelCase__ : Tuple = vocab_size
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : int = hidden_act
lowerCamelCase__ : Optional[Any] = intermediate_size
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Dict = attention_probs_dropout_prob
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Optional[Any] = layer_norm_eps
lowerCamelCase__ : Optional[int] = position_embedding_type
lowerCamelCase__ : Optional[Any] = use_cache
lowerCamelCase__ : Optional[Any] = classifier_dropout
lowerCamelCase__ : Optional[int] = pre_norm
lowerCamelCase__ : Tuple = adapter_reduction_factor
lowerCamelCase__ : Dict = adapter_layer_norm
lowerCamelCase__ : Optional[Any] = adapter_reuse_layer_norm
lowerCamelCase__ : Dict = ln_before_adapter
lowerCamelCase__ : Dict = list(__UpperCAmelCase )
lowerCamelCase__ : List[Any] = default_language
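        # Note: the adapter_* fields configure X-MOD's per-language adapter bottlenecks;
        # each language listed above gets its own adapter whose bottleneck width is
        # hidden_size / adapter_reduction_factor (an assumption based on the X-MOD design).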
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
@property
def lowercase_ ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase__ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
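# The batch and sequence axes above are declared dynamic so a single exported ONNX
# graph can serve any batch size and sequence length (plus a choice axis for
# multiple-choice inputs).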
| 121 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : Dict = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Any = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = pipe.dual_guided(
prompt='''first prompt''' ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
lowerCamelCase__ : Dict = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : Dict = generator.manual_seed(0 )
lowerCamelCase__ : List[str] = pipe.dual_guided(
prompt='''first prompt''' ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowercase_ ( self :Tuple ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : Tuple = '''cyberpunk 2077'''
lowerCamelCase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCamelCase__ : str = torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = pipe.dual_guided(
prompt=__UpperCAmelCase ,image=__UpperCAmelCase ,text_to_image_strength=0.75 ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ,).images
lowerCamelCase__ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase__ : Tuple = '''A painting of a squirrel eating a burger '''
lowerCamelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = pipe.text_to_image(
prompt=__UpperCAmelCase ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ).images
lowerCamelCase__ : List[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : Optional[Any] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowerCamelCase__ : Any = pipe.image_variation(__UpperCAmelCase ,generator=__UpperCAmelCase ,output_type='''numpy''' ).images
lowerCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCamelCase__ : str = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 121 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
return len(set(__magic_name__ ) ) == len(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''simple docstring'''
snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'''Transferred from={src_m} to={dest_m}''' )
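# Note: ModuleTransfer works by tracing both models on one input, pairing the
# parameterised leaf modules in execution order, and copying each state_dict across;
# it therefore assumes both networks execute equivalent layers in the same order.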
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCAmelCase__ (*snake_case__ : List[Any] , snake_case__ : Optional[Union[Dict, Any]] = None , snake_case__ : str=True , snake_case__ : Dict=2 ):
"""simple docstring"""
from .. import __version__
_snake_case : Tuple = take_from
_snake_case : Tuple = ()
if not isinstance(args[0] , snake_case__ ):
_snake_case : Optional[int] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(snake_case__ ).base_version ) >= version.parse(snake_case__ ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
_snake_case : Optional[Any] = None
if isinstance(snake_case__ , snake_case__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(snake_case__ ),)
_snake_case : List[Any] = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(snake_case__ , snake_case__ ):
values += (getattr(snake_case__ , snake_case__ ),)
_snake_case : Union[str, Any] = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
_snake_case : List[Any] = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
_snake_case : Any = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , snake_case__ , stacklevel=snake_case__ )
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0:
_snake_case : Dict = inspect.getouterframes(inspect.currentframe() )[1]
_snake_case : str = call_frame.filename
_snake_case : int = call_frame.lineno
_snake_case : Any = call_frame.function
_snake_case : List[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(snake_case__ ) == 0:
return
elif len(snake_case__ ) == 1:
return values[0]
return values
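# Usage sketch (illustrative call; this helper is exported as `deprecate` in the
# original diffusers source):
#   value = deprecate("old_kwarg", "1.0.0", "Use `new_kwarg` instead.", take_from=kwargs)
# which pops `old_kwarg` from `kwargs`, emits the deprecation warning, and returns the value.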
| 714 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase:
'''simple docstring'''
def __init__( self: List[str], a_: List[Any], a_: str=13, a_: Dict=32, a_: Union[str, Any]=3, a_: Union[str, Any]=4, a_: Tuple=[10, 20, 30, 40], a_: Dict=[2, 2, 3, 2], a_: Tuple=True, a_: Optional[Any]=True, a_: Any=37, a_: Any="gelu", a_: int=10, a_: Tuple=0.02, a_: str=["stage2", "stage3", "stage4"], a_: List[str]=[2, 3, 4], a_: List[str]=None, ):
'''simple docstring'''
_snake_case : int = parent
_snake_case : int = batch_size
_snake_case : List[Any] = image_size
_snake_case : List[str] = num_channels
_snake_case : Tuple = num_stages
_snake_case : Union[str, Any] = hidden_sizes
_snake_case : List[Any] = depths
_snake_case : Tuple = is_training
_snake_case : List[str] = use_labels
_snake_case : Tuple = intermediate_size
_snake_case : List[str] = hidden_act
_snake_case : Optional[Any] = num_labels
_snake_case : Tuple = initializer_range
_snake_case : Tuple = out_features
_snake_case : Tuple = out_indices
_snake_case : Dict = scope
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Any = None
if self.use_labels:
_snake_case : Dict = ids_tensor([self.batch_size], self.num_labels )
_snake_case : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a_, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase_ ( self: int, a_: Tuple, a_: Any, a_: Dict ):
'''simple docstring'''
_snake_case : int = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
_snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase_ ( self: Optional[int], a_: List[str], a_: Tuple, a_: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
_snake_case : Optional[int] = model(a_, labels=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self: Union[str, Any], a_: Tuple, a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : int = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_snake_case : Tuple = None
_snake_case : Tuple = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
_snake_case : List[Any] = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = self.prepare_config_and_inputs()
        _snake_case, _snake_case, _snake_case = config_and_inputs
_snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[Any] = self.prepare_config_and_inputs()
        _snake_case, _snake_case, _snake_case = config_and_inputs
_snake_case : Any = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class lowercase( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase__ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Tuple = ConvNextVaModelTester(self )
_snake_case : int = ConfigTester(self, config_class=a_, has_text_modality=a_, hidden_size=37 )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[Any] = True
if model_class.__name__ in [
*get_values(a_ ),
*get_values(a_ ),
]:
continue
_snake_case : Tuple = model_class(a_ )
model.to(a_ )
model.train()
_snake_case : Optional[Any] = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Any = model(**a_ ).loss
loss.backward()
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Any = False
_snake_case : List[Any] = True
if (
model_class.__name__
in [*get_values(a_ ), *get_values(a_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Dict = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : str = self._prepare_for_class(a_, a_, return_labels=a_ )
_snake_case : Optional[int] = model(**a_ ).loss
loss.backward()
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="""pt""" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
| 28 | 0 |
class Things:
    """simple docstring"""
    def __init__(self, name, value, weight):
        '''simple docstring'''
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__(self):
        '''simple docstring'''
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value(self):
        '''simple docstring'''
        return self.value
    def get_name(self):
        '''simple docstring'''
        return self.name
    def get_weight(self):
        '''simple docstring'''
        return self.weight
    def value_weight(self):
        '''simple docstring'''
        return self.value / self.weight
def build_menu(name, value, weight):
    '''simple docstring'''
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    '''simple docstring'''
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
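# Usage sketch (illustrative values only; not from the original module) showing how the
# helpers above compose -- greedy() picks the highest-value items that fit the budget:
#   foods = build_menu(["Burger", "Pizza"], [80, 100], [40, 60])
#   taken, value = greedy(foods, max_cost=60, key_func=Things.get_value)
#   taken == [Things(Pizza, 100, 60)] and value == 100.0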
def test_greedy():
    '''simple docstring'''
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 183 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
    'tokenizer_file': {
        'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 5_12,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs, ):
        '''simple docstring'''
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"""additional_special_tokens should be of type {type(list)}, but is"""
                    f""" {type(additional_special_tokens)}""" )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended), self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended) ) != len(additional_special_tokens_extended):
                raise ValueError(
                    '''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
                    f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"""<unk_{i}>""" for i in range(2, self.offset )]
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        '''simple docstring'''
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
            raise ValueError(
                '''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
                f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        return (out_vocab_file,)
| 183 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''')
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''')
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''')
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''')
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args, ):
        self._logger = logging.get_logger('''transformers-cli/converting''')
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = """"""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = """"""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint , self._config , self._pytorch_dump_output , tf_dataset_file)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''')
| 721 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    '''simple docstring'''
    return int(x / 2**20 )
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
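# Usage sketch (illustrative; requires a CUDA device since the tracker reads torch.cuda counters):
#   with TorchTracemalloc() as tracemalloc:
#       run_training_step()            # hypothetical workload
#   print(f"used {tracemalloc.used} MB, peaked {tracemalloc.peaked} MB")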
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160, ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        '''glue''' , '''mrpc''' , split={'''train''': f"train[:{n_train}]", '''validation''': f"validation[:{n_val}]"} )
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(b2mb(tracemalloc.begin ) ) )
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=float , default=None , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=int , default=320 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=int , default=160 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 561 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
                'ResnetDownsampleBlock2D',
                'SimpleCrossAttnDownBlock2D',
            ] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=3_2 , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0_001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs['prompt']
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        if "image" in inputs:
            image = inputs['image']
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs['mask_image']
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs['original_image']
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs['generator']
        num_inference_steps = inputs['num_inference_steps']
        output_type = inputs['output_type']
        # inputs with prompt converted to embeddings
        inputs = {
            'prompt_embeds': prompt_embeds,
            'negative_prompt_embeds': negative_prompt_embeds,
            'generator': generator,
            'num_inference_steps': num_inference_steps,
            'output_type': output_type,
        }
        if image is not None:
            inputs['image'] = image
        if mask_image is not None:
            inputs['mask_image'] = mask_image
        if original_image is not None:
            inputs['original_image'] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
    def _test_save_load_local(self):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
| 439 |
def solution(limit=1_000_000 ):
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2 , limit ):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
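# Usage sketch: solution() answers Project Euler 14 -- the longest Collatz chain
# below one million starts at 837799, so solution(1_000_000) returns 837799.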
if __name__ == "__main__":
print(solution(int(input().strip())))
| 99 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""
        @staticmethod
        def open(*args, **kwargs):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests( unittest.TestCase ):
    """simple docstring"""
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        examples = [
            {
                """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
                """candidate_labels""": ["""cat""", """remote""", """couch"""],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        '''simple docstring'''
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    """score""": ANY(float ),
                    """label""": ANY(str ),
                    """box""": {"""xmin""": ANY(int ), """ymin""": ANY(int ), """xmax""": ANY(int ), """ymax""": ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_small_model_tf(self):
'''simple docstring'''
pass
@require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        object_detector = pipeline(
            """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" )
        outputs = object_detector(
            """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}},
] , )
        outputs = object_detector(
[
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}},
{"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}},
{"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}},
{"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}},
{"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}},
{"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}},
{"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}},
{"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}},
{"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        '''simple docstring'''
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}},
] , )
        outputs = object_detector(
[
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
{
"""image""": """http://images.cocodataset.org/val2017/000000039769.jpg""",
"""candidate_labels""": ["""cat""", """remote""", """couch"""],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}},
],
[
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}},
{"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}},
{"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}},
],
] , )
@require_tf
@unittest.skip("""Zero Shot Object Detection not implemented in TF""" )
    def test_large_model_tf(self):
'''simple docstring'''
pass
@require_torch
@slow
    def test_threshold(self):
        '''simple docstring'''
        threshold = 0.2
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}},
{"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}},
] , )
@require_torch
@slow
    def test_top_k(self):
        '''simple docstring'''
        top_k = 2
        object_detector = pipeline("""zero-shot-object-detection""" )
        outputs = object_detector(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}},
{"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}},
] , )
| 79 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 79 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray , 3 ).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()
def test_burkes(file_path = "digital_image_processing/image_data/lena_small.jpg" ):
    burkes = bs.Burkes(imread(file_path , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path = "digital_image_processing/image_data/lena_small.jpg" , ):
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )
| 413 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
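# Usage sketch (illustrative):
#   exponential_linear_unit(np.array([2.3, 0.6, -2, -3.8]), alpha=0.3)
#   -> array([ 2.3, 0.6, -0.25939942, -0.29328877])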
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413 | 1 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path , map_location="""cpu""" )
    state_dict = chkpt["""model"""]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["""transformer.""" + k] = v
    config = chkpt["""params"""]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["""dico_word2id"""]
    vocab = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(config , indent=2 ) + """\n""" )
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + """\n""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
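# Example invocation (paths are placeholders, not real checkpoints):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path /path/to/mlm_en_2048.pth \
#       --pytorch_dump_folder_path /path/to/output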
| 330 | '''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor( SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ["""audio_values""", """audio_mask"""]
    def __init__(self, spectrogram_length=20_48, num_channels=1, patch_size=[16, 16], feature_size=1_28, sampling_rate=4_41_00, hop_length_to_sampling_rate=86, n_fft=20_48, padding_value=0.0, **kwargs, ) -> None:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 2_0.0
        log_spec = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
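# Usage sketch (illustrative; a 1-second mono clip at the default 44.1 kHz rate):
#   extractor = TvltFeatureExtractor()
#   features = extractor(np.random.randn(44_100).astype(np.float32), sampling_rate=44_100, return_tensors="np")
#   features["audio_values"].shape -> (1, 1, padded_time, 128); features["audio_mask"] marks the real patches.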
| 330 | 1 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version(operation: str, version: str) -> bool:
    '''simple docstring'''
    return compare_versions(torch_version , operation , version )
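# Usage sketch (illustrative):
#   compare_versions("torch", ">=", "1.12.0")  # resolves the installed torch version by name
#   is_torch_version("<", "2.0.0")             # same check against the cached torch_version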
| 72 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop('''input_ids''' )
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop('''attention_mask''' )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _A( self ):
lowercase =self.tokenizer.model_input_names
lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _A( self , snake_case_ , **snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
lowercase =os.path.join(snake_case_ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(snake_case_ )
return super().save_pretrained(snake_case_ , **snake_case_ )
@classmethod
def _A( cls , snake_case_ , **snake_case_ ):
lowercase =AutoTokenizer.from_pretrained(snake_case_ , subfolder='''qformer_tokenizer''' )
lowercase =cls._get_arguments_from_pretrained(snake_case_ , **snake_case_ )
args.append(snake_case_ )
return cls(*snake_case_ )
| 72 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ : List[Any] = ['image_processor', 'tokenizer']
UpperCAmelCase_ : int = 'FlavaImageProcessor'
UpperCAmelCase_ : Optional[int] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , lowercase__=None , lowercase__=None , **lowercase__ ) -> Optional[int]:
__A = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase__ , )
__A = kwargs.pop("feature_extractor" )
__A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase__ , lowercase__ )
__A = self.image_processor
def __call__( self , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ) -> Union[str, Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__A = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if images is not None:
__A = self.image_processor(
lowercase__ , return_image_mask=lowercase__ , return_codebook_pixels=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
if text is not None and images is not None:
encoding.update(lowercase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ )
def _lowerCamelCase ( self , *lowercase__ , **lowercase__ ) -> List[Any]:
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def _lowerCamelCase ( self , *lowercase__ , **lowercase__ ) -> Dict:
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def _lowerCamelCase ( self ) -> Tuple:
__A = self.tokenizer.model_input_names
__A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase__ , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ) -> Optional[int]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase__ , )
return self.image_processor
| 715 |
from __future__ import annotations
from typing import Any
def UpperCAmelCase ( lowerCAmelCase__ ):
'''simple docstring'''
create_state_space_tree(lowerCAmelCase__ , [] , 0 )
def UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
if index == len(lowerCAmelCase__ ):
print(lowerCAmelCase__ )
return
create_state_space_tree(lowerCAmelCase__ , lowerCAmelCase__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowerCAmelCase__ , lowerCAmelCase__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
snake_case_ : list[Any] =[3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 205 | 0 |
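A runnable restatement of the skip/take backtracking above, with plain names (the obfuscated snippet is not executable as written); `gen_subsequences` is a name invented here for illustration.

from typing import Any

def gen_subsequences(sequence: list[Any]) -> list[list[Any]]:
    # Depth-first over a binary decision tree: at each index, first skip the
    # element, then take it, then undo the choice (backtrack).
    out: list[list[Any]] = []

    def walk(index: int, current: list[Any]) -> None:
        if index == len(sequence):
            out.append(current.copy())
            return
        walk(index + 1, current)         # branch 1: skip sequence[index]
        current.append(sequence[index])  # branch 2: take sequence[index]
        walk(index + 1, current)
        current.pop()                    # backtrack

    walk(0, [])
    return out

assert len(gen_subsequences([3, 1, 2, 4])) == 2 ** 4  # every subsequence exactly once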
'''simple docstring'''
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> tuple[int, int]:
if b == 0:
return (1, 0)
((UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = extended_euclid(lowerCAmelCase__ , a % b )
UpperCAmelCase__ : List[str] = a // b
return (y, x - k * y)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
((UpperCAmelCase__) , (UpperCAmelCase__)) : Optional[int] = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = na * na
UpperCAmelCase__ : Any = ra * x * na + ra * y * na
return (n % m + m) % m
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
((UpperCAmelCase__) , (UpperCAmelCase__)) : Tuple = extended_euclid(lowerCAmelCase__ , lowerCAmelCase__ )
if b < 0:
UpperCAmelCase__ : int = (b % n + n) % n
return b
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ ), invert_modulo(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase__ : Any = na * na
UpperCAmelCase__ : Optional[int] = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 75 |
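The same two building blocks with readable names, as a hedged sketch: `extended_euclid` returns Bézout coefficients (x, y) with a*x + b*y = gcd(a, b), and the CRT step combines two congruences with coprime moduli.

def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Return (x, y) such that a * x + b * y == gcd(a, b).
    if b == 0:
        return (1, 0)
    x1, y1 = extended_euclid(b, a % b)
    return (y1, x1 - (a // b) * y1)

def chinese_remainder_theorem(r1: int, m1: int, r2: int, m2: int) -> int:
    # Solve n ≡ r1 (mod m1) and n ≡ r2 (mod m2), assuming gcd(m1, m2) == 1.
    x, y = extended_euclid(m1, m2)  # m1 * x + m2 * y == 1
    m = m1 * m2
    n = r2 * x * m1 + r1 * y * m2
    return (n % m + m) % m

n = chinese_remainder_theorem(2, 3, 3, 5)
assert n == 8 and n % 3 == 2 and n % 5 == 3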
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = """facebook/bart-large-mnli"""
_A : str = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
_A : Any = """text_classifier"""
_A : Optional[int] = AutoTokenizer
_A : List[str] = AutoModelForSequenceClassification
_A : Tuple = ["""text""", ["""text"""]]
_A : Any = ["""text"""]
def __UpperCamelCase (self ):
super().setup()
snake_case_ : List[Any] = self.model.config
snake_case_ : Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("""entail""" ):
snake_case_ : Dict = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : Union[str, Any] = labels
return self.pre_processor(
[text] * len(lowercase__ ) , [f'This example is {label}' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = outputs.logits
snake_case_ : Optional[Any] = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 480 | 0 |
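For comparison, the public `zero-shot-classification` pipeline wraps the same NLI-entailment trick around the same checkpoint; a short usage sketch (the input text is made up):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier("I really enjoyed this film.", candidate_labels=["positive", "negative"])
print(result["labels"][0])  # label ranked most likely via the entailment score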
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Any = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 581 |
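The `_LazyModule` indirection above defers the torch-backed imports until first use. A minimal illustration of the general idea, not transformers' actual implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    # Import the real module only when an attribute is first accessed.
    def __init__(self, name: str):
        super().__init__(name)
        self._real = None

    def __getattr__(self, attr: str):
        if self._real is None:
            self._real = importlib.import_module(self.__name__)
        return getattr(self._real, attr)

np = LazyModule("numpy")  # nothing heavy imported yet
print(np.pi)              # first access triggers the real import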
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def a ( UpperCamelCase_ : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ) -> Optional[int]:
# Initialise PyTorch model
snake_case__ =TaConfig.from_json_file(UpperCamelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
snake_case__ =TaForConditionalGeneration(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 581 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Any = WavaVecaForSequenceClassification.from_pretrained(UpperCamelCase , config=UpperCamelCase )
__UpperCAmelCase : int = downstream_dict["projector.weight"]
__UpperCAmelCase : List[Any] = downstream_dict["projector.bias"]
__UpperCAmelCase : Optional[Any] = downstream_dict["model.post_net.linear.weight"]
__UpperCAmelCase : List[Any] = downstream_dict["model.post_net.linear.bias"]
return model
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = WavaVecaForAudioFrameClassification.from_pretrained(UpperCamelCase , config=UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = downstream_dict["model.linear.weight"]
__UpperCAmelCase : Union[str, Any] = downstream_dict["model.linear.bias"]
return model
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
__UpperCAmelCase : List[str] = WavaVecaForXVector.from_pretrained(UpperCamelCase , config=UpperCamelCase )
__UpperCAmelCase : Tuple = downstream_dict["connector.weight"]
__UpperCAmelCase : str = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__UpperCAmelCase : int = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__UpperCAmelCase : Union[str, Any] = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__UpperCAmelCase : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
__UpperCAmelCase : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
__UpperCAmelCase : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
__UpperCAmelCase : Union[str, Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
__UpperCAmelCase : int = downstream_dict["objective.W"]
return model
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location="cpu" )
__UpperCAmelCase : Optional[Any] = checkpoint["Downstream"]
__UpperCAmelCase : int = WavaVecaConfig.from_pretrained(UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(
UpperCamelCase , return_attention_mask=UpperCamelCase , do_normalize=UpperCamelCase )
__UpperCAmelCase : Dict = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
__UpperCAmelCase : List[Any] = convert_classification(UpperCamelCase , UpperCamelCase , UpperCamelCase )
elif arch.endswith("ForAudioFrameClassification" ):
__UpperCAmelCase : List[str] = convert_diarization(UpperCamelCase , UpperCamelCase , UpperCamelCase )
elif arch.endswith("ForXVector" ):
__UpperCAmelCase : str = convert_xvector(UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__UpperCAmelCase : Optional[Any] = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(UpperCamelCase )
hf_model.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
A = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 77 |
from PIL import Image
def _SCREAMING_SNAKE_CASE ( lowercase : Image ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = image.size
lowerCamelCase_ = 0
lowerCamelCase_ = image.load()
for i in range(lowercase ):
for j in range(lowercase ):
lowerCamelCase_ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowercase ):
for i in range(lowercase ):
lowerCamelCase_ = 2_55 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 70 | 0 |
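A sketch of the same mean-threshold binarization with explicit axis names, assuming a grayscale ("L"-mode) input; the file paths are placeholders.

from PIL import Image

def mean_threshold(image: Image.Image) -> Image.Image:
    # Binarize around the global mean intensity: above the mean -> 255, else 0.
    width, height = image.size
    pixels = image.load()
    total = sum(pixels[x, y] for y in range(height) for x in range(width))
    mean = total // (width * height)
    for y in range(height):
        for x in range(width):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image

# result = mean_threshold(Image.open("path_to_image").convert("L"))
# result.save("output_image_path")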
def lowercase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
snake_case__ : str =0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowercase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : List[str] =0
while b > 0:
if b & 1:
snake_case__ : Union[str, Any] =((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 408 |
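With plain names, the two routines above are Russian peasant (shift-and-add) multiplication, in plain and modular form; a runnable sketch:

def multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:    # lowest bit of b set: accumulate the current a
            res += a
        a += a       # double a
        b >>= 1      # halve b
    return res

def multiply_mod(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res

assert multiply(13, 11) == 143
assert multiply_mod(13, 11, 7) == 143 % 7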
def lowercase_ ( SCREAMING_SNAKE_CASE : int = 10_00 ):
"""simple docstring"""
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 408 | 1 |
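The brute-force sum above (Project Euler #1) also has an O(1) closed form via inclusion-exclusion over arithmetic series; a sketch:

def sum_multiples_below(n: int) -> int:
    # Sum of multiples of 3 or 5 below n: add the two series, subtract the overlap (15).
    def series(k: int) -> int:
        m = (n - 1) // k  # count of multiples of k below n
        return k * m * (m + 1) // 2
    return series(3) + series(5) - series(15)

assert sum_multiples_below(1000) == sum(e for e in range(3, 1000) if e % 3 == 0 or e % 5 == 0)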
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , _A : Dict , _A : Tuple=1_3 , _A : Optional[Any]=7 , _A : Union[str, Any]=True , _A : Tuple=True , _A : Union[str, Any]=True , _A : List[Any]=True , _A : Optional[int]=9_9 , _A : int=3_2 , _A : Optional[int]=5 , _A : str=4 , _A : Optional[Any]=3_7 , _A : Union[str, Any]="gelu" , _A : Any=0.1 , _A : Optional[Any]=0.1 , _A : List[Any]=5_1_2 , _A : List[str]=1_6 , _A : Optional[Any]=2 , _A : Optional[int]=0.02 , _A : List[Any]=4 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = parent
_SCREAMING_SNAKE_CASE : Dict = batch_size
_SCREAMING_SNAKE_CASE : Tuple = seq_length
_SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
_SCREAMING_SNAKE_CASE : Any = use_attention_mask
_SCREAMING_SNAKE_CASE : Any = use_token_type_ids
_SCREAMING_SNAKE_CASE : List[str] = use_labels
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
_SCREAMING_SNAKE_CASE : List[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Any = type_vocab_size
_SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
_SCREAMING_SNAKE_CASE : List[Any] = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = num_choices
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_SCREAMING_SNAKE_CASE : int = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_SCREAMING_SNAKE_CASE : List[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=lowercase_ , )
return config, input_ids, attention_mask
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
_SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _snake_case ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = FlaxDistilBertModelTester(self)
@slow
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Union[str, Any] = model_class_name.from_pretrained("""distilbert-base-uncased""")
_SCREAMING_SNAKE_CASE : Dict = model(np.ones((1, 1)))
self.assertIsNotNone(lowercase_)
@require_flax
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""")
_SCREAMING_SNAKE_CASE : List[str] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_SCREAMING_SNAKE_CASE : int = model(lowercase_ , attention_mask=lowercase_)[0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , lowercase_)
_SCREAMING_SNAKE_CASE : Any = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]])
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1e-4))
| 338 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _UpperCAmelCase :
def __init__( self , lowercase_ = "cpu" , lowercase_ = "openai/clip-vit-large-patch14" ) -> None:
UpperCAmelCase = device
UpperCAmelCase = CLIPTokenizerFast.from_pretrained(lowercase_ )
UpperCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCAmelCase = torchvision.transforms.Resize(2_2_4 )
UpperCAmelCase = torchvision.transforms.CenterCrop(2_2_4 )
def a_ ( self , lowercase_ ) -> Any:
UpperCAmelCase = self.resize(lowercase_ )
UpperCAmelCase = self.center_crop(lowercase_ )
UpperCAmelCase = self.normalize(lowercase_ )
return images
def __call__( self , lowercase_=None , lowercase_=None , **lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer(text=lowercase_ , **lowercase_ )
UpperCAmelCase = self.preprocess_img(lowercase_ )
UpperCAmelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _UpperCAmelCase ( nn.Module ):
def __init__( self , lowercase_=1_0 , lowercase_=0.0_1 , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=True , lowercase_="image" , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , ) -> None:
super().__init__()
UpperCAmelCase = None
UpperCAmelCase = device if device else get_device()
if vqgan:
UpperCAmelCase = vqgan
else:
UpperCAmelCase = load_vqgan(self.device , conf_path=lowercase_ , ckpt_path=lowercase_ )
self.vqgan.eval()
if clip:
UpperCAmelCase = clip
else:
UpperCAmelCase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
UpperCAmelCase = ProcessorGradientFlow(device=self.device )
UpperCAmelCase = iterations
UpperCAmelCase = lr
UpperCAmelCase = log
UpperCAmelCase = make_grid
UpperCAmelCase = return_val
UpperCAmelCase = quantize
UpperCAmelCase = self.vqgan.decoder.z_shape
def a_ ( self , lowercase_=None , lowercase_=None , lowercase_=5 , lowercase_=True ) -> Dict:
UpperCAmelCase = []
if output_path is None:
UpperCAmelCase = './animation.gif'
if input_path is None:
UpperCAmelCase = self.save_path
UpperCAmelCase = sorted(glob(input_path + '/*' ) )
if not len(lowercase_ ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(lowercase_ ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
UpperCAmelCase = total_duration / len(lowercase_ )
UpperCAmelCase = [frame_duration] * len(lowercase_ )
if extend_frames:
UpperCAmelCase = 1.5
UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(lowercase_ ) )
imageio.mimsave(lowercase_ , lowercase_ , duration=lowercase_ )
print(F"gif saved to {output_path}" )
def a_ ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
UpperCAmelCase = preprocess(Image.open(lowercase_ ) , target_image_size=2_5_6 ).to(self.device )
UpperCAmelCase = preprocess_vqgan(lowercase_ )
UpperCAmelCase , *UpperCAmelCase = self.vqgan.encode(lowercase_ )
return z
def a_ ( self , lowercase_ ) -> Optional[int]:
UpperCAmelCase = self.latent.detach().requires_grad_()
UpperCAmelCase = base_latent + transform_vector
if self.quantize:
UpperCAmelCase , *UpperCAmelCase = self.vqgan.quantize(lowercase_ )
else:
UpperCAmelCase = trans_latent
return self.vqgan.decode(lowercase_ )
def a_ ( self , lowercase_ , lowercase_ , lowercase_=None ) -> str:
UpperCAmelCase = self.clip_preprocessor(text=lowercase_ , images=lowercase_ , return_tensors='pt' , padding=lowercase_ )
UpperCAmelCase = self.clip(**lowercase_ )
UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
UpperCAmelCase = self._get_clip_similarity(pos_prompts['prompts'] , lowercase_ , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
UpperCAmelCase = self._get_clip_similarity(neg_prompts['prompts'] , lowercase_ , weights=neg_prompts['weights'] )
else:
UpperCAmelCase = torch.tensor([1] , device=self.device )
UpperCAmelCase = -torch.log(lowercase_ ) + torch.log(lowercase_ )
return loss
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
UpperCAmelCase = torch.randn_like(self.latent , requires_grad=lowercase_ , device=self.device )
UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase = self._add_vector(lowercase_ )
UpperCAmelCase = loop_post_process(lowercase_ )
UpperCAmelCase = self._get_CLIP_loss(lowercase_ , lowercase_ , lowercase_ )
print('CLIP loss' , lowercase_ )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=lowercase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
wandb.init(reinit=lowercase_ , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
UpperCAmelCase = Image.open(lowercase_ )
UpperCAmelCase = image.resize((2_5_6, 2_5_6) )
wandb.log('Original Image' , wandb.Image(lowercase_ ) )
def a_ ( self , lowercase_ ) -> Tuple:
if not prompts:
return []
UpperCAmelCase = []
UpperCAmelCase = []
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(lowercase_ , (tuple, list) ):
UpperCAmelCase = prompt[0]
UpperCAmelCase = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase , UpperCAmelCase = prompt.split(':' )
UpperCAmelCase = float(lowercase_ )
else:
UpperCAmelCase = prompt
UpperCAmelCase = 1.0
processed_prompts.append(lowercase_ )
weights.append(lowercase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase_ , device=self.device ),
}
def a_ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=None , ) -> List[str]:
if image_path:
UpperCAmelCase = self._get_latent(lowercase_ )
else:
UpperCAmelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowercase_ , lowercase_ , lowercase_ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase = self.process_prompts(lowercase_ )
UpperCAmelCase = self.process_prompts(lowercase_ )
if save_final and save_path is None:
UpperCAmelCase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(lowercase_ ):
os.makedirs(lowercase_ )
else:
UpperCAmelCase = save_path + '_' + get_timestamp()
os.makedirs(lowercase_ )
UpperCAmelCase = save_path
UpperCAmelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(lowercase_ ) )
UpperCAmelCase = loop_post_process(lowercase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase_ , lowercase_ , lowercase_ ) ):
if show_intermediate:
show_pil(lowercase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(lowercase_ )} )
if show_final:
show_pil(lowercase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
| 373 | 0 |
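The prompt syntax accepted above ("text:weight" pairs separated by "|", or (text, weight) tuples) in isolation, as a runnable sketch with plain names:

import torch

def process_prompts(prompts, device="cpu"):
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    texts, weights = [], []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):  # e.g. ("a face", 1.0)
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:                    # e.g. "a face:1.0"
            text, weight_str = prompt.split(":")
            weight = float(weight_str)
        else:                                  # bare prompt defaults to weight 1.0
            text, weight = prompt, 1.0
        texts.append(text)
        weights.append(weight)
    return {"prompts": texts, "weights": torch.tensor(weights, device=device)}

out = process_prompts("a face:1.0 | glasses:0.5")
assert out["prompts"] == ["a face", "glasses"]
assert out["weights"].tolist() == [1.0, 0.5]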
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : List[str] ):
"""simple docstring"""
snake_case : Any = inspect.getfile(accelerate.test_utils )
snake_case : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
snake_case : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
snake_case : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
@require_multi_gpu
def lowerCAmelCase( self : Optional[int] ):
"""simple docstring"""
print(F"Found {torch.cuda.device_count()} devices." )
snake_case : List[str] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
print(F"Found {torch.cuda.device_count()} devices." )
snake_case : List[Any] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(F"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase( self : Dict ):
"""simple docstring"""
snake_case : Union[str, Any] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase( self : int ):
"""simple docstring"""
print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
snake_case : List[str] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
execute_subprocess_async(UpperCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
_a : Dict = Accelerator()
_a : Optional[Any] = (accelerator.state.process_index + 2, 10)
_a : Dict = torch.randint(0, 10, shape).to(accelerator.device)
_a : Optional[Any] = ''
_a : Tuple = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_a : str = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_a : List[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 84 |
from sklearn.metrics import fa_score
import datasets
_a : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_a : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_a : List[Any] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def lowerCAmelCase( self : Any ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : List[str]="binary" , UpperCAmelCase__ : str=None ):
"""simple docstring"""
snake_case : List[Any] = fa_score(
UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__ )
return {"f1": float(UpperCAmelCase__ ) if score.size == 1 else score}
| 84 | 1 |
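The wrapper ultimately delegates to `sklearn.metrics.f1_score`; calling it directly reproduces the multiclass example from the docstring above:

from sklearn.metrics import f1_score

predictions = [0, 2, 1, 0, 0, 1]
references = [0, 1, 2, 0, 1, 2]
print(round(f1_score(references, predictions, average="macro"), 2))  # 0.27
print(f1_score(references, predictions, average=None))               # [0.8 0.  0. ]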
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCamelCase__ = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def a__ ( lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : str = None
# source code of `config_class`
UpperCAmelCase__ : str = inspect.getsource(lowerCAmelCase__ )
UpperCAmelCase__ : List[Any] = _re_checkpoint.findall(lowerCAmelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCAmelCase__ : List[str] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase__ : Union[str, Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase__ : Any = ckpt_name
break
return checkpoint
def a__ ( ) -> Dict:
UpperCAmelCase__ : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCAmelCase__ : Any = get_checkpoint_from_config_class(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
UpperCAmelCase__ : List[str] = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 75 |
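A standalone check of the checkpoint-link pattern defined above; the sample docstring text is invented for illustration:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

source = "It is based on [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
assert _re_checkpoint.findall(source) == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]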
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ ) -> list[int]:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
UpperCAmelCase__ : str = [True] * (end + 1)
UpperCAmelCase__ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = False
start += 1
prime += in_prime
UpperCAmelCase__ : Optional[int] = end + 1
UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ )
while low <= n:
UpperCAmelCase__ : List[str] = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ : Union[str, Any] = high + 1
UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ )
return prime
print(sieve(1_0**6))
| 75 | 1 |
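As a reference point for the segmented sieve above, a plain sieve of Eratosthenes (for n >= 2) against which small ranges can be validated:

import math

def simple_sieve(n: int) -> list[int]:
    # Mark composites by striking out multiples of each prime up to sqrt(n).
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, math.isqrt(n) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]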
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Tuple = DDIMPipeline
_UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
_UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : List[str] = False
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
_snake_case : List[Any] = DDIMScheduler()
_snake_case : Union[str, Any] = {'unet': unet, 'scheduler': scheduler}
return components
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str]=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
_snake_case : str = torch.manual_seed(lowerCamelCase_ )
else:
_snake_case : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu'
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : int = self.pipeline_class(**lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Dict = self.get_dummy_inputs(lowerCamelCase_ )
_snake_case : Optional[Any] = pipe(**lowerCamelCase_ ).images
_snake_case : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_snake_case : int = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_snake_case : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase_ , 1e-3 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = 'google/ddpm-cifar10-32'
_snake_case : Any = UNetaDModel.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = DDIMScheduler()
_snake_case : List[str] = DDIMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
ddim.to(lowerCamelCase_ )
ddim.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = torch.manual_seed(0 )
_snake_case : List[Any] = ddim(generator=lowerCamelCase_ , eta=0.0 , output_type='numpy' ).images
_snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case : Union[str, Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = 'google/ddpm-ema-bedroom-256'
_snake_case : Union[str, Any] = UNetaDModel.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[int] = DDIMScheduler.from_pretrained(lowerCamelCase_ )
_snake_case : str = DDIMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
ddpm.to(lowerCamelCase_ )
ddpm.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = torch.manual_seed(0 )
_snake_case : Optional[Any] = ddpm(generator=lowerCamelCase_ , output_type='numpy' ).images
_snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_snake_case : Dict = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 700 |
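A hedged end-to-end sketch of what the slow test above exercises: loading the same checkpoint and sampling one image (argument names as I understand current diffusers; treat as illustrative, not canonical):

import torch
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
image = pipe(generator=torch.manual_seed(0), eta=0.0).images[0]
image.save("ddim_sample.png")  # output path is a placeholder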
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
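A self-contained miniature of the "token label" splitting the CoNLL reader above performs; the sample lines are invented, and the real reader also tracks guid indices and handles unlabeled test lines:

examples, words, labels = [], [], []
sample = ["-DOCSTART- -X- O", "", "EU B-ORG", "rejects O", "", "Peter B-PER"]
for line in sample + [""]:  # trailing "" flushes the last sentence
    if line.startswith("-DOCSTART-") or line.strip() == "":
        if words:
            examples.append((words, labels))
            words, labels = [], []
    else:
        splits = line.split(" ")
        words.append(splits[0])   # surface token
        labels.append(splits[-1]) # last column holds the tag
assert examples == [(["EU", "rejects"], ["B-ORG", "O"]), (["Peter"], ["B-PER"])]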
from __future__ import annotations
def _lowercase( __a : int ):
a__ =2
a__ =[]
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__a )
if n > 1:
factors.append(__a )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
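The same trial-division factorization with plain names, runnable as-is:

def prime_factors(n: int) -> list[int]:
    # Divide out each factor i starting from 2; whatever remains > 1 is prime.
    factors = []
    i = 2
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]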
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
__lowerCAmelCase = logging.getLogger(__name__)
__lowerCAmelCase = {'facebook/bart-base': BartForConditionalGeneration}
__lowerCAmelCase = {'facebook/bart-base': BartTokenizer}
def _UpperCAmelCase ( ):
a_ : int = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=__A , default=__A , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=__A , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=__A , default=__A , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=__A , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__A , )
parser.add_argument(
'''--config_name''' , type=__A , default=__A , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=__A , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=__A , default=__A , help='''Where to store the final ONNX file.''' )
a_ : Tuple = parser.parse_args()
return args
def _UpperCAmelCase ( __A : List[str] , __A : Dict="cpu" ):
a_ : str = model_dict[model_name].from_pretrained(__A ).to(__A )
a_ : str = tokenizer_dict[model_name].from_pretrained(__A )
if model_name in ["facebook/bart-base"]:
a_ : str = 0
a_ : Any = None
a_ : int = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
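
# Example invocation (illustrative; the script filename is an assumption):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx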
| 466 | 0 |
"""Fast tokenization classes for the CamemBERT model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """
    Fast CamemBERT tokenizer, backed by HuggingFace's tokenizers library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
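
    # Note: like RoBERTa, CamemBERT encodes a pair as <s> A </s></s> B </s>,
    # which is why two separator tokens appear between the sequences above.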
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
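
# Minimal usage sketch (hypothetical; requires the "camembert-base" checkpoint
# to be reachable on the Hugging Face Hub):
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("J'aime le camembert !")["input_ids"]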
| 706 |
from __future__ import annotations
def is_palindrome(n: int) -> bool:
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
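
# Example: 585 = 0b1001001001 reads the same forwards and backwards in both
# base 10 and base 2, so it is one of the numbers summed by solution().
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])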
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 356 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
    Haberland, Matt and Reddy, Tyler and Cournapeau, David and
    Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
    Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
    Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
    Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
    Kern, Robert and Larson, Eric and Carey, C J and
    Polat, Ilhan and Feng, Yu and Moore, Eric W. and
    {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
    Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
    Harris, Charles R. and Archibald, Anne M. and
    Ribeiro, Antonio H. and Pedregosa, Fabian and
    {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
    Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
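
# Usage sketch (mirrors Example 1 from _KWARGS_DESCRIPTION above):
#   pearsonr_metric = datasets.load_metric("pearsonr")
#   pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
#   # {'pearsonr': -0.74...}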
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 237 |
"""Project Euler problem 7: find the 10001st prime number."""
import math


def is_prime(number: int) -> bool:
    """Return True if number is prime, else False."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
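    # The expected output for the default argument is the well-known answer to
    # Project Euler problem 7: solution() = 104743.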
| 685 | 0 |
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* a v) / (v* v) of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
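
    # For the second matrix, a @ v = [17, 5, 5]^T, so the quotient is
    # (v* a v) / (v* v) = 42 / 14 = 3, which is what the final assert checks.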
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 457 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of AutoencoderKL's encode method, wrapping the encoded latent distribution."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with a KL-regularized latent space."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
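
    # blend_v / blend_h linearly cross-fade a tile into its upper / left
    # neighbour over `blend_extent` rows / columns, so that the tiles
    # concatenated by tiled_encode / tiled_decode below do not show seams.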
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
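
# Minimal usage sketch (channel counts and spatial size are the illustrative
# defaults above, not values from any pretrained checkpoint):
#   vae = AutoencoderKL()
#   image = torch.randn(1, 3, 32, 32)
#   posterior = vae.encode(image).latent_dist
#   reconstruction = vae.decode(posterior.sample()).sample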
| 457 | 1 |