max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
tools/process-wasm-compilation-times.py | EXHades/v8 | 20,995 | 12760997 |
#!/usr/bin/env python3
# Copyright 2021 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Processes {stdout} output generated by --trace-wasm-compilation-times
# for easier consumption by human readers.
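# Example invocation (hypothetical file names, a sketch only):
#   d8 --trace-wasm-compilation-times module.js > trace.log
#   tools/process-wasm-compilation-times.py trace.log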
import sys
def SizeInternal(number, suffix):
if suffix == "": return "%d" % number
if number < 10: return "%.1f%s" % (number, suffix)
return "%d%s" % (number, suffix)
def Size(number):
if (number < 1024): return SizeInternal(number, "")
number /= 1024
if (number < 1024): return SizeInternal(number, "K")
number /= 1024
if (number < 1024): return SizeInternal(number, "M")
number /= 1024
if (number < 1024): return SizeInternal(number, "G")
return SizeInternal(number / 1024, "T")
modules = {}
max_module = 0
total_tf_time = 0
total_tf_size = 0
def RegisterName(raw):
global max_module
parts = raw.split("#")
m = parts[0]
if m not in modules:
modules[m] = max_module
max_module += 1
def Name(raw):
parts = raw.split("#")
if len(modules) == 1: return "#%s" % parts[1]
return "m%d#%s" % (modules[parts[0]], parts[1])
class Function:
def __init__(self, index):
self.index = index
self.has_lo = False
self.has_tf = False
self.time_lo = -1
self.time_tf = -1
self.mem_lo = -1
self.mem_tf_max = -1
self.mem_tf_total = -1
self.name = ""
self.size_wasm = -1
self.size_lo = -1
self.size_tf = -1
def AddLine(self, words):
assert self.index == words[2], "wrong function"
if words[4] == "TurboFan,":
self.AddTFLine(words)
elif words[4] == "Liftoff,":
self.AddLiftoffLine(words)
else:
raise Exception("unknown compiler: %s" % words[4])
def AddTFLine(self, words):
assert not self.has_tf, "duplicate TF line for %s" % self.index
self.has_tf = True
# 0 1 2 3 4 5 6 7 8 9 10 11
# Compiled function #6 using TurboFan, took 0 ms and 14440 / 44656
# 12 13 14 15 16 17
# max/total bytes, codesize 24 name wasm-function#6
self.time_tf = int(words[6])
self.mem_tf_max = int(words[9])
self.mem_tf_total = int(words[11])
self.size_tf = int(words[15])
self.name = words[17]
def AddLiftoffLine(self, words):
assert self.index == words[2], "wrong function"
assert not self.has_lo, "duplicate Liftoff line for %s" % self.index
self.has_lo = True
# 0 1 2 3 4 5 6 7 8 9 10 11 12
# Compiled function #6 using Liftoff, took 0 ms and 968 bytes; bodysize 4
# 13 14
# codesize 68
self.time_lo = int(words[6])
self.mem_lo = int(words[9])
self.size_lo = int(words[14])
self.size_wasm = int(words[12])
def __str__(self):
return "%s: time %d %d mem %s %s %s size %s %s %s name %s" % (
Name(self.index), self.time_lo, self.time_tf,
Size(self.mem_lo), Size(self.mem_tf_max), Size(self.mem_tf_total),
Size(self.size_wasm), Size(self.size_lo), Size(self.size_tf), self.name
)
funcs_dict = {}
funcs_list = []
if len(sys.argv) < 2 or sys.argv[1] in ("-h", "--help", "help"):
print("Pass output file (generated with --trace-wasm-compilation-times) as "
"argument")
sys.exit(1)
with open(sys.argv[1], "r") as f:
for line in f.readlines():
words = line.strip().split(" ")
if words[0] != "Compiled": continue
name = words[2]
RegisterName(name)
if name in funcs_dict:
func = funcs_dict[name]
else:
func = Function(name)
funcs_dict[name] = func
funcs_list.append(func)
func.AddLine(words)
funcs_list.sort(key=lambda fun: fun.time_tf)
for f in funcs_list:
print(f)
total_tf_time += f.time_tf
total_tf_size += f.size_tf
print("Total TF time: %d" % total_tf_time)
print("Total TF size: %d" % total_tf_size)
|
official/vision/segmentation/tools/train.py | pepperonibo/Models | 294 | 12761025 |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import os
import time
import numpy as np
import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler, dataset
from megengine.data import transform as T
from megengine.optimizer import SGD
from official.vision.segmentation.tools.utils import AverageMeter, get_config_info, import_from_file
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-n", "--devices", type=int, default=8, help="batch size for training"
)
parser.add_argument(
"-d", "--dataset_dir", type=str, default="/data/datasets",
)
parser.add_argument(
"-r", "--resume", type=str, default=None, help="resume model file"
)
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
log_dir = "log-of-{}".format(os.path.basename(args.file).split(".")[0])
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
# pylint: disable=too-many-branches
def worker(args):
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
backbone_params = []
head_params = []
for name, param in model.named_parameters():
if "backbone" in name:
backbone_params.append(param)
else:
head_params.append(param)
opt = SGD(
[
{
"params": backbone_params,
"lr": model.cfg.learning_rate * dist.get_world_size() * 0.1,
},
{"params": head_params},
],
lr=model.cfg.learning_rate * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
model.parameters(),
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(model.parameters())
cur_epoch = 0
if args.resume is not None:
pretrained = mge.load(args.resume)
cur_epoch = pretrained["epoch"] + 1
model.load_state_dict(pretrained["state_dict"])
opt.load_state_dict(pretrained["opt"])
if dist.get_rank() == 0:
logger.info("load success: epoch %d", cur_epoch)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(
build_dataloader(model.cfg.batch_size, args.dataset_dir, model.cfg)
)
for epoch in range(cur_epoch, model.cfg.max_epoch):
train_one_epoch(model, train_loader, opt, gm, epoch)
if dist.get_rank() == 0:
save_path = "log-of-{}/epoch_{}.pkl".format(
os.path.basename(args.file).split(".")[0], epoch
)
mge.save({
"epoch": epoch,
"state_dict": model.state_dict(),
"opt": opt.state_dict()
}, save_path)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, data_queue, opt, gm, epoch):
def train_func(data, label):
with gm:
pred = model(data)
loss = cross_entropy(
pred, label, ignore_label=model.cfg.ignore_label
)
gm.backward(loss)
opt.step().clear_grad()
return loss
meter = AverageMeter(record_len=1)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (
model.cfg.batch_size * dist.get_world_size()
)
for step in range(tot_step):
adjust_learning_rate(opt, epoch, step, tot_step, model.cfg)
data_tik = time.time()
inputs, labels = next(data_queue)
labels = np.squeeze(labels, axis=1).astype(np.int32)
data_tok = time.time()
tik = time.time()
loss = train_func(mge.tensor(inputs), mge.tensor(labels))
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(["{}:%f".format(loss) for loss in ["loss"]])
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in [loss]])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[1]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate(optimizer, epoch, step, tot_step, cfg):
max_iter = cfg.max_epoch * tot_step
cur_iter = epoch * tot_step + step
cur_lr = cfg.learning_rate * (1 - cur_iter / (max_iter + 1)) ** 0.9
optimizer.param_groups[0]["lr"] = cur_lr * 0.1
optimizer.param_groups[1]["lr"] = cur_lr
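# Worked example for adjust_learning_rate above (hypothetical values): with
# learning_rate=0.01 and cur_iter/max_iter ~= 0.5, cur_lr ~= 0.01 * 0.5**0.9 ~= 0.0054;
# the backbone parameter group then runs at one tenth of that value.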
def cross_entropy(pred, label, axis=1, ignore_label=255):
mask = label != ignore_label
pred = pred.transpose(0, 2, 3, 1)
return F.loss.cross_entropy(pred[mask], label[mask], axis)
def build_dataloader(batch_size, dataset_dir, cfg):
if cfg.dataset == "VOC2012":
train_dataset = dataset.PascalVOC(
dataset_dir,
cfg.data_type,
order=["image", "mask"]
)
elif cfg.dataset == "Cityscapes":
train_dataset = dataset.Cityscapes(
dataset_dir,
"train",
mode='gtFine',
order=["image", "mask"]
)
else:
raise ValueError("Unsupported dataset {}".format(cfg.dataset))
train_sampler = Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.RandomHorizontalFlip(0.5),
T.RandomResize(scale_range=(0.5, 2)),
T.RandomCrop(
output_size=(cfg.img_height, cfg.img_width),
padding_value=[0, 0, 0],
padding_maskvalue=255,
),
T.Normalize(mean=cfg.img_mean, std=cfg.img_std),
T.ToMode(),
],
order=["image", "mask"],
),
num_workers=2,
)
return train_dataloader
if __name__ == "__main__":
main()
|
src/tools/nuscenes-devkit/utils/color_map.py | jie311/TraDeS | 1,284 | 12761053 | from typing import Dict, Tuple
def get_colormap() -> Dict[str, Tuple[int, int, int]]:
"""
Get the defined colormap.
:return: A mapping from the class names to the respective RGB values.
"""
classname_to_color = { # RGB.
"noise": (0, 0, 0), # Black.
"animal": (70, 130, 180), # Steelblue
"human.pedestrian.adult": (0, 0, 230), # Blue
"human.pedestrian.child": (135, 206, 235), # Skyblue,
"human.pedestrian.construction_worker": (100, 149, 237), # Cornflowerblue
"human.pedestrian.personal_mobility": (219, 112, 147), # Palevioletred
"human.pedestrian.police_officer": (0, 0, 128), # Navy,
"human.pedestrian.stroller": (240, 128, 128), # Lightcoral
"human.pedestrian.wheelchair": (138, 43, 226), # Blueviolet
"movable_object.barrier": (112, 128, 144), # Slategrey
"movable_object.debris": (210, 105, 30), # Chocolate
"movable_object.pushable_pullable": (105, 105, 105), # Dimgrey
"movable_object.trafficcone": (47, 79, 79), # Darkslategrey
"static_object.bicycle_rack": (188, 143, 143), # Rosybrown
"vehicle.bicycle": (220, 20, 60), # Crimson
"vehicle.bus.bendy": (255, 127, 80), # Coral
"vehicle.bus.rigid": (255, 69, 0), # Orangered
"vehicle.car": (255, 158, 0), # Orange
"vehicle.construction": (233, 150, 70), # Darksalmon
"vehicle.emergency.ambulance": (255, 83, 0),
"vehicle.emergency.police": (255, 215, 0), # Gold
"vehicle.motorcycle": (255, 61, 99), # Red
"vehicle.trailer": (255, 140, 0), # Darkorange
"vehicle.truck": (255, 99, 71), # Tomato
"flat.driveable_surface": (0, 207, 191), # nuTonomy green
"flat.other": (175, 0, 75),
"flat.sidewalk": (75, 0, 75),
"flat.terrain": (112, 180, 60),
"static.manmade": (222, 184, 135), # Burlywood
"static.other": (255, 228, 196), # Bisque
"static.vegetation": (0, 175, 0), # Green
"vehicle.ego": (255, 240, 245)
}
return classname_to_color
|
qprotocal/utils/xbin.py | gorgiaxx/qq-protocal-library | 109 | 12761056 | #!/usr/bin/env python
import binascii
import hashlib
import random
class Xbin(object):
# def __init__(self):
# get random hex by length
def get_random_hex(self, length=1, is_bytes=0):
random_hex = ''
for _ in range(0, length):
random_hex += "{:0>2x}".format(random.randrange(0, 255))
if is_bytes:
return bytes().fromhex(random_hex)
else:
return random_hex
    # MD5 of src; no instance state is needed, so expose it as a staticmethod
    @staticmethod
    def get_md5_value(src, is_bytes=0):
md5 = hashlib.md5()
md5.update(src)
md5_digest = md5.hexdigest()
if is_bytes:
return bytes().fromhex(md5_digest)
else:
return md5_digest
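# Usage sketch (hypothetical inputs, illustration only):
#   Xbin().get_random_hex(4)          # -> 8 random hex characters, e.g. '1a2b3c4d'
#   Xbin.get_md5_value(b'data', 1)    # -> 16-byte MD5 digest of b'data'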
|
netket/utils/struct/utils.py | gpescia/MyNetKet | 352 | 12761089 |
import sys
import builtins
from dataclasses import MISSING
## STUFF FROM python/lib/dataclasses.py
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
def _create_fn(
name, args, body, *, globals=None, locals=None, return_type=MISSING, doc=None
):
# Note that we mutate locals when exec() is called. Caller
# beware! The only callers are internal to this module, so no
# worries about external callers.
if locals is None:
locals = {}
if "BUILTINS" not in locals:
locals["BUILTINS"] = builtins
return_annotation = ""
if return_type is not MISSING:
locals["_return_type"] = return_type
return_annotation = "->_return_type"
args = ",".join(args)
body = "\n".join(f" {b}" for b in body)
# Compute the text of the entire function.
txt = f" def {name}({args}){return_annotation}:\n{body}"
local_vars = ", ".join(locals.keys())
txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}"
ns = {}
exec(txt, globals, ns) # noqa: W0122
fn = ns["__create_fn__"](**locals)
if doc is not None:
fn.__doc__ = doc
return fn
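# Minimal usage sketch of _create_fn (hypothetical helper call, not part of the module's API):
#   add_one = _create_fn("add_one", ["x"], ["return x + 1"])
#   add_one(1)  # -> 2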
def get_class_globals(clz):
if clz.__module__ in sys.modules:
globals = sys.modules[clz.__module__].__dict__
else:
globals = {}
return globals
|
h2o-py/tests/testdir_algos/psvm/pyunit_svm_svmguide3.py | ahmedengu/h2o-3 | 6,098 | 12761110 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.psvm import H2OSupportVectorMachineEstimator
def svm_svmguide3():
svmguide3 = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale.svm"))
svmguide3_test = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale_test.svm"))
# parameters taken from libsvm guide
svm_tuned = H2OSupportVectorMachineEstimator(hyper_param=128, gamma=0.125, disable_training_metrics=False)
svm_tuned.train(y="C1", training_frame=svmguide3, validation_frame=svmguide3_test)
accuracy = svm_tuned.model_performance(valid=True).accuracy()[0][1]
assert accuracy >= 0.80 # guide has 87% - this just shows it is not completely off
if __name__ == "__main__":
pyunit_utils.standalone_test(svm_svmguide3)
else:
svm_svmguide3()
|
src/test/blocked_bad_ip.py | jalapenopuzzle/rr | 5,156 | 12761161 | from util import *
send_gdb('c')
expect_rr('EXIT-SUCCESS')
expect_gdb('SIGSEGV')
send_gdb('reverse-stepi')
expect_gdb('SIGSEGV')
send_gdb('reverse-stepi')
expect_gdb('start_thread')
ok()
|
mmgen/datasets/quick_test_dataset.py | plutoyuxie/mmgeneration | 718 | 12761188 | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.utils.data import Dataset
from .builder import DATASETS
@DATASETS.register_module()
class QuickTestImageDataset(Dataset):
"""Dataset for quickly testing the correctness.
Args:
size (tuple[int]): The size of the images. Defaults to `None`.
"""
def __init__(self, *args, size=None, **kwargs):
super().__init__()
self.size = size
self.img_tensor = torch.randn(3, self.size[0], self.size[1])
def __len__(self):
return 10000
def __getitem__(self, idx):
return dict(real_img=self.img_tensor)
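# Usage sketch (hypothetical size): QuickTestImageDataset(size=(32, 32)) yields
# 10000 identical samples whose 'real_img' tensor has shape (3, 32, 32).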
|
tests/integration/test_storage_s3/s3_mocks/echo.py | pdv-ru/ClickHouse | 15,577 | 12761210 | import http.server
import sys
class RequestHandler(http.server.BaseHTTPRequestHandler):
def do_HEAD(self):
if self.path.startswith("/get-my-path/"):
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
elif self.path == "/":
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
else:
self.send_response(404)
self.send_header("Content-Type", "text/plain")
self.end_headers()
def do_GET(self):
self.do_HEAD()
if self.path.startswith("/get-my-path/"):
self.wfile.write(b'/' + self.path.split('/', maxsplit=2)[2].encode())
elif self.path == "/":
self.wfile.write(b"OK")
httpd = http.server.HTTPServer(("0.0.0.0", int(sys.argv[1])), RequestHandler)
httpd.serve_forever()
|
amadeus/airport/__init__.py | akshitsingla/amadeus-python | 125 | 12761228 | from ._predictions import AirportOnTime
__all__ = ['AirportOnTime']
|
homeassistant/components/geocaching/sensor.py | liangleslie/core | 30,023 | 12761232 | """Platform for sensor integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import cast
from geocachingapi.models import GeocachingStatus
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import GeocachingDataUpdateCoordinator
@dataclass
class GeocachingRequiredKeysMixin:
"""Mixin for required keys."""
value_fn: Callable[[GeocachingStatus], str | int | None]
@dataclass
class GeocachingSensorEntityDescription(
SensorEntityDescription, GeocachingRequiredKeysMixin
):
"""Define Sensor entity description class."""
SENSORS: tuple[GeocachingSensorEntityDescription, ...] = (
GeocachingSensorEntityDescription(
key="find_count",
name="Total finds",
icon="mdi:notebook-edit-outline",
native_unit_of_measurement="caches",
value_fn=lambda status: status.user.find_count,
),
GeocachingSensorEntityDescription(
key="hide_count",
name="Total hides",
icon="mdi:eye-off-outline",
native_unit_of_measurement="caches",
entity_registry_visible_default=False,
value_fn=lambda status: status.user.hide_count,
),
GeocachingSensorEntityDescription(
key="favorite_points",
name="Favorite points",
icon="mdi:heart-outline",
native_unit_of_measurement="points",
entity_registry_visible_default=False,
value_fn=lambda status: status.user.favorite_points,
),
GeocachingSensorEntityDescription(
key="souvenir_count",
name="Total souvenirs",
icon="mdi:license",
native_unit_of_measurement="souvenirs",
value_fn=lambda status: status.user.souvenir_count,
),
GeocachingSensorEntityDescription(
key="awarded_favorite_points",
name="Awarded favorite points",
icon="mdi:heart",
native_unit_of_measurement="points",
entity_registry_visible_default=False,
value_fn=lambda status: status.user.awarded_favorite_points,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up a Geocaching sensor entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
GeocachingSensor(coordinator, description) for description in SENSORS
)
class GeocachingSensor(
CoordinatorEntity[GeocachingDataUpdateCoordinator], SensorEntity
):
"""Representation of a Sensor."""
entity_description: GeocachingSensorEntityDescription
def __init__(
self,
coordinator: GeocachingDataUpdateCoordinator,
description: GeocachingSensorEntityDescription,
) -> None:
"""Initialize the Geocaching sensor."""
super().__init__(coordinator)
self.entity_description = description
self._attr_name = (
f"Geocaching {coordinator.data.user.username} {description.name}"
)
self._attr_unique_id = (
f"{coordinator.data.user.reference_code}_{description.key}"
)
self._attr_device_info = DeviceInfo(
name=f"Geocaching {coordinator.data.user.username}",
identifiers={(DOMAIN, cast(str, coordinator.data.user.reference_code))},
entry_type=DeviceEntryType.SERVICE,
manufacturer="Groundspeak, Inc.",
)
@property
def native_value(self) -> str | int | None:
"""Return the state of the sensor."""
return self.entity_description.value_fn(self.coordinator.data)
|
rapidsms/backends/kannel/migrations/0002_auto_20150801_2142.py | catalpainternational/rapidsms | 330 | 12761237 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('kannel', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='deliveryreport',
name='message_id',
field=models.CharField(max_length=255, verbose_name='Message ID'),
preserve_default=True,
),
migrations.AlterField(
model_name='deliveryreport',
name='sms_id',
field=models.CharField(max_length=36, verbose_name='SMS ID'),
preserve_default=True,
),
migrations.AlterField(
model_name='deliveryreport',
name='smsc',
field=models.CharField(max_length=255, verbose_name='SMSC'),
preserve_default=True,
),
migrations.AlterField(
model_name='deliveryreport',
name='status',
field=models.SmallIntegerField(choices=[(1, 'Delivery Success'), (2, 'Delivery Failure'), (4, 'Message Buffered'), (8, 'SMSC Submit'), (16, 'SMSC Reject')]),
preserve_default=True,
),
]
|
caql/utils.py | deepneuralmachine/google-research | 23,901 | 12761284 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import os
import pickle
from absl import flags
from absl import logging
import gym
import numpy as np
import tensorflow.compat.v1 as tf
from tf_agents.environments import suite_mujoco
from tf_agents.specs import array_spec
flags.DEFINE_integer('checkpoint_iterations', 50, 'Periodicity of checkpoints.')
flags.DEFINE_integer('eval_iterations', 50, 'Periodicity of evaluations.')
flags.DEFINE_integer('num_evals', 10, 'Number of evaluations.')
FLAGS = flags.FLAGS
_CHECKPOINT_FILENAME = 'model.ckpt'
def get_state_and_action_specs(gym_env, action_bounds=None):
"""Returns state and action specs for a Gym environment.
Args:
gym_env: gym.core.Env. A Gym environment.
action_bounds: list of strings. Min and max values in string for action
variables.
Returns:
(BoundedArraySpec, BoundedArraySpec). The first is a state spec and the
second is a action spec.
"""
if isinstance(gym_env.observation_space, gym.spaces.Box):
state_spec = array_spec.BoundedArraySpec(
shape=gym_env.observation_space.shape,
dtype=gym_env.observation_space.dtype,
minimum=gym_env.observation_space.low,
maximum=gym_env.observation_space.high)
else:
raise NotImplementedError(type(gym_env.observation_space))
if action_bounds:
assert len(action_bounds) == 2
action_min = np.tile(float(action_bounds[0]), gym_env.action_space.shape)
action_max = np.tile(float(action_bounds[1]), gym_env.action_space.shape)
else:
action_min = gym_env.action_space.low
action_max = gym_env.action_space.high
if isinstance(gym_env.action_space, gym.spaces.Box):
action_spec = array_spec.BoundedArraySpec(
shape=gym_env.action_space.shape,
dtype=gym_env.action_space.dtype,
minimum=action_min,
maximum=action_max)
else:
raise NotImplementedError(type(gym_env.action_space))
return state_spec, action_spec
def create_env(env_name):
"""Creates Environment."""
if env_name == 'Pendulum':
env = gym.make('Pendulum-v0')
elif env_name == 'Hopper':
env = suite_mujoco.load('Hopper-v2')
elif env_name == 'Walker2D':
env = suite_mujoco.load('Walker2d-v2')
elif env_name == 'HalfCheetah':
env = suite_mujoco.load('HalfCheetah-v2')
elif env_name == 'Ant':
env = suite_mujoco.load('Ant-v2')
elif env_name == 'Humanoid':
env = suite_mujoco.load('Humanoid-v2')
else:
raise ValueError('Unsupported environment: %s' % env_name)
return env
def _env_reset(env):
if hasattr(env, 'time_step_spec'):
return env.reset().observation
else:
return env.reset()
def _env_step(env, action):
if hasattr(env, 'time_step_spec'):
ts = env.step(action)
return ts.observation, ts.reward, env.done, env.get_info()
else:
return env.step(action)
def warm_up_replay_memory(session, behavior_policy, time_out, discount_factor,
replay_memory):
  # The number of events in an episode could be less than the maximum episode
# length (i.e., time_out) when the environment has a termination state.
min_replay_memory_size = FLAGS.batch_size * FLAGS.train_steps_per_iteration
while replay_memory.size < min_replay_memory_size:
num_events = min_replay_memory_size - replay_memory.size
num_episodes = int(num_events / time_out) + 1
collect_experience_parallel(num_episodes, session, behavior_policy,
time_out, discount_factor, replay_memory)
def collect_experience_parallel(num_episodes,
session,
behavior_policy,
time_out,
discount_factor,
replay_memory,
collect_init_state_step=False):
"""Executes threads for data collection."""
old_size = replay_memory.size
if num_episodes > 1:
with futures.ThreadPoolExecutor(
max_workers=FLAGS.collect_experience_parallelism) as executor:
for _ in range(num_episodes):
executor.submit(collect_experience, session, behavior_policy, time_out,
discount_factor, replay_memory, collect_init_state_step)
else:
collect_experience(session, behavior_policy, time_out, discount_factor,
replay_memory, collect_init_state_step)
return replay_memory.size - old_size
def collect_experience(session,
behavior_policy,
time_out,
discount_factor,
replay_memory,
collect_init_state_step=False):
"""Adds experiences into replay memory.
Generates an episode, computes Q targets for state and action pairs in the
episode, and adds them into the replay memory.
"""
with session.as_default():
with session.graph.as_default():
env = create_env(FLAGS.env_name)
episode, _, _ = _collect_episode(env, time_out, discount_factor,
behavior_policy, collect_init_state_step)
replay_memory.extend(episode)
if hasattr(env, 'close'):
env.close()
def _collect_episode(env, time_out, discount_factor, behavior_policy,
collect_init_state_step=False):
"""Collects episodes of trajectories by following a behavior policy."""
episode = []
episode_lengths = []
episode_rewards = []
state = _env_reset(env)
init_state = _env_reset(env)
done = False
episode_step_count = 0
e_reward = 0
for _ in range(time_out):
# First, sample an action
action = behavior_policy.action(state, use_action_function=True)
if action is None:
break
next_state, reward, done, info = _env_step(env, action)
reward = reward if not done else 0.0
# Save the experience to our buffer
if collect_init_state_step:
episode.append([
init_state, state, action, reward, next_state, episode_step_count,
done, info
])
else:
episode.append([state, action, reward, next_state, done, info])
# update state, e_reward and step count
state = next_state
if discount_factor < 1:
e_reward += (discount_factor**episode_step_count) * reward
else:
e_reward += reward
episode_step_count += 1
if done:
break
if episode_step_count > 0:
episode_lengths.append(episode_step_count)
episode_rewards.append(e_reward)
return (episode, episode_lengths, episode_rewards)
def periodic_updates(iteration,
train_step,
replay_memories,
greedy_policy,
saver,
sess,
time_out,
use_action_function=True,
tf_summary=None):
"""Evaluates the algorithm."""
if (FLAGS.checkpoint_dir and FLAGS.checkpoint_iterations and
iteration % FLAGS.checkpoint_iterations == 0):
logging.info('Iteration: %d, writing checkpoints..', iteration)
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
checkpoint_file = os.path.join(FLAGS.checkpoint_dir, _CHECKPOINT_FILENAME)
saver.save(
sess, checkpoint_file, global_step=train_step, write_meta_graph=False)
for replay_memory in replay_memories:
replay_memory.save(FLAGS.checkpoint_dir, delete_old=True)
logging.info('Iteration: %d, completed writing checkpoints.', iteration)
if FLAGS.eval_iterations and iteration % FLAGS.eval_iterations == 0:
logging.info('Iteration: %d, evaluating the model..', iteration)
scores = []
action_magnitudes = []
episode_lens = []
future_list = []
with futures.ThreadPoolExecutor(max_workers=FLAGS.num_evals) as executor:
for _ in range(FLAGS.num_evals):
future_list.append(
executor.submit(
_evaluate_model,
time_out,
greedy_policy,
use_action_function=use_action_function,
render=False))
for future in futures.as_completed(future_list):
score, action_magnitude, episode_len = future.result()
scores.append(score)
action_magnitudes.append(action_magnitude)
episode_lens.append(episode_len)
avg_score = np.mean(scores)
avg_action_magitude = np.mean(action_magnitudes)
avg_episode_len = np.mean(episode_lens)
logging.info(
'Iteration: %d, avg_score: %.3f, avg_episode_len: %.3f, '
'avg_action_magnitude: %.3f', iteration, avg_score, avg_episode_len,
avg_action_magitude)
if tf_summary:
tf_summary.value.extend([
tf.Summary.Value(tag='avg_score', simple_value=avg_score),
tf.Summary.Value(
tag='avg_action_magnitude', simple_value=avg_action_magitude),
tf.Summary.Value(tag='avg_episode_len', simple_value=avg_episode_len)
])
def _evaluate_model(time_out,
greedy_policy,
use_action_function=False,
render=False):
"""Evaluates the model."""
env = create_env(FLAGS.env_name)
state = _env_reset(env)
total_reward = 0.0
total_action_magnitude = 0.0
episode_len = 0
for _ in range(time_out):
if render:
env.render()
action = greedy_policy.action(
np.reshape(state, [1, -1]), use_action_function)
if action is None:
break
next_state, reward, done, _ = _env_step(env, action)
state = next_state
total_reward += reward
if greedy_policy.continuous_action:
total_action_magnitude += np.linalg.norm(action, np.inf)
episode_len += 1
if done:
break
return total_reward, total_action_magnitude / episode_len, episode_len
def save_hparam_config(dict_to_save, config_dir):
"""Saves config file of hparam."""
filename = os.path.join(config_dir, 'hparam.pickle')
print('Saving results to %s' % filename)
if not tf.gfile.Exists(config_dir):
tf.gfile.MakeDirs(config_dir)
with tf.gfile.GFile(filename, 'w') as f:
pickle.dump(dict_to_save, f, protocol=2)
def action_projection(action, action_spec, softmax=False):
"""Projects action tensor onto a bound."""
if isinstance(action, np.ndarray):
if softmax:
      # keepdims keeps the per-row max/sum broadcastable for batched actions.
      e_x = np.exp(action - np.max(action, axis=1, keepdims=True))
      return e_x / np.sum(e_x, axis=1, keepdims=True)
else:
return np.minimum(action_spec.maximum,
np.maximum(action_spec.minimum, action))
else:
# TF version
if softmax:
return tf.nn.softmax(action, axis=1)
else:
return tf.minimum(action_spec.maximum,
tf.maximum(action_spec.minimum, action))
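# Worked example for action_projection above (hypothetical spec with
# minimum=-1 and maximum=1): a numpy action [[1.7, -0.3]] is clipped to
# [[1.0, -0.3]]; with softmax=True, [[2.0, 0.0]] becomes roughly [[0.88, 0.12]].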
def create_placeholders_for_q_net(tf_vars):
"""Creates placeholders for feeding values to TF variables.
Args:
tf_vars: list. A list of TF variables. These are variables for a neural
network approximating a Q function.
Returns:
dict. A dictionary mapping a string to a tf.placeholder.
"""
ph_dict = {}
for var in tf_vars:
ph_dict['{}_ph'.format(var.name)] = tf.placeholder(
dtype=var.dtype, shape=var.shape)
return ph_dict
def build_dummy_q_net(state, action, ph_dict, q_net_vars):
"""Builds a dummy Q network.
This function builds a neural network where parameters are given by
placeholders.
Args:
state: TF Tensor. State tensor.
action: TF Tensor. Action tensor.
ph_dict: dict. A dictionary mapping a TF variable's name to a
tf.placeholder. There is one placeholder for each variable in
`q_net_vars`.
q_net_vars: list. A list of TF variables. The list should have even number
of variables. One for weights and other for bias for each layer of a
neural network.
Returns:
TF Tensor. Output tensor of a Q network.
"""
assert bool(q_net_vars) and len(q_net_vars) % 2 == 0
net = tf.concat([state, action], axis=1)
# Specific for MLP
for itr, var in enumerate(q_net_vars):
if itr % 2 == 0:
# even itr, multiplicative weights
net = tf.einsum('ij,jk->ik', net, ph_dict['{}_ph'.format(var.name)])
else:
# odd itr, additive weights
net = tf.nn.bias_add(net, ph_dict['{}_ph'.format(var.name)])
# Output layer doesn't have an activation function.
if itr < len(q_net_vars) - 1:
net = tf.nn.relu(net)
return net
def make_tf_summary_histogram(values, num_bins=10):
"""Constructs a tf Summary of type histogram from a np array of values.
Args:
values: list or np.array.
num_bins: int. Number of histogram bins.
Returns:
tf.HistogramProto.
"""
values = np.reshape(values, [-1])
counts, limits = np.histogram(values, bins=num_bins)
return tf.HistogramProto(
min=np.amin(values),
max=np.amax(values),
num=values.size,
sum=np.sum(values),
sum_squares=np.sum(values**2),
bucket_limit=limits.tolist()[1:],
bucket=counts.tolist())
|
externals/skia/third_party/externals/sfntly/cpp/tools/utils.py | terrajobst/linux-packaging-skiasharp | 2,151 | 12761297 |
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions used by multiple scripts."""
import os
def GetFontList(path, exts, negate=False):
"""Recursively gets the list of files that from path such that."""
# negate = False: files that match one of the extensions in exts.
# negate = True: files that match no extension in exts.
paths = []
# for root, dirs, files in os.walk(path): makes the lint tool unhappy
# because of dirs being unused :(
for entry in os.walk(path):
root = entry[0]
files = entry[2]
for path in files:
has_ext_list = map(lambda ext: path[-len(ext):] == ext, exts)
result = reduce(lambda a, h: a or h, has_ext_list, False)
# normal: we want to include a file that matches at least one extension
# negated: we want to include a file that matches no extension
if negate != result:
paths.append(os.path.join(root, path))
return paths
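# Usage sketch (hypothetical path): GetFontList('/fonts', ['.ttf', '.otf']) returns
# every .ttf/.otf file under /fonts; with negate=True it returns the remaining files.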
def GetLevelList(path, max_level=1, negate=False):
"""Recursively gets the list of files that from path such that."""
# negate = False: files that are at most |max_level|s deep.
# negate = True: files that are more than |max_level|s deep.
paths = []
for entry in os.walk(path):
root = entry[0]
files = entry[2]
for path in files:
root_path = os.path.join(root, path)
level = path.count(os.path.sep)
if (not negate and level <= max_level) or (negate and level > max_level):
paths.append(root_path)
return paths
def FixPath(path):
if path[-1] != '/':
return path + '/'
return path
|
payloads/promethea.py | k3170makan/PyMLProjects | 156 | 12761311 | #!/usr/bin/python
import numpy
from random import random
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from sys import argv
from sys import stdout
from sys import exit
import model
from model import PasswordLSTM
"""
Promethea - a mystical female half-god who walks between the real and the Immateria
(the realm of the idealistic real), granting mankind access to this magical realm that makes anything possible.
Promethea is meant to be a simple front end to the LSTM model so that it can be plugged into other tools
like Burp, ZapProxy, a terminal etc. All you do is call this script and give it a payload; it returns the autocompletion
according to the way you trained it and the weights file you give it.
class Promethea:
def __init__(self,payload_filename, - name of the file with the payloads used for training
weights_filename, - name of the file holding the trained weights
payload, - string of characters used as the seed for prediction
nchars - number of characters to predict):
Fuzzing with Promethea:
1 - payload "autocomplete" mode (here's some input that is well formed, what do you think would be a good
way to complete this IF IT WERE a payload actually?)
2 - blind payload generation (just spit out what you know to spit out)
"""
class Promethea:
def __init__(self,payload_filename,weights_filename,payload,nchars):
self.payload_filename = payload_filename
self.weights_filename = weights_filename
self.prep_data(self.payload_filename,payload)
self.init_payload = self.payload
self.lstm = PasswordLSTM(self.X,self.y)
self.lstm.load_weights(weights_filename)
self.predict_length = nchars
"""
	Returns the next `nchars` characters predicted from the seed payload
	Args:
	none - the seed passed to the constructor is used as the starting sequence
	Returns:
	(str) - the predicted continuation of the seed
"""
def predict(self):
return self.get_next(self.init_payload)
def get_next(self,seed):
outstring = ""
for i in range(self.predict_length):
x = numpy.reshape(seed,(1,len(seed),1))
x = x / float(self.n_vocab)
prediction = self.lstm.predict(x,verbose=0)
index = numpy.argmax(prediction)
result = self.int_to_char[index]
outstring = outstring + result
seed.append(index)
seed = seed[1:len(seed)]
return outstring
"""
prep_data(data_filename,
payload)
	Prepares the data to feed to the network for prediction.
	The Keras Sequential model needs a representation of the vocab we taught it to generate from;
	essentially it only spits out character positions in a table of all possible characters - so if you want
	her to speak payloads you need to give her that list of chars she was trained on.
Args:
input_file (string) - list of payloads promethea was trained on (we might move over to a simpler
vocab reload mechanism perhaps since this is annoying)
Returns:
(x <list>) - x a hot encoding of the vocabulary holding initial character sequences
"""
def prep_data(self,data_filename,payload):
seq_length = model.SEQ_LEN #need to make this SEQ_LEN an LSTM attribute rather than model level one
raw_text = open(data_filename).read()
self.chars = sorted(list(set(raw_text)))
self.n_chars = len(raw_text)
self.n_vocab = len(self.chars)
self.int_to_char = dict((i,c) for i,c in enumerate(self.chars))
self.char_to_int = dict((c,i) for i,c in enumerate(self.chars))
self.payload = [self.char_to_int[char] for char in payload]
dataX = []
dataY = []
for i in range(self.n_chars - seq_length):
seq_in = raw_text[i:i + seq_length]
seq_out = raw_text[i + seq_length]
dataX.append([self.char_to_int[char] for char in seq_in])
dataY.append(self.char_to_int[seq_out])
self.n_patterns = len(dataX)
X = numpy.reshape(dataX,(self.n_patterns,seq_length,1))
self.X = X / float(self.n_vocab)
self.y = np_utils.to_categorical(dataY)
if __name__=="__main__":
seq_length = model.SEQ_LEN
#ill modularize this eventually
if len(argv) != 5:
print "Usage: %s [payload] [nchars] [data file] [weights filename]" % (argv[0])
print "Example: %s 'javascript' 100 awesome_polyglots.txt weights-for-generating-xss-payloads.txt" % (argv[0])
print "Example: %s 'body onload=' 100 more_polyglots.txt weights-for-generating-phpxss.txt" % (argv[0])
exit(1)
payload = argv[1]
print "[*] Seed: '%s'\n" % payload
nchars = int(argv[2])
data_filename = argv[3]
#generate using LSTM network
weights_filename = argv[4]
promethea = Promethea(data_filename,weights_filename,payload,nchars)
print promethea.predict()
|
src/ralph/assets/models/assets.py | pinoatrome/ralph | 1,668 | 12761323 |
# -*- coding: utf-8 -*-
import datetime
import logging
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from ralph.accounts.models import Team
from ralph.admin.autocomplete import AutocompleteTooltipMixin
from ralph.assets.models.base import BaseObject
from ralph.assets.models.choices import (
ModelVisualizationLayout,
ObjectModelType
)
from ralph.lib.custom_fields.models import (
CustomFieldMeta,
WithCustomFieldsMixin
)
from ralph.lib.mixins.fields import NullableCharField
from ralph.lib.mixins.models import (
AdminAbsoluteUrlMixin,
NamedMixin,
PriceMixin,
TimeStampMixin
)
from ralph.lib.permissions import PermByFieldMixin
from ralph.lib.permissions.models import PermissionsBase
logger = logging.getLogger(__name__)
class AssetHolder(
AdminAbsoluteUrlMixin,
NamedMixin.NonUnique,
TimeStampMixin,
models.Model
):
pass
class BusinessSegment(AdminAbsoluteUrlMixin, NamedMixin, models.Model):
pass
class ProfitCenter(AdminAbsoluteUrlMixin, NamedMixin, models.Model):
description = models.TextField(blank=True)
class Environment(
AdminAbsoluteUrlMixin, NamedMixin, TimeStampMixin, models.Model
):
pass
class Service(
PermByFieldMixin,
AdminAbsoluteUrlMixin,
NamedMixin,
TimeStampMixin,
models.Model
):
# Fixme: let's do service catalog replacement from that
_allow_in_dashboard = True
active = models.BooleanField(default=True)
uid = NullableCharField(max_length=40, unique=True, blank=True, null=True)
profit_center = models.ForeignKey(ProfitCenter, null=True, blank=True)
business_segment = models.ForeignKey(BusinessSegment, null=True, blank=True)
cost_center = models.CharField(max_length=100, blank=True)
environments = models.ManyToManyField(
'Environment', through='ServiceEnvironment'
)
business_owners = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='services_business_owner',
blank=True,
)
technical_owners = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='services_technical_owner',
blank=True,
)
support_team = models.ForeignKey(
Team, null=True, blank=True, related_name='services',
)
def __str__(self):
return '{}'.format(self.name)
@classmethod
def get_autocomplete_queryset(cls):
return cls._default_manager.filter(active=True)
class ServiceEnvironment(
AdminAbsoluteUrlMixin,
AutocompleteTooltipMixin,
BaseObject
):
_allow_in_dashboard = True
service = models.ForeignKey(Service)
environment = models.ForeignKey(Environment)
autocomplete_tooltip_fields = [
'service__business_owners',
'service__technical_owners',
'service__support_team',
]
def __str__(self):
return '{} - {}'.format(self.service.name, self.environment.name)
class Meta:
unique_together = ('service', 'environment')
ordering = ('service__name', 'environment__name')
@property
def service_name(self):
return self.service.name
@property
def service_uid(self):
return self.service.uid
@property
def environment_name(self):
return self.environment.name
@classmethod
def get_autocomplete_queryset(cls):
return cls._default_manager.filter(service__active=True)
class ManufacturerKind(AdminAbsoluteUrlMixin, NamedMixin, models.Model):
pass
class Manufacturer(
AdminAbsoluteUrlMixin,
NamedMixin,
TimeStampMixin,
models.Model
):
_allow_in_dashboard = True
manufacturer_kind = models.ForeignKey(
ManufacturerKind, verbose_name=_('manufacturer kind'),
null=True,
blank=True,
on_delete=models.SET_NULL,
)
AssetModelMeta = type('AssetModelMeta', (CustomFieldMeta, PermissionsBase), {})
class AssetModel(
PermByFieldMixin,
NamedMixin.NonUnique,
TimeStampMixin,
AdminAbsoluteUrlMixin,
WithCustomFieldsMixin,
models.Model,
metaclass=AssetModelMeta
):
# TODO: should type be determined based on category?
_allow_in_dashboard = True
type = models.PositiveIntegerField(
verbose_name=_('type'), choices=ObjectModelType(),
)
manufacturer = models.ForeignKey(
Manufacturer, on_delete=models.PROTECT, blank=True, null=True
)
category = TreeForeignKey(
'Category', null=True, related_name='models'
)
power_consumption = models.PositiveIntegerField(
verbose_name=_("Power consumption"),
default=0,
)
height_of_device = models.FloatField(
verbose_name=_("Height of device"),
default=0,
validators=[MinValueValidator(0)],
)
cores_count = models.PositiveIntegerField(
verbose_name=_("Cores count"),
default=0,
)
visualization_layout_front = models.PositiveIntegerField(
verbose_name=_("visualization layout of front side"),
choices=ModelVisualizationLayout(),
default=ModelVisualizationLayout().na.id,
blank=True,
)
visualization_layout_back = models.PositiveIntegerField(
verbose_name=_("visualization layout of back side"),
choices=ModelVisualizationLayout(),
default=ModelVisualizationLayout().na.id,
blank=True,
)
# Used in the visualization Data Center as is_blade
has_parent = models.BooleanField(default=False)
class Meta:
verbose_name = _('model')
verbose_name_plural = _('models')
def __str__(self):
if self.category_id:
return '[{}] {} {}'.format(
self.category, self.manufacturer, self.name
)
else:
return '{} {}'.format(
self.manufacturer, self.name
)
def _get_layout_class(self, field):
item = ModelVisualizationLayout.from_id(field)
return getattr(item, 'css_class', '')
def get_front_layout_class(self):
return self._get_layout_class(self.visualization_layout_front)
def get_back_layout_class(self):
return self._get_layout_class(self.visualization_layout_back)
class Category(
AdminAbsoluteUrlMixin,
MPTTModel,
NamedMixin.NonUnique,
TimeStampMixin,
models.Model
):
_allow_in_dashboard = True
code = models.CharField(max_length=4, blank=True, default='')
parent = TreeForeignKey(
'self',
null=True,
blank=True,
related_name='children',
db_index=True
)
imei_required = models.BooleanField(default=False)
allow_deployment = models.BooleanField(default=False)
show_buyout_date = models.BooleanField(default=False)
default_depreciation_rate = models.DecimalField(
blank=True,
decimal_places=2,
default=settings.DEFAULT_DEPRECIATION_RATE,
help_text=_(
'This value is in percentage.'
' For example value: "100" means it depreciates during a year.'
' Value: "25" means it depreciates during 4 years, and so on... .'
),
max_digits=5,
)
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
class MPTTMeta:
order_insertion_by = ['name']
def __str__(self):
return self.name
def get_default_depreciation_rate(self, category=None):
if category is None:
category = self
if category.default_depreciation_rate:
return category.default_depreciation_rate
elif category.parent:
return self.get_default_depreciation_rate(category.parent)
return 0
class AssetLastHostname(models.Model):
prefix = models.CharField(max_length=30, db_index=True)
counter = models.PositiveIntegerField(default=1)
postfix = models.CharField(max_length=30, db_index=True)
class Meta:
unique_together = ('prefix', 'postfix')
def formatted_hostname(self, fill=5):
return '{prefix}{counter:0{fill}}{postfix}'.format(
prefix=self.prefix,
counter=int(self.counter),
fill=fill,
postfix=self.postfix,
)
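    # Worked example (hypothetical values): prefix='srv', counter=42, postfix='.dc1'
    # gives formatted_hostname(fill=5) == 'srv00042.dc1'.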
@classmethod
# TODO: select_for_update
def increment_hostname(cls, prefix, postfix=''):
obj, created = cls.objects.get_or_create(
prefix=prefix,
postfix=postfix,
)
if not created:
# F() avoid race condition problem
obj.counter = models.F('counter') + 1
obj.save()
return cls.objects.get(pk=obj.pk)
else:
return obj
@classmethod
def get_next_free_hostname(
cls, prefix, postfix, fill=5, availability_checker=None, _counter=1
):
try:
last_hostname = cls.objects.get(prefix=prefix, postfix=postfix)
except cls.DoesNotExist:
last_hostname = cls(prefix=prefix, postfix=postfix, counter=0)
last_hostname.counter += _counter
hostname = last_hostname.formatted_hostname(fill=fill)
if availability_checker is None or availability_checker(hostname):
return hostname
else:
return cls.get_next_free_hostname(
prefix, postfix, fill, availability_checker, _counter + 1
)
def __str__(self):
return self.formatted_hostname()
class BudgetInfo(
AdminAbsoluteUrlMixin,
NamedMixin,
TimeStampMixin,
models.Model
):
class Meta:
verbose_name = _('Budget info')
verbose_name_plural = _('Budgets info')
def __str__(self):
return self.name
class Asset(AdminAbsoluteUrlMixin, PriceMixin, BaseObject):
model = models.ForeignKey(
AssetModel, related_name='assets', on_delete=models.PROTECT
)
# TODO: unify hostname for DCA, VirtualServer, Cluster and CloudHost
# (use another model?)
hostname = NullableCharField(
blank=True,
default=None,
max_length=255,
null=True,
verbose_name=_('hostname'), # TODO: unique
)
sn = NullableCharField(
blank=True,
max_length=200,
null=True,
verbose_name=_('SN'),
unique=True,
)
barcode = NullableCharField(
blank=True,
default=None,
max_length=200,
null=True,
unique=True,
verbose_name=_('barcode')
)
niw = NullableCharField(
blank=True,
default=None,
max_length=200,
null=True,
verbose_name=_('inventory number'),
)
required_support = models.BooleanField(default=False)
order_no = models.CharField(
verbose_name=_('order number'),
blank=True,
max_length=50,
null=True,
)
invoice_no = models.CharField(
verbose_name=_('invoice number'),
blank=True,
db_index=True,
max_length=128,
null=True,
)
invoice_date = models.DateField(blank=True, null=True)
# to discuss: foreign key?
provider = models.CharField(
blank=True,
max_length=100,
null=True,
)
depreciation_rate = models.DecimalField(
blank=True,
decimal_places=2,
default=settings.DEFAULT_DEPRECIATION_RATE,
help_text=_(
'This value is in percentage.'
' For example value: "100" means it depreciates during a year.'
' Value: "25" means it depreciates during 4 years, and so on... .'
),
max_digits=5,
)
force_depreciation = models.BooleanField(
help_text=(
'Check if you no longer want to bill for this asset'
),
default=False,
)
depreciation_end_date = models.DateField(blank=True, null=True)
buyout_date = models.DateField(blank=True, null=True, db_index=True)
task_url = models.URLField(
blank=True,
help_text=('External workflow system URL'),
max_length=2048,
null=True,
)
budget_info = models.ForeignKey(
BudgetInfo,
blank=True,
default=None,
null=True,
on_delete=models.PROTECT,
)
property_of = models.ForeignKey(
AssetHolder,
on_delete=models.PROTECT,
null=True,
blank=True,
)
start_usage = models.DateField(
blank=True,
null=True,
help_text=(
            'Fill it if date of first usage is different than date of creation'
)
)
def __str__(self):
return self.hostname or ''
def calculate_buyout_date(self):
"""
Get buyout date.
Calculate buyout date:
invoice_date + depreciation_rate months + custom buyout date delay
Returns:
Deprecation date
"""
if self.depreciation_end_date:
return self.depreciation_end_date
elif self.invoice_date:
months = self.get_depreciation_months() + 1 + \
settings.ASSET_BUYOUT_DELAY_MONTHS
return self.invoice_date + relativedelta(months=months)
else:
return None
def get_depreciation_months(self):
return int(
(1 / (self.depreciation_rate / 100) * 12)
if self.depreciation_rate else 0
)
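    # Worked example: depreciation_rate=25 gives int(1 / 0.25 * 12) = 48 months,
    # i.e. the four years mentioned in the field's help text.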
def is_depreciated(self, date=None):
date = date or datetime.date.today()
if self.force_depreciation or not self.invoice_date:
return True
if self.depreciation_end_date:
            deprecation_date = self.depreciation_end_date
else:
deprecation_date = self.invoice_date + relativedelta(
months=self.get_depreciation_months(),
)
return deprecation_date < date
def get_depreciated_months(self):
# DEPRECATED
# BACKWARD_COMPATIBILITY
return self.get_depreciation_months()
def is_deprecated(self, date=None):
# DEPRECATED
# BACKWARD_COMPATIBILITY
return self.is_depreciated()
def _liquidated_at(self, date):
liquidated_history = self.get_history().filter(
new_value='liquidated',
field_name='status',
).order_by('-date')[:1]
return liquidated_history and liquidated_history[0].date.date() <= date
def clean(self):
if not self.sn and not self.barcode:
error_message = [_('SN or BARCODE field is required')]
raise ValidationError(
{
'sn': error_message,
'barcode': error_message
}
)
def save(self, *args, **kwargs):
# if you save barcode as empty string (instead of None) you could have
# only one asset with empty barcode (because of `unique` constraint)
# if you save barcode as None you could have many assets with empty
        # barcode (because `unique` constraint is skipped)
for unique_field in ['barcode', 'sn']:
value = getattr(self, unique_field, None)
if value == '':
value = None
setattr(self, unique_field, value)
if not self.buyout_date:
self.buyout_date = self.calculate_buyout_date()
return super(Asset, self).save(*args, **kwargs)
|
wouso/games/grandchallenge/models.py | AlexandruGhergut/wouso | 117 | 12761332 | from django.db import models
from django.db.models import Q, Max
import logging
from wouso.core.config.models import IntegerSetting
from wouso.core.game.models import Game
from wouso.core.user.models import Player
from wouso.games.challenge.models import Challenge, ChallengeUser
class GrandChallengeUser(Player):
""" Extension of the user profile for GrandChallenge """
lost = models.IntegerField(default=0)
last_round = models.IntegerField(default=0)
def get_challenges(self):
"""
Return a queryset of grandchallenges for this player
"""
return Challenge.objects.filter(id__in=GrandChallenge.objects.filter(Q(challenge__user_from__user__id=self.id)|Q(challenge__user_to__user__id=self.id)).order_by('round').values('challenge'))
def get_active(self):
"""
Return a list of active GrandChallenges for this user
"""
return self.get_challenges().filter(status='A')
def get_played(self):
"""
Return a list of played GrandChallenges, ordered by round
"""
return self.get_challenges().filter(status__in=('D', 'P'))
def increase_lost(self):
self.lost += 1
self.save()
def set_last_round(self, round_number):
self.last_round = round_number
self.save()
class GrandChallenge(models.Model):
challenge = models.ForeignKey(Challenge, blank=True, null=True)
round = models.IntegerField(blank=True, null=True)
ALL = []
OUT_PLAY = []
CHALLENGES= []
def __oldinit__(self, user_from, user_to):
# TODO: change this constructor to a classmethod
if not GrandChallengeGame.is_final() and not GrandChallengeGame.is_winner():
self.branch = max(user_from.lost, user_to.lost)
else:
self.branch = min(user_from.lost, user_to.lost)
self.user_from = user_from
self.user_to = user_to
self.__class__.ALL.append(self)
self.won, self.lost = None, None
self.active = True
self.round_number = None
challenge_user_to = user_to.user.get_profile().get_extension(ChallengeUser)
challenge_user_from = user_from.user.get_profile().get_extension(ChallengeUser)
chall = Challenge.create(challenge_user_from, challenge_user_to)
chall.accept()
self.challenge_id = chall.id
self.__class__.CHALLENGES.append(chall.id)
@classmethod
def create(cls, user_from, user_to, round):
""" Create a new Challenge and automatically accept it.
"""
grand_challenge = cls.objects.create(round=round)
user_from = user_from.user.get_profile()
user_to = user_to.user.get_profile()
grand_challenge.challenge = Challenge.create(user_from.get_extension(ChallengeUser), user_to.get_extension(ChallengeUser))
grand_challenge.challenge.accept()
grand_challenge.save()
return grand_challenge
@classmethod
def get_challenges(cls):
return cls.ALL
@classmethod
def active(cls):
return filter(lambda c: c.active, cls.ALL)
@classmethod
def all_done(cls):
for i in cls.CHALLENGES:
x = Challenge.objects.get(id = i)
if x.status != "P":
return False
return True
def play(self, round_number):
        winner = Challenge.objects.get(id=self.challenge_id).winner  # must be generated by the game
if winner.user == self.user_from.user:
self.won = self.user_from
self.lost = self.user_to
self.user_to.lost += 1
else:
self.won = self.user_to
self.lost = self.user_from
self.user_from.lost += 1
self.active = False
self.round_number = round_number
@classmethod
def played_with(cls, user):
ret = []
for c in [c for c in cls.ALL if not c.active]:
if c.user_from == user:
ret.append(c.user_to)
elif c.user_to == user:
ret.append(c.user_from)
return ret
@classmethod
def joaca(cls, round_number):
for c in GrandChallenge.active():
            # the round number is passed through to play()
c.play(round_number)
if(c.lost.lost == 2):
cls.OUT_PLAY.append(c.lost)
#print c.lost
@classmethod
def clasament(cls):
arb_win = GrandChallengeGame.eligible(0)
arb_lose = GrandChallengeGame.eligible(1)
if(len(arb_win) == 1):
cls.OUT_PLAY.append(arb_win[0])
if(len(arb_lose) == 1):
cls.OUT_PLAY.append(arb_lose[0])
results = cls.OUT_PLAY
results.reverse()
return results
class Round(object):
def __init__(self, round_number):
self.round_number = int(round_number)
def challenges(self):
"""
Return a list of challenges in this round, ordered by status
"""
return [gc.challenge for gc in GrandChallenge.objects.filter(round=self.round_number).order_by('challenge__status')]
def info(self):
"""
Return a dictionary with information about this round
"""
return {}
def participants(self):
ps = set([c.user_from.user for c in self.challenges()] + [c.user_to.user for c in self.challenges()])
ps = map(lambda a: a.get_extension(GrandChallengeUser), ps)
return ps
def rounds(self):
"""
Return a list of previous rounds, as an iterator
"""
if self.round_number > 0:
for i in range(self.round_number):
yield Round(i + 1)
def __repr__(self):
return '<' + 'Round ' + unicode(self.round_number) + '>'
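# Flow sketch (an assumption pieced together from the class defined below, not code
# taken from the project): a bracket is normally driven through GrandChallengeGame,
# roughly:
#
#   GrandChallengeGame.start()        # pair up eligible players for round 1
#   GrandChallengeGame.round_next()   # close the current round and create the next pairings
#   GrandChallengeGame.get_winner()   # available once is_finished() returns True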
class GrandChallengeGame(Game):
ALL = []
round_number = 0
def __init__(self, *args, **kwargs):
# Set parent's fields
self._meta.get_field('verbose_name').default = "GrandChallenges"
self._meta.get_field('short_name').default = ""
# the url field takes as value only a named url from module's urls.py
self._meta.get_field('url').default = "grandchallenge_index_view"
super(GrandChallengeGame, self).__init__(*args, **kwargs)
@classmethod
def base_query(cls):
return GrandChallengeUser.objects.exclude(user__is_superuser=True).exclude(race__can_play=False)
@classmethod
def is_started(cls):
setting_round = IntegerSetting.get('gc_round')
return setting_round.get_value() > 0
@classmethod
def reset(cls):
"""
        Reset the GC game: delete all grand challenges and set every user's lost count to 0
"""
GrandChallenge.objects.all().delete()
GrandChallengeUser.objects.update(lost=0, last_round=0)
cls.set_current_round(0)
@classmethod
def create_users(cls):
"""
        Create GrandChallengeUser extensions for all eligible players.
"""
for p in Player.objects.exclude(race__can_play=False):
p.get_extension(GrandChallengeUser)
@classmethod
def start(cls):
"""
        Create challenges for each pair of consecutive players. Return a list of the created challenges.
"""
cls.create_users()
challenges = []
round = 1
last = None
for user in cls.base_query():
u = user.user.get_profile()
if last is None:
last = u
else:
c = GrandChallenge.create(u, last, round)
challenges.append(c)
last = None
setting_round = IntegerSetting.get('gc_round')
setting_round.set_value(round)
return challenges
@classmethod
def eligible(cls, lost_count):
""" Return a queryset with players of lost_count
"""
return cls.base_query().filter(lost=lost_count)
@classmethod
def is_final(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
if (len(arb_win) == 1) and (len(arb_lose) == 1):
return True
return False
@classmethod
def final_round(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
GrandChallenge(arb_win[0], arb_lose[0])
@classmethod
def final_second_round(cls):
GrandChallengeGame.play_round(1)
@classmethod
def is_winner(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
if (len(arb_win) == 0) and (len(arb_lose) == 2):
return False
return True
@classmethod
def is_finished(cls):
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
if len(arb_win) == 0 or (len(arb_win) == 1 and len(arb_lose) != 1):
return True
return False
@classmethod
def play_round(cls, lost_count, round_number):
"""
Create new challenges.
"""
if lost_count == 0:
all = GrandChallengeGame.eligible(0)
elif lost_count == 1:
all = GrandChallengeGame.eligible(1)
all = list(all)
challenges = []
while len(all):
u = all[0]
played_with = GrandChallenge.played_with(u)
adversari = [eu for eu in all if ((eu.lost == u.lost) and (eu != u) and ((eu not in played_with) or (eu == all[-1])) )]
if not len(adversari):
break
try:
adversar = adversari[0]
all.remove(adversar)
all.remove(u)
c = GrandChallenge.create(u, adversar, round_number)
challenges.append(c)
except Exception as e:
logging.exception(e)
return challenges
@classmethod
def set_current_round(cls, number):
setting_round = IntegerSetting.get('gc_round')
setting_round.set_value(number)
@classmethod
def get_current_round(cls):
setting_round = IntegerSetting.get('gc_round')
round = setting_round.get_value()
if round == 0:
return None
return cls.get_round(round)
@classmethod
def get_round(cls, round):
return Round(round_number=round)
@classmethod
def get_winner(cls):
"""
Return gc winner
"""
if cls.is_finished():
final_gc = GrandChallenge.objects.filter(round=cls.get_current_round().round_number)[0]
return final_gc.challenge.winner.user.get_profile()
return None
@classmethod
def force_round_close(cls, round):
"""
Finish every challenge in the round
"""
for c in round.challenges():
if c.is_runnable():
c.set_expired()
if c.is_draw():
# Temporary hack FIXME
if c.user_from.seconds_took < c.user_to.seconds_took:
c.set_won_by_player(c.user_from.user)
else:
c.set_won_by_player(c.user_to.user)
gc_user_from = c.user_from.user.get_extension(GrandChallengeUser)
gc_user_to = c.user_to.user.get_extension(GrandChallengeUser)
            # Update the lost count
if c.user_from.user == c.winner:
if gc_user_to.last_round < round.round_number:
gc_user_to.increase_lost()
elif c.user_to.user == c.winner:
if gc_user_from.last_round < round.round_number:
gc_user_from.increase_lost()
gc_user_from.set_last_round(round.round_number)
gc_user_to.set_last_round(round.round_number)
@classmethod
def round_next(cls):
"""
Progress to next round
"""
if cls.is_finished():
logging.error('Grand challenge finished.')
return None
round = cls.get_current_round()
cls.force_round_close(round)
challenges = []
if cls.is_final():
# Only two players left in the game
arb_win = cls.eligible(0)
arb_lose = cls.eligible(1)
challenges.append(GrandChallenge.create(arb_win[0], arb_lose[0], round.round_number + 1))
else:
# More than two players, create new challenges
if round.round_number % 2 == 1:
challenges += cls.play_round(1, round.round_number + 1)
challenges += cls.play_round(0, round.round_number + 1)
else:
challenges += cls.play_round(1, round.round_number + 1)
if challenges:
# Update round number
round.round_number += 1
cls.set_current_round(round.round_number)
logging.debug('Played round %s' % round.round_number)
        return round
|
rdtools/test/filtering_test.py | kperrynrel/rdtools | 107 | 12761348 | <filename>rdtools/test/filtering_test.py
""" Filtering Module Tests. """
import pytest
import pandas as pd
import numpy as np
from rdtools import (csi_filter,
poa_filter,
tcell_filter,
clip_filter,
quantile_clip_filter,
normalized_filter,
logic_clip_filter,
xgboost_clip_filter)
import warnings
def test_csi_filter():
''' Unit tests for clear sky index filter.'''
measured_poa = np.array([1, 1, 0, 1.15, 0.85])
clearsky_poa = np.array([1, 2, 1, 1.00, 1.00])
filtered = csi_filter(measured_poa,
clearsky_poa,
threshold=0.15)
# Expect clearsky index is filtered with threshold of +/- 0.15.
expected_result = np.array([True, False, False, True, True])
assert filtered.tolist() == expected_result.tolist()
def test_poa_filter():
''' Unit tests for plane of array insolation filter.'''
measured_poa = np.array([201, 1199, 500, 200, 1200])
filtered = poa_filter(measured_poa,
poa_global_low=200,
poa_global_high=1200)
# Expect high and low POA cutoffs to be non-inclusive.
expected_result = np.array([True, True, True, False, False])
assert filtered.tolist() == expected_result.tolist()
def test_tcell_filter():
''' Unit tests for cell temperature filter.'''
tcell = np.array([-50, -49, 0, 109, 110])
filtered = tcell_filter(tcell,
temperature_cell_low=-50,
temperature_cell_high=110)
# Expected high and low tcell cutoffs to be non-inclusive.
expected_result = np.array([False, True, True, True, False])
assert filtered.tolist() == expected_result.tolist()
@pytest.fixture
def generate_power_time_series_no_clipping():
power_no_datetime_index = pd.Series(np.arange(1, 101))
power_datetime_index = pd.Series(np.arange(1, 101))
# Add datetime index to second series
time_range = pd.date_range('2016-12-02T11:00:00.000Z',
'2017-06-06T07:00:00.000Z', freq='H')
power_datetime_index.index = pd.to_datetime(time_range[:100])
# Create a series that is tz-naive to test on
power_datetime_index_tz_naive = power_datetime_index.copy()
power_datetime_index_tz_naive.index = \
power_datetime_index_tz_naive.index.tz_localize(None)
# Note: Power is expected to be Series object with a datetime index.
return power_no_datetime_index, power_datetime_index, \
power_datetime_index_tz_naive
@pytest.fixture
def generate_power_time_series_irregular_intervals():
power_datetime_index = pd.Series(np.arange(1, 62))
# Add datetime index to second series
time_range_1 = pd.date_range('2016-12-02T11:00:00.000Z',
'2017-06-06T07:00:00.000Z', freq='1T')
power_datetime_index.index = pd.to_datetime(time_range_1[:61])
power_datetime_index_2 = pd.Series(np.arange(100, 200))
time_range_2 = pd.date_range(power_datetime_index.index.max(),
'2017-06-06T07:00:00.000Z', freq='15T')
power_datetime_index_2.index = pd.to_datetime(time_range_2[:100])
power_datetime_index_2 = power_datetime_index_2.iloc[1:]
power_datetime_index = pd.concat([power_datetime_index,
power_datetime_index_2])
power_datetime_index_3 = pd.Series(list(reversed(np.arange(100, 200))))
time_range_3 = pd.date_range(power_datetime_index.index.max(),
'2017-06-06T07:00:00.000Z', freq='5T')
power_datetime_index_3.index = pd.to_datetime(time_range_3[:100])
power_datetime_index_3 = power_datetime_index_3.iloc[1:]
power_datetime_index = pd.concat([power_datetime_index,
power_datetime_index_3])
power_datetime_index.sort_index()
# Note: Power is expected to be Series object with a datetime index.
return power_datetime_index
@pytest.fixture
def generate_power_time_series_one_min_intervals():
power_datetime_index = pd.Series(np.arange(1, 51))
power_datetime_index = pd.concat([power_datetime_index,
power_datetime_index[::-1]])
# Add datetime index to second series
time_range = pd.date_range('2016-12-02T11:00:00.000Z',
'2017-06-06T07:00:00.000Z', freq='1T')
power_datetime_index.index = pd.to_datetime(time_range[:100])
# Note: Power is expected to be Series object with a datetime index.
return power_datetime_index
@pytest.fixture
def generate_power_time_series_clipping():
power_no_datetime_index = pd.Series(np.arange(2, 101, 2))
power_no_datetime_index = pd.concat([power_no_datetime_index,
power_no_datetime_index[::-1]])
power_no_datetime_index[48:52] = 110
power_no_datetime_index = power_no_datetime_index.reset_index(drop=True)
power_datetime_index = power_no_datetime_index.copy()
# Add datetime index to second series
time_range = pd.date_range('2016-12-02T11:00:00.000Z',
'2017-06-06T07:00:00.000Z', freq='H')
power_datetime_index.index = pd.to_datetime(time_range[:100])
# Note: Power is expected to be Series object with a datetime index.
return power_no_datetime_index, power_datetime_index
def test_quantile_clip_filter():
''' Unit tests for inverter clipping filter.'''
power = pd.Series(np.arange(1, 101))
# Note: Power is expected to be Series object because clip_filter makes
# use of the Series.quantile() method.
filtered = quantile_clip_filter(power, quantile=0.98)
# Expect 99% of the 98th quantile to be filtered
expected_result = power < (98 * 0.99)
assert ((expected_result == filtered).all())
def test_logic_clip_filter(generate_power_time_series_no_clipping,
generate_power_time_series_clipping,
generate_power_time_series_one_min_intervals,
generate_power_time_series_irregular_intervals):
''' Unit tests for logic clipping filter.'''
power_no_datetime_index_nc, power_datetime_index_nc, power_nc_tz_naive = \
generate_power_time_series_no_clipping
# Test that a Type Error is raised when a pandas series
# without a datetime index is used.
pytest.raises(TypeError, logic_clip_filter,
power_no_datetime_index_nc)
# Test that an error is thrown when we don't include the correct
# mounting configuration input
pytest.raises(ValueError, logic_clip_filter,
power_datetime_index_nc, 'not_fixed')
# Test that an error is thrown when there are 10 or fewer readings
# in the time series
pytest.raises(Exception, logic_clip_filter,
power_datetime_index_nc[:9])
# Test that a warning is thrown when the time series is tz-naive
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
logic_clip_filter(power_nc_tz_naive)
# Warning thrown for it being an experimental filter + tz-naive
assert len(w) == 2
# Scramble the index and run through the filter. This should throw
# an IndexError.
power_datetime_index_nc_shuffled = power_datetime_index_nc.sample(frac=1)
pytest.raises(IndexError, logic_clip_filter,
power_datetime_index_nc_shuffled, 'fixed')
# Generate 1-minute interval data, run it through the function, and
# check that the associated data returned is 1-minute
power_datetime_index_one_min_intervals = \
generate_power_time_series_one_min_intervals
mask_one_min = logic_clip_filter(power_datetime_index_one_min_intervals)
    # Generate irregular interval data, and run it through the logic clipping filter
power_datetime_index_irregular = \
generate_power_time_series_irregular_intervals
# Make sure that the routine throws a warning when the data sampling
# frequency is less than 95% consistent
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
logic_clip_filter(power_datetime_index_irregular)
# Warning thrown for it being an experimental filter + irregular
# sampling frequency.
assert len(w) == 2
# Check that the returned time series index for the logic filter is
# the same as the passed time series index
mask_irregular = logic_clip_filter(power_datetime_index_irregular)
# Expect none of the sequence to be clipped (as it's
# constantly increasing)
mask_nc = logic_clip_filter(power_datetime_index_nc)
# Test the time series where the data is clipped
power_no_datetime_index_c, power_datetime_index_c = \
generate_power_time_series_clipping
# Expect 4 values in middle of sequence to be clipped (when x=50)
mask_c = logic_clip_filter(power_datetime_index_c)
filtered_c = power_datetime_index_c[mask_c]
assert bool(mask_nc.all(axis=None))
assert (len(filtered_c) == 96)
assert bool((mask_one_min.index.to_series().diff()[1:] ==
np.timedelta64(60, 's')).all(axis=None))
assert bool((mask_irregular.index == power_datetime_index_irregular.index)
.all(axis=None))
def test_xgboost_clip_filter(generate_power_time_series_no_clipping,
generate_power_time_series_clipping,
generate_power_time_series_one_min_intervals,
generate_power_time_series_irregular_intervals):
''' Unit tests for XGBoost clipping filter.'''
# Test the time series where the data isn't clipped
power_no_datetime_index_nc, power_datetime_index_nc, power_nc_tz_naive = \
generate_power_time_series_no_clipping
# Test that a Type Error is raised when a pandas series
# without a datetime index is used.
pytest.raises(TypeError, xgboost_clip_filter,
power_no_datetime_index_nc)
# Test that an error is thrown when we don't include the correct
# mounting configuration input
pytest.raises(ValueError, xgboost_clip_filter,
power_datetime_index_nc, 'not_fixed')
# Test that an error is thrown when there are 10 or fewer readings
# in the time series
pytest.raises(Exception, xgboost_clip_filter,
power_datetime_index_nc[:9])
# Test that a warning is thrown when the time series is tz-naive
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
xgboost_clip_filter(power_nc_tz_naive)
# Warning thrown for it being an experimental filter + tz-naive
assert len(w) == 2
# Scramble the index and run through the filter. This should throw
# an IndexError.
power_datetime_index_nc_shuffled = power_datetime_index_nc.sample(frac=1)
pytest.raises(IndexError, xgboost_clip_filter,
power_datetime_index_nc_shuffled, 'fixed')
# Generate 1-minute interval data, run it through the function, and
# check that the associated data returned is 1-minute
power_datetime_index_one_min_intervals = \
generate_power_time_series_one_min_intervals
mask_one_min = xgboost_clip_filter(power_datetime_index_one_min_intervals)
# Generate irregular interval data, and run it through the XGBoost model
power_datetime_index_irregular = \
generate_power_time_series_irregular_intervals
# Check that the returned time series index for XGBoost is the same
# as the passed time series index
mask_irregular = xgboost_clip_filter(power_datetime_index_irregular)
# Expect none of the sequence to be clipped (as it's
# constantly increasing)
mask_nc = xgboost_clip_filter(power_datetime_index_nc)
# Test the time series where the data is clipped
power_no_datetime_index_c, power_datetime_index_c = \
generate_power_time_series_clipping
# Expect 4 values in middle of sequence to be clipped (when x=50)
mask_c = xgboost_clip_filter(power_datetime_index_c)
filtered_c = power_datetime_index_c[mask_c]
assert bool(mask_nc.all(axis=None))
assert (len(filtered_c) == 96)
assert bool((mask_one_min.index.to_series().diff()[1:] ==
np.timedelta64(60, 's')).all(axis=None))
assert bool((mask_irregular.index == power_datetime_index_irregular.index)
.all(axis=None))
def test_clip_filter(generate_power_time_series_no_clipping):
''' Unit tests for inverter clipping filter.'''
# Create a time series to test
power_no_datetime_index_nc, power_datetime_index_nc, power_nc_tz_naive = \
generate_power_time_series_no_clipping
# Check that the master wrapper defaults to the
    # quantile_clip_filter function.
# Note: Power is expected to be Series object because clip_filter makes
# use of the Series.quantile() method.
filtered_quantile = clip_filter(power_no_datetime_index_nc, quantile=0.98)
# Expect 99% of the 98th quantile to be filtered
expected_result_quantile = power_no_datetime_index_nc < (98 * 0.99)
# Check that the clip filter defaults to quantile clip filter when
# deprecated params are passed
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
clip_filter(power_datetime_index_nc, 0.98)
assert len(w) == 1
# Check that a ValueError is thrown when a model is passed that
# is not in the acceptable list.
pytest.raises(ValueError, clip_filter,
power_datetime_index_nc,
'random_forest')
# Check that the wrapper handles the xgboost clipping
# function with kwargs.
filtered_xgboost = clip_filter(power_datetime_index_nc,
'xgboost',
mounting_type="fixed")
# Check that the wrapper handles the logic clipping
# function with kwargs.
filtered_logic = clip_filter(power_datetime_index_nc,
'logic',
mounting_type="fixed",
rolling_range_max_cutoff=0.3)
    # Check that the function raises a TypeError if a wrong keyword
    # arg is passed in the keyword arguments.
pytest.raises(TypeError, clip_filter, power_datetime_index_nc,
'xgboost',
rolling_range_max_cutoff=0.3)
assert bool((expected_result_quantile == filtered_quantile)
.all(axis=None))
assert bool(filtered_xgboost.all(axis=None))
assert bool(filtered_logic.all(axis=None))
def test_normalized_filter_default():
pd.testing.assert_series_equal(normalized_filter(pd.Series([-5, 5])),
pd.Series([False, True]))
pd.testing.assert_series_equal(normalized_filter(
pd.Series([-1e6, 1e6]),
energy_normalized_low=None,
energy_normalized_high=None),
pd.Series([True, True]))
pd.testing.assert_series_equal(normalized_filter(
pd.Series([-2, 2]),
energy_normalized_low=-1,
energy_normalized_high=1),
pd.Series([False, False]))
eps = 1e-16
pd.testing.assert_series_equal(normalized_filter(
pd.Series([0.01 - eps, 0.01 + eps, 1e308])),
pd.Series([False, True, True]))
|
t/test_salsa20.py | warmchang/umash | 108 | 12761399 | """
Quick smoke test that our implementation of salsa20 does the right thing.
"""
from hypothesis import given
import hypothesis.strategies as st
from Crypto.Cipher import Salsa20
from umash import C, FFI
@given(
length=st.integers(min_value=1, max_value=512),
nonce=st.binary(min_size=8, max_size=8),
key=st.binary(min_size=32, max_size=32),
)
def test_salsa20(length, nonce, key):
expected = Salsa20.new(key, nonce).encrypt(b"\x00" * length)
buf = FFI.new("char[]", length)
C.salsa20_stream(buf, length, nonce, key)
assert bytes(FFI.buffer(buf, length)) == expected
|
WebMirror/management/rss_parser_funcs/feed_parse_extractPenguTaichou.py | fake-name/ReadableWebProxy | 193 | 12761406 | def extractPenguTaichou(item):
"""
<NAME>
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if item['title'].lower().startswith('sword shisho chapter'):
return buildReleaseMessageWithType(item, 'I was a Sword when I Reincarnated!', vol, chp, frag=frag, postfix=postfix)
return False
|
src/falconpy/host_group.py | CrowdStrike/falconpy | 111 | 12761411 | """CrowdStrike Falcon Host Groups API interface class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import generate_error_result, force_default
from ._util import handle_single_argument, process_service_request
from ._payload import host_group_create_payload, host_group_update_payload
from ._payload import generic_payload_list
from ._service_class import ServiceClass
from ._endpoint._host_group import _host_group_endpoints as Endpoints
class HostGroup(ServiceClass):
"""The only requirement to instantiate an instance of this class is one of the following:
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (OAuth2.token())
"""
@force_default(defaults=["parameters"], default_types=["dict"])
def query_combined_group_members(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for members of a Host Group in your environment by providing an FQL filter
and paging details. Returns a set of host details which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
id -- The ID of the Host Group to search for members of. String
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. name|asc).
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryCombinedGroupMembers
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="queryCombinedGroupMembers",
keywords=kwargs,
params=parameters
)
@force_default(defaults=["parameters"], default_types=["dict"])
def query_combined_host_groups(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for Host Groups in your environment by providing an FQL filter and
paging details. Returns a set of Host Groups which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
Available filter fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from. Integer.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. created_timestamp|asc).
Available sort fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryCombinedHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="queryCombinedHostGroups",
keywords=kwargs,
params=parameters
)
@force_default(defaults=["body", "parameters"], default_types=["dict", "dict"])
def perform_group_action(self: object,
body: dict = None,
parameters: dict = None,
**kwargs
) -> dict:
"""Perform the specified action on the Host Groups specified in the request.
Keyword arguments:
action_name -- Action to perform on the host group. String.
Allowed values: 'add-hosts' or 'remove-hosts'.
action_parameters - List of dictionaries containing action specific parameter settings.
body -- full body payload, not required when using other keywords.
{
"action_parameters": [
{
"name": "string",
"value": "string"
}
],
"ids": [
"string"
]
}
ids -- List of host group IDs to perform an action against. String or list of strings.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/performGroupAction
"""
if not body:
body = generic_payload_list(submitted_keywords=kwargs,
payload_value="ids"
)
if kwargs.get("action_parameters", None):
body["action_parameters"] = kwargs.get("action_parameters", None)
# _allowed_actions = ['add-hosts', 'remove-hosts']
# operation_id = "performGroupAction"
# parameter_payload = args_to_params(parameters, kwargs, Endpoints, operation_id)
# action_name = parameter_payload.get("action_name", "Not Specified")
# act = kwargs.get("action_name", "Not Specified")
if kwargs.get("action_name", "Not Specified").lower() in ['add-hosts', 'remove-hosts']:
returned = process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="performGroupAction",
body=body,
keywords=kwargs,
params=parameters
)
else:
returned = generate_error_result("Invalid value specified for action_name parameter.")
return returned
@force_default(defaults=["parameters"], default_types=["dict"])
def get_host_groups(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Retrieve a set of Host Groups by specifying their IDs.
Keyword arguments:
ids -- List of host group IDs to retrieve. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/getHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="getHostGroups",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def create_host_groups(self: object, body: dict = None, **kwargs) -> dict:
"""Create Host Groups by specifying details about the group to create.
Keyword arguments:
assignment_rule -- Assignment rule to apply. String.
body -- full body payload, not required when using other keywords.
{
"resources": [
{
"assignment_rule": "string",
"description": "string",
"group_type": "static",
"name": "string"
}
]
}
description -- Description of the host group. String.
group_type -- Type of Host Group to create. String.
name -- The Host Group name. String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/createHostGroups
"""
if not body:
body = host_group_create_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="createHostGroups",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def delete_host_groups(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Delete a set of Host Groups by specifying their IDs.
Keyword arguments:
ids -- List of host group IDs to delete. String or list of strings.
parameters -- full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: DELETE
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/deleteHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="deleteHostGroups",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def update_host_groups(self: object, body: dict = None, **kwargs) -> dict:
"""
Update Host Groups by specifying the ID of the group and details to update.
Keyword arguments:
assignment_rule -- Assignment rule to apply. String.
body -- full body payload, not required when using other keywords.
{
"resources": [
{
"assignment_rule": "string",
"description": "string",
"id": "string",
"name": "string"
}
]
}
description -- Description of the host group. String.
id -- Host Group ID to be updated. String.
name -- The Host Group name. String.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: PATCH
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/updateHostGroups
"""
if not body:
body = host_group_update_payload(passed_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="updateHostGroups",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def query_group_members(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for members of a Host Group in your environment by providing an FQL filter
and paging details. Returns a set of Agent IDs which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
id -- The ID of the Host Group to search for members of. String.
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. name|asc).
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryGroupMembers
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="queryGroupMembers",
keywords=kwargs,
params=parameters
)
@force_default(defaults=["parameters"], default_types=["dict"])
def query_host_groups(self: object, parameters: dict = None, **kwargs) -> dict:
"""Search for Host Groups in your environment by providing an FQL filter and
paging details. Returns a set of Host Group IDs which match the filter criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
An asterisk wildcard '*' includes all results.
Available filter fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
limit -- The maximum number of records to return in this response. [Integer, 1-5000]
Use with the offset parameter to manage pagination of results.
offset -- The offset to start retrieving records from.
Use with the limit parameter to manage pagination of results.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax (e.g. created_timestamp|asc).
Available sort fields:
created_by modified_by
created_timestamp modified_timestamp
group_type name
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/host-group/queryHostGroups
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="queryHostGroups",
keywords=kwargs,
params=parameters
)
# These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8 and are defined here for
# backwards compatibility / ease of use purposes
queryCombinedGroupMembers = query_combined_group_members
queryCombinedHostGroups = query_combined_host_groups
performGroupAction = perform_group_action
getHostGroups = get_host_groups
createHostGroups = create_host_groups
deleteHostGroups = delete_host_groups
updateHostGroups = update_host_groups
queryGroupMembers = query_group_members
queryHostGroups = query_host_groups
# The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Host_Group = HostGroup # pylint: disable=C0103
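# Minimal usage sketch (illustrative only): the class docstring above describes
# instantiation with client_id / client_secret keywords, and query_host_groups()
# is defined in this module. The credential strings are placeholders, not real values.
if __name__ == "__main__":
    falcon = HostGroup(client_id="CLIENT_ID_HERE",          # placeholder
                       client_secret="CLIENT_SECRET_HERE")  # placeholder
    # Return up to 10 host group IDs, sorted by name using FQL sort syntax.
    print(falcon.query_host_groups(limit=10, sort="name|asc"))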
|
egs/wsj/s5/utils/data/extend_segment_times.py | shuipi100/kaldi | 805 | 12761430 | <filename>egs/wsj/s5/utils/data/extend_segment_times.py
#!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
from collections import defaultdict
parser = argparse.ArgumentParser(description="""
Usage: extend_segment_times.py [options] <input-segments >output-segments
This program pads the times in a 'segments' file (e.g. data/train/segments)
with specified left and right context (for cases where there was no
silence padding in the original segments file)""")
parser.add_argument("--start-padding", type = float, default = 0.1,
help="Amount of padding, in seconds, for the start time of "
"each segment (start times <0 will be set to zero).")
parser.add_argument("--end-padding", type = float, default = 0.1,
help="Amount of padding, in seconds, for the end time of "
"each segment.")
parser.add_argument("--last-segment-end-padding", type = float, default = 0.1,
help="Amount of padding, in seconds, for the end time of "
"the last segment of each file (maximum allowed).")
parser.add_argument("--fix-overlapping-segments", type = str,
default = 'true', choices=['true', 'false'],
help="If true, prevent segments from overlapping as a result "
"of the padding (or that were already overlapping)")
args = parser.parse_args()
# the input file will be a sequence of lines which are each of the form:
# <utterance-id> <recording-id> <start-time> <end-time>
# e.g.
# utt-1 recording-1 0.62 5.40
# The output will be in the same format and in the same
# order, except with modified times.
# This variable maps from a recording-id to a list of the utterance
# indexes (as integer indexes into 'entries')
# that are part of that recording.
recording_to_utt_indexes = defaultdict(list)
# This is an array of the entries in the segments file, in the format:
# (utterance-id as a string, recording-id as string,
# start-time as float, end-time as float)
entries = []
while True:
line = sys.stdin.readline()
if line == '':
break
try:
[ utt_id, recording_id, start_time, end_time ] = line.split()
start_time = float(start_time)
end_time = float(end_time)
except:
sys.exit("extend_segment_times.py: could not interpret line: " + line)
if not end_time > start_time:
print("extend_segment_times.py: bad segment (ignoring): " + line,
file = sys.stderr)
recording_to_utt_indexes[recording_id].append(len(entries))
entries.append([utt_id, recording_id, start_time, end_time])
num_times_fixed = 0
for recording, utt_indexes in recording_to_utt_indexes.items():
# this_entries is a list of lists, sorted on mid-time.
# Notice: because lists are objects, when we change 'this_entries'
# we change the underlying entries.
this_entries = sorted([ entries[x] for x in utt_indexes ],
key = lambda x : 0.5 * (x[2] + x[3]))
min_time = 0
max_time = max([ x[3] for x in this_entries ]) + args.last_segment_end_padding
start_padding = args.start_padding
end_padding = args.end_padding
for n in range(len(this_entries)):
this_entries[n][2] = max(min_time, this_entries[n][2] - start_padding)
this_entries[n][3] = min(max_time, this_entries[n][3] + end_padding)
for n in range(len(this_entries) - 1):
this_end_time = this_entries[n][3]
next_start_time = this_entries[n+1][2]
if this_end_time > next_start_time and args.fix_overlapping_segments == 'true':
midpoint = 0.5 * (this_end_time + next_start_time)
this_entries[n][3] = midpoint
this_entries[n+1][2] = midpoint
num_times_fixed += 1
# this prints a number with a certain number of digits after
# the point, while removing trailing zeros.
def FloatToString(f):
num_digits = 6 # we want to print 6 digits after the zero
g = f
while abs(g) > 1.0:
g *= 0.1
num_digits += 1
format_str = '%.{0}g'.format(num_digits)
return format_str % f
for entry in entries:
[ utt_id, recording_id, start_time, end_time ] = entry
if not start_time < end_time:
print("extend_segment_times.py: bad segment after processing (ignoring): " +
' '.join(entry), file = sys.stderr)
continue
print(utt_id, recording_id, FloatToString(start_time), FloatToString(end_time))
print("extend_segment_times.py: extended {0} segments; fixed {1} "
"overlapping segments".format(len(entries), num_times_fixed),
file = sys.stderr)
## test:
# (echo utt1 reco1 0.2 6.2; echo utt2 reco1 6.3 9.8 )| extend_segment_times.py
# and also try the above with the options --last-segment-end-padding=0.0 --fix-overlapping-segments=false
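# Worked expectation for the test above (a sketch based on the default 0.1s paddings,
# not captured program output): utt1 is padded to (0.1, 6.3) and utt2 to (6.2, 9.9);
# the resulting overlap is split at the midpoint 6.25, so the output is roughly
# utt1 reco1 0.1 6.25
# utt2 reco1 6.25 9.9
# with stderr reporting 2 extended segments and 1 fixed overlap.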
|
tftrt/examples/image_classification/image_classification.py | sarvex/tensorrt | 662 | 12761439 | <reponame>sarvex/tensorrt<filename>tftrt/examples/image_classification/image_classification.py<gh_stars>100-1000
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import sys
import logging
import multiprocessing
import time
from functools import partial
import numpy as np
import tensorflow as tf
import preprocessing
# Allow import of top level python files
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from benchmark_args import BaseCommandLineAPI
from benchmark_runner import BaseBenchmarkRunner
class CommandLineAPI(BaseCommandLineAPI):
SAMPLES_IN_VALIDATION_SET = 50000
def __init__(self):
super(CommandLineAPI, self).__init__()
self._parser.add_argument('--input_size', type=int, default=224,
help='Size of input images expected by the '
'model')
self._parser.add_argument('--num_classes', type=int, default=1001,
help='Number of classes used when training '
'the model')
self._parser.add_argument('--preprocess_method', type=str,
choices=['vgg', 'inception',
'resnet50_v1_5_tf1_ngc_preprocess'
],
default='vgg',
help='The image preprocessing method used in '
'dataloading.')
class BenchmarkRunner(BaseBenchmarkRunner):
ACCURACY_METRIC_NAME = "accuracy"
def before_benchmark(self, **kwargs):
self._labels_shift = 1 if kwargs["num_classes"] == 1001 else 0
def compute_accuracy_metric(self, predictions, expected, **kwargs):
return np.mean(np.equal(predictions["outputs"], expected))
def process_model_output(self, outputs, **kwargs):
outputs = outputs.numpy()
if (len(outputs.shape) != 1):
outputs = np.argmax(outputs, axis=1).reshape(-1)
return {"outputs": outputs - self._labels_shift}
def get_dataset(data_files, batch_size, use_synthetic_data, preprocess_method, input_size):
def deserialize_image_record(record):
feature_map = {
'image/encoded': tf.io.FixedLenFeature([], tf.string, ''),
'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),
'image/class/text': tf.io.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
}
with tf.compat.v1.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(serialized=record,
features=feature_map)
imgdata = obj['image/encoded']
label = tf.cast(obj['image/class/label'], tf.int32)
return imgdata, label
def get_preprocess_fn(preprocess_method, input_size):
"""Creates a function to parse and process a TFRecord
preprocess_method: string
input_size: int
returns: function, the preprocessing function for a record
"""
if preprocess_method == 'vgg':
preprocess_fn = preprocessing.vgg_preprocess
elif preprocess_method == 'inception':
preprocess_fn = preprocessing.inception_preprocess
elif preprocess_method == 'resnet50_v1_5_tf1_ngc_preprocess':
preprocess_fn = preprocessing.resnet50_v1_5_tf1_ngc_preprocess
else:
raise ValueError(
'Invalid preprocessing method {}'.format(preprocess_method)
)
def preprocess_sample_fn(record):
# Parse TFRecord
imgdata, label = deserialize_image_record(record)
label -= 1 # Change to 0-based (don't use background class)
try:
image = tf.image.decode_jpeg(
imgdata,
channels=3,
fancy_upscaling=False,
dct_method='INTEGER_FAST'
)
except:
image = tf.image.decode_png(imgdata, channels=3)
# Use model's preprocessing function
image = preprocess_fn(image, input_size, input_size)
return image, label
return preprocess_sample_fn
dataset = tf.data.Dataset.from_tensor_slices(data_files)
dataset = dataset.interleave(
tf.data.TFRecordDataset,
cycle_length=min(8, multiprocessing.cpu_count()),
block_length=max(batch_size, 32)
)
# preprocess function for input data
preprocess_fn = get_preprocess_fn(
preprocess_method=preprocess_method,
input_size=input_size
)
dataset = dataset.map(
map_func=preprocess_fn,
num_parallel_calls=min(8, multiprocessing.cpu_count())
)
dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
if use_synthetic_data:
dataset = dataset.take(count=1) # loop over 1 batch
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
if __name__ == '__main__':
cmdline_api = CommandLineAPI()
args = cmdline_api.parse_args()
def get_files(data_dir, filename_pattern):
if data_dir is None:
return []
files = tf.io.gfile.glob(os.path.join(data_dir, filename_pattern))
if not files:
raise ValueError('Can not find any files in {} with '
'pattern "{}"'.format(data_dir, filename_pattern))
return files
data_files = get_files(args.data_dir, 'validation*')
calib_files = (
[]
if args.precision != 'INT8' else
get_files(args.calib_data_dir, 'train*')
)
def _input_fn(input_files, build_steps, model_phase):
dataset = get_dataset(
data_files=input_files,
batch_size=args.batch_size,
# even when using synthetic data, we need to
# build and/or calibrate using real training data
# to be in a realistic scenario
use_synthetic_data=False,
preprocess_method=args.preprocess_method,
input_size=args.input_size
)
for i, (batch_images, _) in enumerate(dataset):
if i >= build_steps:
break
print("* [%s] - step %04d/%04d" % (
model_phase, i + 1, build_steps
))
yield batch_images,
calibration_input_fn = partial(
_input_fn,
input_files=calib_files,
build_steps=args.num_calib_inputs // args.batch_size,
model_phase="Calibration"
)
optimize_offline_input_fn = partial(
_input_fn,
input_files=data_files,
build_steps=1,
model_phase="Building"
)
runner = BenchmarkRunner(
input_saved_model_dir=args.input_saved_model_dir,
output_saved_model_dir=args.output_saved_model_dir,
allow_build_at_runtime=args.allow_build_at_runtime,
calibration_input_fn=calibration_input_fn,
debug=args.debug,
gpu_mem_cap=args.gpu_mem_cap,
input_signature_key=args.input_signature_key,
max_workspace_size_bytes=args.max_workspace_size,
minimum_segment_size=args.minimum_segment_size,
num_calib_inputs=args.num_calib_inputs,
optimize_offline=args.optimize_offline,
optimize_offline_input_fn=optimize_offline_input_fn,
output_tensor_indices=args.output_tensor_indices,
output_tensor_names=args.output_tensor_names,
precision_mode=args.precision,
use_dynamic_shape=args.use_dynamic_shape,
use_tftrt=args.use_tftrt
)
get_benchmark_input_fn = partial(
get_dataset,
data_files=data_files,
input_size=args.input_size,
preprocess_method=args.preprocess_method
)
runner.execute_benchmark(
batch_size=args.batch_size,
display_every=args.display_every,
get_benchmark_input_fn=get_benchmark_input_fn,
num_iterations=args.num_iterations,
num_warmup_iterations=args.num_warmup_iterations,
skip_accuracy_testing=(
args.use_synthetic_data or args.skip_accuracy_testing
),
use_synthetic_data=args.use_synthetic_data,
use_xla=args.use_xla,
########### Additional Settings ############
num_classes=args.num_classes,
)
|
tests/datasets/test_eigenscape_raw.py | lucaspbastos/soundata | 177 | 12761486 | <filename>tests/datasets/test_eigenscape_raw.py<gh_stars>100-1000
import numpy as np
from tests.test_utils import run_clip_tests
from soundata import annotations
from soundata.datasets import eigenscape_raw
TEST_DATA_HOME = "tests/resources/sound_datasets/eigenscape_raw"
def test_clip():
default_clipid = "Beach-01-Raw"
dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
expected_attributes = {
"audio_path": (
"tests/resources/sound_datasets/eigenscape_raw/Beach-01-Raw.wav"
),
"clip_id": "Beach-01-Raw",
}
expected_property_types = {
"audio": tuple,
"tags": annotations.Tags,
"location": str,
"date": str,
"time": str,
"additional_information": str,
}
run_clip_tests(clip, expected_attributes, expected_property_types)
def test_load_audio():
default_clipid = "Beach-01-Raw"
dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
audio_path = clip.audio_path
audio, sr = eigenscape_raw.load_audio(audio_path)
assert sr == 48000
assert type(audio) is np.ndarray
assert len(audio.shape) == 2 # check audio is loaded correctly
assert audio.shape[0] == 32 # check audio is 32ch (HOA 4th order)
assert audio.shape[1] == 48000 * 1.0 # Check audio duration is as expected
def test_load_tags():
# dataset
default_clipid = "Beach-01-Raw"
dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
assert len(clip.tags.labels) == 1
assert clip.tags.labels[0] == "Beach"
assert np.allclose([1.0], clip.tags.confidence)
def test_load_metadata():
# dataset
default_clipid = "Beach-01-Raw"
dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
assert clip.location == "Bridlington Beach"
assert clip.time == "10:42"
assert clip.date == "09/05/2017"
assert clip.additional_information == ""
def test_to_jams():
default_clipid = "Beach-01-Raw"
dataset = eigenscape_raw.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
jam = clip.to_jams()
assert jam.validate()
# Validate Tags
tags = jam.search(namespace="tag_open")[0]["data"]
assert len(tags) == 1
assert tags[0].time == 0
assert tags[0].duration == 1.0
assert tags[0].value == "Beach"
assert tags[0].confidence == 1
# validate metadata
assert jam.file_metadata.duration == 1.0
assert jam.sandbox.location == "Bridlington Beach"
assert jam.sandbox.time == "10:42"
assert jam.sandbox.date == "09/05/2017"
assert jam.annotations[0].annotation_metadata.data_source == "soundata"
|
kipart/common.py | xesscorp/KiPart | 133 | 12761509 | <gh_stars>100-1000
# MIT license
#
# Copyright (C) 2015-2021 by <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import csv
import difflib
import os.path
import re
from builtins import object
import openpyxl
from .py_2_3 import *
COLUMN_NAMES = {
"pin": "num",
"num": "num",
"name": "name",
"type": "type",
"style": "style",
"side": "side",
"unit": "unit",
"bank": "unit",
"hidden": "hidden",
"": "", # Blank column names stay blank.
}
# This is just a vanilla object class for device pins.
# We'll add attributes to it as needed.
class Pin(object):
pass
DEFAULT_PIN = Pin()
DEFAULT_PIN.num = None
DEFAULT_PIN.name = ""
DEFAULT_PIN.type = "io"
DEFAULT_PIN.style = "line"
DEFAULT_PIN.unit = 1
DEFAULT_PIN.side = "left"
DEFAULT_PIN.hidden = "no"
def num_row_elements(row):
"""Get number of elements in CSV row."""
try:
rowset = set(row)
rowset.discard("")
return len(rowset)
except TypeError:
return 0
def get_nonblank_row(csv_reader):
"""Return the first non-blank row encountered from the current point in a CSV file."""
for row in csv_reader:
if num_row_elements(row) > 0:
return row
return []
def get_part_info(csv_reader):
"""Get the part number, ref prefix, footprint, MPN, datasheet link, and description from a row of the CSV file."""
# Read the first, nonblank row and pad it with None's to make sure it's long enough.
(
part_num,
part_ref_prefix,
part_footprint,
part_manf_num,
part_datasheet,
part_desc,
) = list(get_nonblank_row(csv_reader) + [None] * 6)[:6]
# Put in the default part reference identifier if it isn't present.
if part_ref_prefix in (None, "", " "):
part_ref_prefix = "U"
# Check to see if the row with the part identifier is missing.
if part_num and part_num.lower() in list(COLUMN_NAMES.keys()):
issue("Row with part number is missing in CSV file.", "error")
return (
part_num,
part_ref_prefix,
part_footprint,
part_manf_num,
part_datasheet,
part_desc,
)
def find_closest_match(name, name_dict, fuzzy_match, threshold=0.0):
"""Approximate matching subroutine"""
# Scrub non-alphanumerics from name and lowercase it.
    scrubber = re.compile(r"[\W.]+")
name = scrubber.sub("", name).lower()
# Return regular dictionary lookup if fuzzy matching is not enabled.
if fuzzy_match == False:
return name_dict[name]
# Find the closest fuzzy match to the given name in the scrubbed list.
# Set the matching threshold to 0 so it always gives some result.
match = difflib.get_close_matches(name, list(name_dict.keys()), 1, threshold)[0]
return name_dict[match]
def clean_headers(headers):
"""Return a list of the closest valid column headers for the headers found in the file."""
return [find_closest_match(h, COLUMN_NAMES, True) for h in headers]
def issue(msg, level="warning"):
if level == "warning":
print("Warning: {}".format(msg))
elif level == "error":
print("ERROR: {}".format(msg))
raise Exception("Unrecoverable error")
else:
print(msg)
def fix_pin_data(pin_data, part_num):
"""Fix common errors in pin data."""
fixed_pin_data = pin_data.strip() # Remove leading/trailing spaces.
    if re.search(r"\s", fixed_pin_data) is not None:
        fixed_pin_data = re.sub(r"\s", "_", fixed_pin_data)
issue(
"Replaced whitespace with '_' in pin '{pin_data}' of part {part_num}.".format(
**locals()
)
)
return fixed_pin_data
def is_xlsx(filename):
return os.path.splitext(filename)[1] == ".xlsx"
def convert_xlsx_to_csv(xlsx_file, sheetname=None):
"""
Convert sheet of an Excel workbook into a CSV file in the same directory
and return the read handle of the CSV file.
"""
wb = openpyxl.load_workbook(xlsx_file)
if sheetname:
sh = wb[sheetname]
else:
sh = wb.active
if USING_PYTHON2:
# Python 2 doesn't accept newline parameter when opening file.
newline = {}
else:
# kipart fails on Python 3 unless file is opened with this newline.
newline = {"newline": ""}
csv_filename = "xlsx_to_csv_file.csv"
with open(csv_filename, "w", **newline) as f:
col = csv.writer(f)
for row in sh.rows:
try:
col.writerow([cell.value for cell in row])
except UnicodeEncodeError:
for cell in row:
if cell.value:
cell.value = "".join([c for c in cell.value if ord(c) < 128])
col.writerow([cell.value for cell in row])
return open(csv_filename, "r")
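# Minimal sketch of the fuzzy header matching above (illustrative only; the header
# strings are made-up examples rather than values required by KiPart):
if __name__ == "__main__":
    raw_headers = ["Pin #", "Pin Name", "Type", "Side"]
    # Each header is scrubbed, lowercased and fuzzy-matched against COLUMN_NAMES,
    # so this is expected to print something like ['num', 'name', 'type', 'side'].
    print(clean_headers(raw_headers))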
|
django_react/settings.py | AmbiteamProject/spleeter-web | 202 | 12761535 | <gh_stars>100-1000
import os
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv('SECRET_KEY', 'sekrit')
YOUTUBE_API_KEY = os.getenv('YOUTUBE_API_KEY', '')
CPU_SEPARATION = bool(int(os.getenv('CPU_SEPARATION', '1')))
ALLOWED_HOSTS = [os.getenv('APP_HOST'), '0.0.0.0', '127.0.0.1', 'localhost']
DEFAULT_FILE_STORAGE = 'api.storage.AzureStorage'
# DEFAULT_FILE_STORAGE = 'api.storage.S3Boto3Storage'
# DEFAULT_FILE_STORAGE = 'api.storage.FileSystemStorage'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
##################################
# Azure storage backend settings #
##################################
AZURE_ACCOUNT_KEY = os.getenv('AZURE_ACCOUNT_KEY', '')
AZURE_ACCOUNT_NAME = os.getenv('AZURE_ACCOUNT_NAME', '')
AZURE_CONTAINER = os.getenv('AZURE_CONTAINER', '')
AZURE_CUSTOM_DOMAIN = os.getenv('AZURE_CUSTOM_DOMAIN')
AZURE_OBJECT_PARAMETERS = {'content_disposition': 'attachment'}
################################
# AWS storage backend settings #
################################
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', '')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', '')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME', '')
AWS_S3_CUSTOM_DOMAIN = os.getenv('AWS_S3_CUSTOM_DOMAIN')
# A path prefix that will be prepended to all uploads
AWS_LOCATION = 'media'
# Disable query parameter authentication (for public reads)
AWS_QUERYSTRING_AUTH = False
# Make uploaded files publicly accessible and downloadable
AWS_S3_OBJECT_PARAMETERS = {'ACL': 'public-read', 'ContentDisposition': 'attachment'}
# S3 region
AWS_S3_REGION_NAME = 'us-east-1'
CELERY_BROKER_URL = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379/0')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0')
CELERY_TASK_ROUTES = {
'api.tasks.create_static_mix': {
'queue': 'slow_queue'
},
'api.tasks.create_dynamic_mix': {
'queue': 'slow_queue'
},
'api.tasks.fetch_youtube_audio': {
'queue': 'fast_queue'
},
}
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'spleeter-web.sqlite3',
}
}
MEDIA_ROOT = 'media'
MEDIA_URL = '/media/'
SEPARATE_DIR = 'separate'
UPLOAD_DIR = 'uploads'
VALID_MIME_TYPES = [
'audio/aac', 'audio/aiff', 'audio/x-aiff', 'audio/ogg', 'video/ogg', 'application/ogg', 'audio/opus', 'audio/vorbis', 'audio/mpeg',
'audio/mp3', 'audio/mpeg3', 'audio/x-mpeg-3', 'video/mpeg', 'audio/m4a', 'audio/x-m4a', 'audio/x-hx-aac-adts', 'audio/mp4', 'video/x-mpeg',
'audio/flac', 'audio/x-flac', 'audio/wav', 'audio/x-wav', 'audio/webm', 'video/webm'
]
VALID_FILE_EXT = [
# Lossless
'.aif',
'.aifc',
'.aiff',
'.flac',
'.wav',
# Lossy
'.aac',
'.m4a',
'.mp3',
'.opus',
'.weba',
'.webm',
# Ogg (Lossy)
'.ogg',
'.oga',
'.mogg'
]
UPLOAD_FILE_SIZE_LIMIT = 100 * 1024 * 1024
YOUTUBE_LENGTH_LIMIT = 30 * 60
YOUTUBE_MAX_RETRIES = 3
# Application definition
INSTALLED_APPS = [
'whitenoise.runserver_nostatic',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'frontend.apps.FrontendConfig',
'rest_framework',
'webpack_loader'
]
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'dist/',
'STATS_FILE': os.path.join(BASE_DIR, 'frontend', 'assets', 'webpack-stats.json')
}
}
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
)
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'django_react.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'frontend', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'frontend.context_processors.debug',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_react.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'frontend', 'assets'),
)
# Override production variables if DJANGO_DEVELOPMENT env variable is set
if os.getenv('DJANGO_DEVELOPMENT'):
from .settings_dev import *
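# A minimal settings_dev.py only needs to flip the production defaults above, e.g.
# (illustrative sketch; the real file ships with the project and may differ):
#
#     DEBUG = True
#     ALLOWED_HOSTS = ['*']
#     DEFAULT_FILE_STORAGE = 'api.storage.FileSystemStorage'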
|
event_rpcgen.py | mengzhisuoliu/libevent | 8,731 | 12761541 | #!/usr/bin/env python
#
# Copyright (c) 2005-2007 <NAME> <<EMAIL>>
# Copyright (c) 2007-2012 <NAME> and <NAME>
# All rights reserved.
#
# Generates marshaling code based on libevent.
# pylint: disable=too-many-lines
# pylint: disable=too-many-branches
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-statements
# pylint: disable=global-statement
# TODO:
# 1) propagate the arguments/options parsed by argparse down to the
# instantiated factory objects.
# 2) move the globals into a class that manages execution, including the
# progress outputs that go to stderr at the moment.
# 3) emit other languages.
import argparse
import re
import sys
_NAME = "event_rpcgen.py"
_VERSION = "0.1"
# Globals
LINE_COUNT = 0
CPPCOMMENT_RE = re.compile(r"\/\/.*$")
NONIDENT_RE = re.compile(r"\W")
PREPROCESSOR_DEF_RE = re.compile(r"^#define")
STRUCT_REF_RE = re.compile(r"^struct\[(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)\]$")
STRUCT_DEF_RE = re.compile(r"^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$")
WHITESPACE_RE = re.compile(r"\s+")
HEADER_DIRECT = []
CPP_DIRECT = []
QUIETLY = False
def declare(s):
if not QUIETLY:
print(s)
def TranslateList(mylist, mydict):
return [x % mydict for x in mylist]
class RpcGenError(Exception):
"""An Exception class for parse errors."""
def __init__(self, why): # pylint: disable=super-init-not-called
self.why = why
def __str__(self):
return str(self.why)
# Holds everything that makes a struct
class Struct(object):
def __init__(self, name):
self._name = name
self._entries = []
self._tags = {}
declare(" Created struct: %s" % name)
def AddEntry(self, entry):
if entry.Tag() in self._tags:
raise RpcGenError(
'Entry "%s" duplicates tag number %d from "%s" '
"around line %d"
% (entry.Name(), entry.Tag(), self._tags[entry.Tag()], LINE_COUNT)
)
self._entries.append(entry)
self._tags[entry.Tag()] = entry.Name()
declare(" Added entry: %s" % entry.Name())
def Name(self):
return self._name
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types."""
name = "%s_%s" % (self._name, entry.Name())
return name.upper()
@staticmethod
def PrintIndented(filep, ident, code):
"""Takes an array, add indentation to each entry and prints it."""
for entry in code:
filep.write("%s%s\n" % (ident, entry))
class StructCCode(Struct):
""" Knows how to generate C code for a struct """
def __init__(self, name):
Struct.__init__(self, name)
def PrintTags(self, filep):
"""Prints the tag definitions for a structure."""
filep.write("/* Tag definition for %s */\n" % self._name)
filep.write("enum %s_ {\n" % self._name.lower())
for entry in self._entries:
filep.write(" %s=%d,\n" % (self.EntryTagName(entry), entry.Tag()))
filep.write(" %s_MAX_TAGS\n" % (self._name.upper()))
filep.write("};\n\n")
def PrintForwardDeclaration(self, filep):
filep.write("struct %s;\n" % self._name)
def PrintDeclaration(self, filep):
filep.write("/* Structure declaration for %s */\n" % self._name)
filep.write("struct %s_access_ {\n" % self._name)
for entry in self._entries:
dcl = entry.AssignDeclaration("(*%s_assign)" % entry.Name())
dcl.extend(entry.GetDeclaration("(*%s_get)" % entry.Name()))
if entry.Array():
dcl.extend(entry.AddDeclaration("(*%s_add)" % entry.Name()))
self.PrintIndented(filep, " ", dcl)
filep.write("};\n\n")
filep.write("struct %s {\n" % self._name)
filep.write(" struct %s_access_ *base;\n\n" % self._name)
for entry in self._entries:
dcl = entry.Declaration()
self.PrintIndented(filep, " ", dcl)
filep.write("\n")
for entry in self._entries:
filep.write(" ev_uint8_t %s_set;\n" % entry.Name())
filep.write("};\n\n")
filep.write(
"""struct %(name)s *%(name)s_new(void);
struct %(name)s *%(name)s_new_with_arg(void *);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
struct %(name)s *);\n"""
% {"name": self._name}
)
# Write a setting function of every variable
for entry in self._entries:
self.PrintIndented(
filep, "", entry.AssignDeclaration(entry.AssignFuncName())
)
self.PrintIndented(filep, "", entry.GetDeclaration(entry.GetFuncName()))
if entry.Array():
self.PrintIndented(filep, "", entry.AddDeclaration(entry.AddFuncName()))
filep.write("/* --- %s done --- */\n\n" % self._name)
def PrintCode(self, filep):
filep.write(
"""/*
* Implementation of %s
*/
"""
% (self._name)
)
filep.write(
"""
static struct %(name)s_access_ %(name)s_base__ = {
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeBase())
filep.write("};\n\n")
# Creation
filep.write(
"""struct %(name)s *
%(name)s_new(void)
{
return %(name)s_new_with_arg(NULL);
}
struct %(name)s *
%(name)s_new_with_arg(void *unused)
{
struct %(name)s *tmp;
if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {
event_warn("%%s: malloc", __func__);
return (NULL);
}
tmp->base = &%(name)s_base__;
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeInitialize("tmp"))
filep.write(" tmp->%s_set = 0;\n\n" % entry.Name())
filep.write(
""" return (tmp);
}
"""
)
# Adding
for entry in self._entries:
if entry.Array():
self.PrintIndented(filep, "", entry.CodeAdd())
filep.write("\n")
# Assigning
for entry in self._entries:
self.PrintIndented(filep, "", entry.CodeAssign())
filep.write("\n")
# Getting
for entry in self._entries:
self.PrintIndented(filep, "", entry.CodeGet())
filep.write("\n")
# Clearing
filep.write(
"""void
%(name)s_clear(struct %(name)s *tmp)
{
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeClear("tmp"))
filep.write("}\n\n")
# Freeing
filep.write(
"""void
%(name)s_free(struct %(name)s *tmp)
{
"""
% {"name": self._name}
)
for entry in self._entries:
self.PrintIndented(filep, " ", entry.CodeFree("tmp"))
filep.write(
""" free(tmp);
}
"""
)
# Marshaling
filep.write(
"""void
%(name)s_marshal(struct evbuffer *evbuf, const struct %(name)s *tmp) {
"""
% {"name": self._name}
)
for entry in self._entries:
indent = " "
# Optional entries do not have to be set
if entry.Optional():
indent += " "
filep.write(" if (tmp->%s_set) {\n" % entry.Name())
self.PrintIndented(
filep,
indent,
entry.CodeMarshal(
"evbuf",
self.EntryTagName(entry),
entry.GetVarName("tmp"),
entry.GetVarLen("tmp"),
),
)
if entry.Optional():
filep.write(" }\n")
filep.write("}\n\n")
# Unmarshaling
filep.write(
"""int
%(name)s_unmarshal(struct %(name)s *tmp, struct evbuffer *evbuf)
{
ev_uint32_t tag;
while (evbuffer_get_length(evbuf) > 0) {
if (evtag_peek(evbuf, &tag) == -1)
return (-1);
switch (tag) {
"""
% {"name": self._name}
)
for entry in self._entries:
filep.write(" case %s:\n" % (self.EntryTagName(entry)))
if not entry.Array():
filep.write(
""" if (tmp->%s_set)
return (-1);
"""
% (entry.Name())
)
self.PrintIndented(
filep,
" ",
entry.CodeUnmarshal(
"evbuf",
self.EntryTagName(entry),
entry.GetVarName("tmp"),
entry.GetVarLen("tmp"),
),
)
filep.write(
""" tmp->%s_set = 1;
break;
"""
% (entry.Name())
)
filep.write(
""" default:
return -1;
}
}
"""
)
# Check if it was decoded completely
filep.write(
""" if (%(name)s_complete(tmp) == -1)
return (-1);
return (0);
}
"""
% {"name": self._name}
)
# Checking if a structure has all the required data
filep.write(
"""
int
%(name)s_complete(struct %(name)s *msg)
{
"""
% {"name": self._name}
)
for entry in self._entries:
if not entry.Optional():
code = [
"""if (!msg->%(name)s_set)
return (-1);"""
]
code = TranslateList(code, entry.GetTranslation())
self.PrintIndented(filep, " ", code)
self.PrintIndented(
filep, " ", entry.CodeComplete("msg", entry.GetVarName("msg"))
)
filep.write(
""" return (0);
}
"""
)
# Complete message unmarshaling
filep.write(
"""
int
evtag_unmarshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t need_tag,
struct %(name)s *msg)
{
ev_uint32_t tag;
int res = -1;
struct evbuffer *tmp = evbuffer_new();
if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
goto error;
if (%(name)s_unmarshal(msg, tmp) == -1)
goto error;
res = 0;
error:
evbuffer_free(tmp);
return (res);
}
"""
% {"name": self._name}
)
# Complete message marshaling
filep.write(
"""
void
evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag,
const struct %(name)s *msg)
{
struct evbuffer *buf_ = evbuffer_new();
assert(buf_ != NULL);
%(name)s_marshal(buf_, msg);
evtag_marshal_buffer(evbuf, tag, buf_);
evbuffer_free(buf_);
}
"""
% {"name": self._name}
)
class Entry(object):
def __init__(self, ent_type, name, tag):
self._type = ent_type
self._name = name
self._tag = int(tag)
self._ctype = ent_type
self._optional = False
self._can_be_array = False
self._array = False
self._line_count = -1
self._struct = None
self._refname = None
self._optpointer = True
self._optaddarg = True
@staticmethod
def GetInitializer():
raise NotImplementedError("Entry does not provide an initializer")
def SetStruct(self, struct):
self._struct = struct
def LineCount(self):
assert self._line_count != -1
return self._line_count
def SetLineCount(self, number):
self._line_count = number
def Array(self):
return self._array
def Optional(self):
return self._optional
def Tag(self):
return self._tag
def Name(self):
return self._name
def Type(self):
return self._type
def MakeArray(self):
self._array = True
def MakeOptional(self):
self._optional = True
def Verify(self):
if self.Array() and not self._can_be_array:
raise RpcGenError(
'Entry "%s" cannot be created as an array '
"around line %d" % (self._name, self.LineCount())
)
if not self._struct:
raise RpcGenError(
'Entry "%s" does not know which struct it belongs to '
"around line %d" % (self._name, self.LineCount())
)
if self._optional and self._array:
raise RpcGenError(
'Entry "%s" has illegal combination of optional and array '
"around line %d" % (self._name, self.LineCount())
)
def GetTranslation(self, extradict=None):
if extradict is None:
extradict = {}
mapping = {
"parent_name": self._struct.Name(),
"name": self._name,
"ctype": self._ctype,
"refname": self._refname,
"optpointer": self._optpointer and "*" or "",
"optreference": self._optpointer and "&" or "",
"optaddarg": self._optaddarg and ", const %s value" % self._ctype or "",
}
for (k, v) in list(extradict.items()):
mapping[k] = v
return mapping
def GetVarName(self, var):
return "%(var)s->%(name)s_data" % self.GetTranslation({"var": var})
def GetVarLen(self, _var):
return "sizeof(%s)" % self._ctype
def GetFuncName(self):
return "%s_%s_get" % (self._struct.Name(), self._name)
def GetDeclaration(self, funcname):
code = [
"int %s(struct %s *, %s *);" % (funcname, self._struct.Name(), self._ctype)
]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, %(ctype)s *value)
{
if (msg->%(name)s_set != 1)
return (-1);
*value = msg->%(name)s_data;
return (0);
}"""
code = code % self.GetTranslation()
return code.split("\n")
def AssignFuncName(self):
return "%s_%s_assign" % (self._struct.Name(), self._name)
def AddFuncName(self):
return "%s_%s_add" % (self._struct.Name(), self._name)
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, const %s);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def CodeAssign(self):
code = [
"int",
"%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,"
" const %(ctype)s value)",
"{",
" msg->%(name)s_set = 1;",
" msg->%(name)s_data = value;",
" return (0);",
"}",
]
code = "\n".join(code)
code = code % self.GetTranslation()
return code.split("\n")
def CodeClear(self, structname):
code = ["%s->%s_set = 0;" % (structname, self.Name())]
return code
@staticmethod
def CodeComplete(_structname, _var_name):
return []
@staticmethod
def CodeFree(_name):
return []
def CodeBase(self):
code = ["%(parent_name)s_%(name)s_assign,", "%(parent_name)s_%(name)s_get,"]
if self.Array():
code.append("%(parent_name)s_%(name)s_add,")
code = "\n".join(code)
code = code % self.GetTranslation()
return code.split("\n")
class EntryBytes(Entry):
def __init__(self, ent_type, name, tag, length):
# Init base class
super(EntryBytes, self).__init__(ent_type, name, tag)
self._length = length
self._ctype = "ev_uint8_t"
@staticmethod
def GetInitializer():
return "NULL"
def GetVarLen(self, _var):
return "(%s)" % self._length
@staticmethod
def CodeArrayAdd(varname, _value):
# XXX: copy here
return ["%(varname)s = NULL;" % {"varname": varname}]
def GetDeclaration(self, funcname):
code = [
"int %s(struct %s *, %s **);" % (funcname, self._struct.Name(), self._ctype)
]
return code
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, const %s *);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def Declaration(self):
dcl = ["ev_uint8_t %s_data[%s];" % (self._name, self._length)]
return dcl
def CodeGet(self):
name = self._name
code = [
"int",
"%s_%s_get(struct %s *msg, %s **value)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_set != 1)" % name,
" return (-1);",
" *value = msg->%s_data;" % name,
" return (0);",
"}",
]
return code
def CodeAssign(self):
name = self._name
code = [
"int",
"%s_%s_assign(struct %s *msg, const %s *value)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" msg->%s_set = 1;" % name,
" memcpy(msg->%s_data, value, %s);" % (name, self._length),
" return (0);",
"}",
]
return code
def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
code = [
"if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, "
"%(var)s, %(varlen)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
return TranslateList(
code,
self.GetTranslation(
{"var": var_name, "varlen": var_len, "buf": buf, "tag": tag_name}
),
)
@staticmethod
def CodeMarshal(buf, tag_name, var_name, var_len):
code = ["evtag_marshal(%s, %s, %s, %s);" % (buf, tag_name, var_name, var_len)]
return code
def CodeClear(self, structname):
code = [
"%s->%s_set = 0;" % (structname, self.Name()),
"memset(%s->%s_data, 0, sizeof(%s->%s_data));"
% (structname, self._name, structname, self._name),
]
return code
def CodeInitialize(self, name):
code = [
"memset(%s->%s_data, 0, sizeof(%s->%s_data));"
% (name, self._name, name, self._name)
]
return code
def Verify(self):
if not self._length:
raise RpcGenError(
'Entry "%s" needs a length '
"around line %d" % (self._name, self.LineCount())
)
super(EntryBytes, self).Verify()
class EntryInt(Entry):
def __init__(self, ent_type, name, tag, bits=32):
# Init base class
super(EntryInt, self).__init__(ent_type, name, tag)
self._can_be_array = True
if bits == 32:
self._ctype = "ev_uint32_t"
self._marshal_type = "int"
if bits == 64:
self._ctype = "ev_uint64_t"
self._marshal_type = "int64"
@staticmethod
def GetInitializer():
return "0"
@staticmethod
def CodeArrayFree(_var):
return []
@staticmethod
def CodeArrayAssign(varname, srcvar):
return ["%(varname)s = %(srcvar)s;" % {"varname": varname, "srcvar": srcvar}]
@staticmethod
def CodeArrayAdd(varname, value):
"""Returns a new entry of this type."""
return ["%(varname)s = %(value)s;" % {"varname": varname, "value": value}]
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
code = [
"if (evtag_unmarshal_%(ma)s(%(buf)s, %(tag)s, &%(var)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"ma": self._marshal_type, "buf": buf, "tag": tag_name, "var": var_name}
)
return code.split("\n")
def CodeMarshal(self, buf, tag_name, var_name, _var_len):
code = [
"evtag_marshal_%s(%s, %s, %s);"
% (self._marshal_type, buf, tag_name, var_name)
]
return code
def Declaration(self):
dcl = ["%s %s_data;" % (self._ctype, self._name)]
return dcl
def CodeInitialize(self, name):
code = ["%s->%s_data = 0;" % (name, self._name)]
return code
class EntryString(Entry):
def __init__(self, ent_type, name, tag):
# Init base class
super(EntryString, self).__init__(ent_type, name, tag)
self._can_be_array = True
self._ctype = "char *"
@staticmethod
def GetInitializer():
return "NULL"
@staticmethod
def CodeArrayFree(varname):
code = ["if (%(var)s != NULL) free(%(var)s);"]
return TranslateList(code, {"var": varname})
@staticmethod
def CodeArrayAssign(varname, srcvar):
code = [
"if (%(var)s != NULL)",
" free(%(var)s);",
"%(var)s = strdup(%(srcvar)s);",
"if (%(var)s == NULL) {",
' event_warnx("%%s: strdup", __func__);',
" return (-1);",
"}",
]
return TranslateList(code, {"var": varname, "srcvar": srcvar})
@staticmethod
def CodeArrayAdd(varname, value):
code = [
"if (%(value)s != NULL) {",
" %(var)s = strdup(%(value)s);",
" if (%(var)s == NULL) {",
" goto error;",
" }",
"} else {",
" %(var)s = NULL;",
"}",
]
return TranslateList(code, {"var": varname, "value": value})
def GetVarLen(self, var):
return "strlen(%s)" % self.GetVarName(var)
@staticmethod
def CodeMakeInitalize(varname):
return "%(varname)s = NULL;" % {"varname": varname}
def CodeAssign(self):
code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % (
self.GetTranslation()
)
return code.split("\n")
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
code = [
"if (evtag_unmarshal_string(%(buf)s, %(tag)s, &%(var)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"buf": buf, "tag": tag_name, "var": var_name}
)
return code.split("\n")
@staticmethod
def CodeMarshal(buf, tag_name, var_name, _var_len):
code = ["evtag_marshal_string(%s, %s, %s);" % (buf, tag_name, var_name)]
return code
def CodeClear(self, structname):
code = [
"if (%s->%s_set == 1) {" % (structname, self.Name()),
" free(%s->%s_data);" % (structname, self.Name()),
" %s->%s_data = NULL;" % (structname, self.Name()),
" %s->%s_set = 0;" % (structname, self.Name()),
"}",
]
return code
def CodeInitialize(self, name):
code = ["%s->%s_data = NULL;" % (name, self._name)]
return code
def CodeFree(self, name):
code = [
"if (%s->%s_data != NULL)" % (name, self._name),
" free (%s->%s_data);" % (name, self._name),
]
return code
def Declaration(self):
dcl = ["char *%s_data;" % self._name]
return dcl
class EntryStruct(Entry):
def __init__(self, ent_type, name, tag, refname):
# Init base class
super(EntryStruct, self).__init__(ent_type, name, tag)
self._optpointer = False
self._can_be_array = True
self._refname = refname
self._ctype = "struct %s*" % refname
self._optaddarg = False
def GetInitializer(self):
return "NULL"
def GetVarLen(self, _var):
return "-1"
def CodeArrayAdd(self, varname, _value):
code = [
"%(varname)s = %(refname)s_new();",
"if (%(varname)s == NULL)",
" goto error;",
]
return TranslateList(code, self.GetTranslation({"varname": varname}))
def CodeArrayFree(self, var):
code = ["%(refname)s_free(%(var)s);" % self.GetTranslation({"var": var})]
return code
def CodeArrayAssign(self, var, srcvar):
code = [
"int had_error = 0;",
"struct evbuffer *tmp = NULL;",
"%(refname)s_clear(%(var)s);",
"if ((tmp = evbuffer_new()) == NULL) {",
' event_warn("%%s: evbuffer_new()", __func__);',
" had_error = 1;",
" goto done;",
"}",
"%(refname)s_marshal(tmp, %(srcvar)s);",
"if (%(refname)s_unmarshal(%(var)s, tmp) == -1) {",
' event_warnx("%%s: %(refname)s_unmarshal", __func__);',
" had_error = 1;",
" goto done;",
"}",
"done:",
"if (tmp != NULL)",
" evbuffer_free(tmp);",
"if (had_error) {",
" %(refname)s_clear(%(var)s);",
" return (-1);",
"}",
]
return TranslateList(code, self.GetTranslation({"var": var, "srcvar": srcvar}))
def CodeGet(self):
name = self._name
code = [
"int",
"%s_%s_get(struct %s *msg, %s *value)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_set != 1) {" % name,
" msg->%s_data = %s_new();" % (name, self._refname),
" if (msg->%s_data == NULL)" % name,
" return (-1);",
" msg->%s_set = 1;" % name,
" }",
" *value = msg->%s_data;" % name,
" return (0);",
"}",
]
return code
def CodeAssign(self):
code = (
"""int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}"""
% self.GetTranslation()
)
return code.split("\n")
def CodeComplete(self, structname, var_name):
code = [
"if (%(structname)s->%(name)s_set && "
"%(refname)s_complete(%(var)s) == -1)",
" return (-1);",
]
return TranslateList(
code, self.GetTranslation({"structname": structname, "var": var_name})
)
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
code = [
"%(var)s = %(refname)s_new();",
"if (%(var)s == NULL)",
" return (-1);",
"if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag)s, ",
" %(var)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"buf": buf, "tag": tag_name, "var": var_name}
)
return code.split("\n")
def CodeMarshal(self, buf, tag_name, var_name, _var_len):
code = [
"evtag_marshal_%s(%s, %s, %s);" % (self._refname, buf, tag_name, var_name)
]
return code
def CodeClear(self, structname):
code = [
"if (%s->%s_set == 1) {" % (structname, self.Name()),
" %s_free(%s->%s_data);" % (self._refname, structname, self.Name()),
" %s->%s_data = NULL;" % (structname, self.Name()),
" %s->%s_set = 0;" % (structname, self.Name()),
"}",
]
return code
def CodeInitialize(self, name):
code = ["%s->%s_data = NULL;" % (name, self._name)]
return code
def CodeFree(self, name):
code = [
"if (%s->%s_data != NULL)" % (name, self._name),
" %s_free(%s->%s_data);" % (self._refname, name, self._name),
]
return code
def Declaration(self):
dcl = ["%s %s_data;" % (self._ctype, self._name)]
return dcl
class EntryVarBytes(Entry):
def __init__(self, ent_type, name, tag):
# Init base class
super(EntryVarBytes, self).__init__(ent_type, name, tag)
self._ctype = "ev_uint8_t *"
@staticmethod
def GetInitializer():
return "NULL"
def GetVarLen(self, var):
return "%(var)s->%(name)s_length" % self.GetTranslation({"var": var})
@staticmethod
def CodeArrayAdd(varname, _value):
# xxx: copy
return ["%(varname)s = NULL;" % {"varname": varname}]
def GetDeclaration(self, funcname):
code = [
"int %s(struct %s *, %s *, ev_uint32_t *);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, const %s, ev_uint32_t);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def CodeAssign(self):
name = self._name
code = [
"int",
"%s_%s_assign(struct %s *msg, "
"const %s value, ev_uint32_t len)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_data != NULL)" % name,
" free (msg->%s_data);" % name,
" msg->%s_data = malloc(len);" % name,
" if (msg->%s_data == NULL)" % name,
" return (-1);",
" msg->%s_set = 1;" % name,
" msg->%s_length = len;" % name,
" memcpy(msg->%s_data, value, len);" % name,
" return (0);",
"}",
]
return code
def CodeGet(self):
name = self._name
code = [
"int",
"%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)"
% (self._struct.Name(), name, self._struct.Name(), self._ctype),
"{",
" if (msg->%s_set != 1)" % name,
" return (-1);",
" *value = msg->%s_data;" % name,
" *plen = msg->%s_length;" % name,
" return (0);",
"}",
]
return code
def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
code = [
"if (evtag_payload_length(%(buf)s, &%(varlen)s) == -1)",
" return (-1);",
# We do not want DoS opportunities
"if (%(varlen)s > evbuffer_get_length(%(buf)s))",
" return (-1);",
"if ((%(var)s = malloc(%(varlen)s)) == NULL)",
" return (-1);",
"if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, %(var)s, "
"%(varlen)s) == -1) {",
' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
" return (-1);",
"}",
]
code = "\n".join(code) % self.GetTranslation(
{"buf": buf, "tag": tag_name, "var": var_name, "varlen": var_len}
)
return code.split("\n")
@staticmethod
def CodeMarshal(buf, tag_name, var_name, var_len):
code = ["evtag_marshal(%s, %s, %s, %s);" % (buf, tag_name, var_name, var_len)]
return code
def CodeClear(self, structname):
code = [
"if (%s->%s_set == 1) {" % (structname, self.Name()),
" free (%s->%s_data);" % (structname, self.Name()),
" %s->%s_data = NULL;" % (structname, self.Name()),
" %s->%s_length = 0;" % (structname, self.Name()),
" %s->%s_set = 0;" % (structname, self.Name()),
"}",
]
return code
def CodeInitialize(self, name):
code = [
"%s->%s_data = NULL;" % (name, self._name),
"%s->%s_length = 0;" % (name, self._name),
]
return code
def CodeFree(self, name):
code = [
"if (%s->%s_data != NULL)" % (name, self._name),
" free(%s->%s_data);" % (name, self._name),
]
return code
def Declaration(self):
dcl = [
"ev_uint8_t *%s_data;" % self._name,
"ev_uint32_t %s_length;" % self._name,
]
return dcl
class EntryArray(Entry):
_index = None
def __init__(self, entry):
# Init base class
super(EntryArray, self).__init__(entry._type, entry._name, entry._tag)
self._entry = entry
self._refname = entry._refname
self._ctype = self._entry._ctype
self._optional = True
self._optpointer = self._entry._optpointer
self._optaddarg = self._entry._optaddarg
# provide a new function for accessing the variable name
def GetVarName(var_name):
return "%(var)s->%(name)s_data[%(index)s]" % self._entry.GetTranslation(
{"var": var_name, "index": self._index}
)
self._entry.GetVarName = GetVarName
def GetInitializer(self):
return "NULL"
def GetVarName(self, var):
return var
def GetVarLen(self, _var_name):
return "-1"
def GetDeclaration(self, funcname):
"""Allows direct access to elements of the array."""
code = [
"int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);"
% self.GetTranslation({"funcname": funcname})
]
return code
def AssignDeclaration(self, funcname):
code = [
"int %s(struct %s *, int, const %s);"
% (funcname, self._struct.Name(), self._ctype)
]
return code
def AddDeclaration(self, funcname):
code = [
"%(ctype)s %(optpointer)s "
"%(funcname)s(struct %(parent_name)s *msg%(optaddarg)s);"
% self.GetTranslation({"funcname": funcname})
]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}
""" % (
self.GetTranslation()
)
return code.splitlines()
def CodeAssign(self):
code = [
"int",
"%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,",
" const %(ctype)s value)",
"{",
" if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)",
" return (-1);",
"",
" {",
]
code = TranslateList(code, self.GetTranslation())
codearrayassign = self._entry.CodeArrayAssign(
"msg->%(name)s_data[off]" % self.GetTranslation(), "value"
)
code += [" " + x for x in codearrayassign]
code += TranslateList([" }", " return (0);", "}"], self.GetTranslation())
return code
def CodeAdd(self):
codearrayadd = self._entry.CodeArrayAdd(
"msg->%(name)s_data[msg->%(name)s_length - 1]" % self.GetTranslation(),
"value",
)
code = [
"static int",
"%(parent_name)s_%(name)s_expand_to_hold_more("
"struct %(parent_name)s *msg)",
"{",
" int tobe_allocated = msg->%(name)s_num_allocated;",
" %(ctype)s* new_data = NULL;",
" tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;",
" new_data = (%(ctype)s*) realloc(msg->%(name)s_data,",
" tobe_allocated * sizeof(%(ctype)s));",
" if (new_data == NULL)",
" return -1;",
" msg->%(name)s_data = new_data;",
" msg->%(name)s_num_allocated = tobe_allocated;",
" return 0;",
"}",
"",
"%(ctype)s %(optpointer)s",
"%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg%(optaddarg)s)",
"{",
" if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {",
" if (%(parent_name)s_%(name)s_expand_to_hold_more(msg)<0)",
" goto error;",
" }",
]
code = TranslateList(code, self.GetTranslation())
code += [" " + x for x in codearrayadd]
code += TranslateList(
[
" msg->%(name)s_set = 1;",
" return %(optreference)s(msg->%(name)s_data["
"msg->%(name)s_length - 1]);",
"error:",
" --msg->%(name)s_length;",
" return (NULL);",
"}",
],
self.GetTranslation(),
)
return code
def CodeComplete(self, structname, var_name):
self._index = "i"
tmp = self._entry.CodeComplete(structname, self._entry.GetVarName(var_name))
# skip the whole loop if there is nothing to check
if not tmp:
return []
translate = self.GetTranslation({"structname": structname})
code = [
"{",
" int i;",
" for (i = 0; i < %(structname)s->%(name)s_length; ++i) {",
]
code = TranslateList(code, translate)
code += [" " + x for x in tmp]
code += [" }", "}"]
return code
def CodeUnmarshal(self, buf, tag_name, var_name, _var_len):
translate = self.GetTranslation(
{
"var": var_name,
"buf": buf,
"tag": tag_name,
"init": self._entry.GetInitializer(),
}
)
code = [
"if (%(var)s->%(name)s_length >= %(var)s->%(name)s_num_allocated &&",
" %(parent_name)s_%(name)s_expand_to_hold_more(%(var)s) < 0) {",
' puts("HEY NOW");',
" return (-1);",
"}",
]
# the unmarshal code directly returns
code = TranslateList(code, translate)
self._index = "%(var)s->%(name)s_length" % translate
code += self._entry.CodeUnmarshal(
buf,
tag_name,
self._entry.GetVarName(var_name),
self._entry.GetVarLen(var_name),
)
code += ["++%(var)s->%(name)s_length;" % translate]
return code
def CodeMarshal(self, buf, tag_name, var_name, _var_len):
code = ["{", " int i;", " for (i = 0; i < %(var)s->%(name)s_length; ++i) {"]
self._index = "i"
code += self._entry.CodeMarshal(
buf,
tag_name,
self._entry.GetVarName(var_name),
self._entry.GetVarLen(var_name),
)
code += [" }", "}"]
code = "\n".join(code) % self.GetTranslation({"var": var_name})
return code.split("\n")
def CodeClear(self, structname):
translate = self.GetTranslation({"structname": structname})
codearrayfree = self._entry.CodeArrayFree(
"%(structname)s->%(name)s_data[i]"
% self.GetTranslation({"structname": structname})
)
code = ["if (%(structname)s->%(name)s_set == 1) {"]
if codearrayfree:
code += [
" int i;",
" for (i = 0; i < %(structname)s->%(name)s_length; ++i) {",
]
code = TranslateList(code, translate)
if codearrayfree:
code += [" " + x for x in codearrayfree]
code += [" }"]
code += TranslateList(
[
" free(%(structname)s->%(name)s_data);",
" %(structname)s->%(name)s_data = NULL;",
" %(structname)s->%(name)s_set = 0;",
" %(structname)s->%(name)s_length = 0;",
" %(structname)s->%(name)s_num_allocated = 0;",
"}",
],
translate,
)
return code
def CodeInitialize(self, name):
code = [
"%s->%s_data = NULL;" % (name, self._name),
"%s->%s_length = 0;" % (name, self._name),
"%s->%s_num_allocated = 0;" % (name, self._name),
]
return code
def CodeFree(self, structname):
code = self.CodeClear(structname)
code += TranslateList(
["free(%(structname)s->%(name)s_data);"],
self.GetTranslation({"structname": structname}),
)
return code
def Declaration(self):
dcl = [
"%s *%s_data;" % (self._ctype, self._name),
"int %s_length;" % self._name,
"int %s_num_allocated;" % self._name,
]
return dcl
def NormalizeLine(line):
line = CPPCOMMENT_RE.sub("", line)
line = line.strip()
line = WHITESPACE_RE.sub(" ", line)
return line
ENTRY_NAME_RE = re.compile(r"(?P<name>[^\[\]]+)(\[(?P<fixed_length>.*)\])?")
ENTRY_TAG_NUMBER_RE = re.compile(r"(0x)?\d+", re.I)
def ProcessOneEntry(factory, newstruct, entry):
optional = False
array = False
entry_type = ""
name = ""
tag = ""
tag_set = None
separator = ""
fixed_length = ""
for token in entry.split(" "):
if not entry_type:
if not optional and token == "optional":
optional = True
continue
if not array and token == "array":
array = True
continue
if not entry_type:
entry_type = token
continue
if not name:
res = ENTRY_NAME_RE.match(token)
if not res:
raise RpcGenError(
r"""Cannot parse name: "%s" around line %d""" % (entry, LINE_COUNT)
)
name = res.group("name")
fixed_length = res.group("fixed_length")
continue
if not separator:
separator = token
if separator != "=":
raise RpcGenError(
r'''Expected "=" after name "%s" got "%s"''' % (name, token)
)
continue
if not tag_set:
tag_set = 1
if not ENTRY_TAG_NUMBER_RE.match(token):
raise RpcGenError(r'''Expected tag number: "%s"''' % (entry))
tag = int(token, 0)
continue
raise RpcGenError(r'''Cannot parse "%s"''' % (entry))
if not tag_set:
raise RpcGenError(r'''Need tag number: "%s"''' % (entry))
# Create the right entry
if entry_type == "bytes":
if fixed_length:
newentry = factory.EntryBytes(entry_type, name, tag, fixed_length)
else:
newentry = factory.EntryVarBytes(entry_type, name, tag)
elif entry_type == "int" and not fixed_length:
newentry = factory.EntryInt(entry_type, name, tag)
elif entry_type == "int64" and not fixed_length:
newentry = factory.EntryInt(entry_type, name, tag, bits=64)
elif entry_type == "string" and not fixed_length:
newentry = factory.EntryString(entry_type, name, tag)
else:
res = STRUCT_REF_RE.match(entry_type)
if res:
# References another struct defined in our file
newentry = factory.EntryStruct(entry_type, name, tag, res.group("name"))
else:
raise RpcGenError('Bad type: "%s" in "%s"' % (entry_type, entry))
structs = []
if optional:
newentry.MakeOptional()
if array:
newentry.MakeArray()
newentry.SetStruct(newstruct)
newentry.SetLineCount(LINE_COUNT)
newentry.Verify()
if array:
# We need to encapsulate this entry into a struct
newentry = factory.EntryArray(newentry)
newentry.SetStruct(newstruct)
newentry.SetLineCount(LINE_COUNT)
newentry.MakeArray()
newstruct.AddEntry(newentry)
return structs
def ProcessStruct(factory, data):
tokens = data.split(" ")
# First three tokens are: 'struct' 'name' '{'
newstruct = factory.Struct(tokens[1])
inside = " ".join(tokens[3:-1])
tokens = inside.split(";")
structs = []
for entry in tokens:
entry = NormalizeLine(entry)
if not entry:
continue
# It's possible that new structs get defined in here
structs.extend(ProcessOneEntry(factory, newstruct, entry))
structs.append(newstruct)
return structs
C_COMMENT_START = "/*"
C_COMMENT_END = "*/"
C_COMMENT_START_RE = re.compile(re.escape(C_COMMENT_START))
C_COMMENT_END_RE = re.compile(re.escape(C_COMMENT_END))
C_COMMENT_START_SUB_RE = re.compile(r"%s.*$" % (re.escape(C_COMMENT_START)))
C_COMMENT_END_SUB_RE = re.compile(r"%s.*$" % (re.escape(C_COMMENT_END)))
C_MULTILINE_COMMENT_SUB_RE = re.compile(
r"%s.*?%s" % (re.escape(C_COMMENT_START), re.escape(C_COMMENT_END))
)
CPP_CONDITIONAL_BLOCK_RE = re.compile(r"#(if( |def)|endif)")
INCLUDE_RE = re.compile(r'#include (".+"|<.+>)')
def GetNextStruct(filep):
global CPP_DIRECT
global LINE_COUNT
got_struct = False
have_c_comment = False
data = ""
while True:
line = filep.readline()
if not line:
break
LINE_COUNT += 1
line = line[:-1]
if not have_c_comment and C_COMMENT_START_RE.search(line):
if C_MULTILINE_COMMENT_SUB_RE.search(line):
line = C_MULTILINE_COMMENT_SUB_RE.sub("", line)
else:
line = C_COMMENT_START_SUB_RE.sub("", line)
have_c_comment = True
if have_c_comment:
if not C_COMMENT_END_RE.search(line):
continue
have_c_comment = False
line = C_COMMENT_END_SUB_RE.sub("", line)
line = NormalizeLine(line)
if not line:
continue
if not got_struct:
if INCLUDE_RE.match(line):
CPP_DIRECT.append(line)
elif CPP_CONDITIONAL_BLOCK_RE.match(line):
CPP_DIRECT.append(line)
elif PREPROCESSOR_DEF_RE.match(line):
HEADER_DIRECT.append(line)
elif not STRUCT_DEF_RE.match(line):
raise RpcGenError("Missing struct on line %d: %s" % (LINE_COUNT, line))
else:
got_struct = True
data += line
continue
# We are inside the struct
tokens = line.split("}")
if len(tokens) == 1:
data += " " + line
continue
if tokens[1]:
raise RpcGenError("Trailing garbage after struct on line %d" % LINE_COUNT)
# We found the end of the struct
data += " %s}" % tokens[0]
break
# Remove any comments, that might be in there
data = re.sub(r"/\*.*\*/", "", data)
return data
def Parse(factory, filep):
"""
Parses the input file and returns C code and corresponding header file.
"""
entities = []
while 1:
# Just gets the whole struct nicely formatted
data = GetNextStruct(filep)
if not data:
break
entities.extend(ProcessStruct(factory, data))
return entities
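# For reference, Parse() consumes .rpc definitions of the following shape
# (an illustrative example, not taken from a real input file):
#
#     struct person {
#         string name = 1;
#         optional int age = 2;
#         bytes digest[16] = 3;
#         array struct[person] friends = 4;
#     }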
class CCodeGenerator(object):
def __init__(self):
pass
@staticmethod
def GuardName(name):
# Use the complete provided path to the input file, with all
# non-identifier characters replaced with underscores, to
# reduce the chance of a collision between guard macros.
return "EVENT_RPCOUT_%s_" % (NONIDENT_RE.sub("_", name).upper())
def HeaderPreamble(self, name):
guard = self.GuardName(name)
pre = """
/*
* Automatically generated from %s
*/
#ifndef %s
#define %s
""" % (
name,
guard,
guard,
)
if HEADER_DIRECT:
for statement in HEADER_DIRECT:
pre += "%s\n" % statement
pre += "\n"
pre += """
#include <event2/util.h> /* for ev_uint*_t */
#include <event2/rpc.h>
"""
return pre
def HeaderPostamble(self, name):
guard = self.GuardName(name)
return "#endif /* %s */" % (guard)
@staticmethod
def BodyPreamble(name, header_file):
global _NAME
global _VERSION
slash = header_file.rfind("/")
if slash != -1:
header_file = header_file[slash + 1 :]
pre = """
/*
* Automatically generated from %(name)s
* by %(script_name)s/%(script_version)s. DO NOT EDIT THIS FILE.
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <event2/event-config.h>
#include <event2/event.h>
#include <event2/buffer.h>
#include <event2/tag.h>
#if defined(EVENT__HAVE___func__)
# ifndef __func__
# define __func__ __func__
# endif
#elif defined(EVENT__HAVE___FUNCTION__)
# define __func__ __FUNCTION__
#else
# define __func__ __FILE__
#endif
""" % {
"name": name,
"script_name": _NAME,
"script_version": _VERSION,
}
for statement in CPP_DIRECT:
pre += "%s\n" % statement
pre += '\n#include "%s"\n\n' % header_file
pre += "void event_warn(const char *fmt, ...);\n"
pre += "void event_warnx(const char *fmt, ...);\n\n"
return pre
@staticmethod
def HeaderFilename(filename):
return ".".join(filename.split(".")[:-1]) + ".h"
@staticmethod
def CodeFilename(filename):
return ".".join(filename.split(".")[:-1]) + ".gen.c"
@staticmethod
def Struct(name):
return StructCCode(name)
@staticmethod
def EntryBytes(entry_type, name, tag, fixed_length):
return EntryBytes(entry_type, name, tag, fixed_length)
@staticmethod
def EntryVarBytes(entry_type, name, tag):
return EntryVarBytes(entry_type, name, tag)
@staticmethod
def EntryInt(entry_type, name, tag, bits=32):
return EntryInt(entry_type, name, tag, bits)
@staticmethod
def EntryString(entry_type, name, tag):
return EntryString(entry_type, name, tag)
@staticmethod
def EntryStruct(entry_type, name, tag, struct_name):
return EntryStruct(entry_type, name, tag, struct_name)
@staticmethod
def EntryArray(entry):
return EntryArray(entry)
class CommandLine(object):
def __init__(self, argv=None):
"""Initialize a command-line to launch event_rpcgen, as if
from a command-line with CommandLine(sys.argv). If you're
calling this directly, remember to provide a dummy value
for sys.argv[0]
"""
global QUIETLY
self.filename = None
self.header_file = None
self.impl_file = None
self.factory = CCodeGenerator()
parser = argparse.ArgumentParser(
usage="%(prog)s [options] rpc-file [[h-file] c-file]"
)
parser.add_argument("--quiet", action="store_true", default=False)
parser.add_argument("rpc_file", type=argparse.FileType("r"))
args, extra_args = parser.parse_known_args(args=argv)
QUIETLY = args.quiet
if extra_args:
if len(extra_args) == 1:
self.impl_file = extra_args[0].replace("\\", "/")
elif len(extra_args) == 2:
self.header_file = extra_args[0].replace("\\", "/")
self.impl_file = extra_args[1].replace("\\", "/")
else:
parser.error("Spurious arguments provided")
self.rpc_file = args.rpc_file
if not self.impl_file:
self.impl_file = self.factory.CodeFilename(self.rpc_file.name)
if not self.header_file:
self.header_file = self.factory.HeaderFilename(self.impl_file)
if not self.impl_file.endswith(".c"):
parser.error("can only generate C implementation files")
if not self.header_file.endswith(".h"):
parser.error("can only generate C header files")
def run(self):
filename = self.rpc_file.name
header_file = self.header_file
impl_file = self.impl_file
factory = self.factory
declare('Reading "%s"' % filename)
with self.rpc_file:
entities = Parse(factory, self.rpc_file)
declare('... creating "%s"' % header_file)
with open(header_file, "w") as header_fp:
header_fp.write(factory.HeaderPreamble(filename))
# Create forward declarations: allows other structs to reference
# each other
for entry in entities:
entry.PrintForwardDeclaration(header_fp)
header_fp.write("\n")
for entry in entities:
entry.PrintTags(header_fp)
entry.PrintDeclaration(header_fp)
header_fp.write(factory.HeaderPostamble(filename))
declare('... creating "%s"' % impl_file)
with open(impl_file, "w") as impl_fp:
impl_fp.write(factory.BodyPreamble(filename, header_file))
for entry in entities:
entry.PrintCode(impl_fp)
def main(argv=None):
try:
CommandLine(argv=argv).run()
return 0
except RpcGenError as e:
        sys.stderr.write(str(e))
except EnvironmentError as e:
if e.filename and e.strerror:
sys.stderr.write("%s: %s" % (e.filename, e.strerror))
elif e.strerror:
sys.stderr.write(e.strerror)
else:
raise
return 1
if __name__ == "__main__":
sys.exit(main(argv=sys.argv[1:]))
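# Example invocation (paths are illustrative):
#
#     python event_rpcgen.py --quiet rpc/regress.rpc
#
# With no explicit output arguments this writes rpc/regress.gen.c and
# rpc/regress.gen.h next to the input file, per CodeFilename()/HeaderFilename().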
|
tests/guinea-pigs/unittest/nested_suits.py | djeebus/teamcity-python | 105 | 12761552
import unittest
from teamcity.unittestpy import TeamcityTestRunner
from teamcity import is_running_under_teamcity
class TestXXX(unittest.TestCase):
def runTest(self):
assert 1 == 1
if __name__ == '__main__':
if is_running_under_teamcity():
runner = TeamcityTestRunner()
else:
runner = unittest.TextTestRunner()
nested_suite = unittest.TestSuite()
nested_suite.addTest(TestXXX())
suite = unittest.TestSuite()
suite.addTest(nested_suite)
runner.run(suite)
|
Bar/bar_border_radius.py | pyecharts/pyecharts_gallery | 759 | 12761577 | from pyecharts import options as opts
from pyecharts.charts import Bar
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
c = (
Bar()
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values(), category_gap="60%")
.set_series_opts(
itemstyle_opts={
"normal": {
"color": JsCode(
"""new echarts.graphic.LinearGradient(0, 0, 0, 1, [{
offset: 0,
color: 'rgba(0, 244, 255, 1)'
}, {
offset: 1,
color: 'rgba(0, 77, 167, 1)'
}], false)"""
),
"barBorderRadius": [30, 30, 30, 30],
"shadowColor": "rgb(0, 160, 221)",
}
}
)
    .set_global_opts(title_opts=opts.TitleOpts(title="Bar - gradient bars"))
.render("bar_border_radius.html")
)
|
plugins/xml_hidden_extensions_hotfix.py | MattDMo/PackageDev | 288 | 12761594
"""Bootstrap the 'hidden_extensions' setting for the XML syntax.
The XML package includes a `XML.sublime-settings` file
that sets `hidden_extensions` to include some of the extensions
we want to highlight with our package.
There is currently no other way to override this,
so we manually override this extension list
in a User settings file with a plugin.
See also:
https://github.com/sublimehq/Packages/issues/823
https://github.com/SublimeTextIssues/Core/issues/1326
"""
import sublime
from sublime_lib import ResourcePath
__all__ = [
"plugin_loaded",
]
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTSTRAP_VALUE = [3126, 2017, 3, 13]
def override_extensions(expected, modified):
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == expected:
settings.set('hidden_extensions', modified)
settings.set('package_dev.bootstrapped', BOOTSTRAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
def remove_override():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('package_dev.bootstrapped'):
settings.erase('package_dev.bootstrapped')
if settings.get('hidden_extensions') == MODIFIED_VALUE:
settings.erase('hidden_extensions')
print("[PackageDev] Unbootstrapped XML's `hidden_extensions` setting")
sublime.save_settings("XML.sublime-settings")
sublime.set_timeout(remove_file_if_empty, 2000) # Give ST time to write the file
def remove_file_if_empty():
path = ResourcePath("Packages/User/XML.sublime-settings").file_path()
try:
with path.open() as f:
data = sublime.decode_value(f.read())
except (FileNotFoundError, ValueError):
pass
else:
if not data or len(data) == 1 and 'extensions' in data and not data['extensions']:
path.unlink()
print("[PackageDev] Removed now-empty XML.sublime-settings")
def plugin_loaded():
version = int(sublime.version())
if version < 3153:
override_extensions(DEFAULT_VALUE, MODIFIED_VALUE)
# "csproj" was added for 3153.
# https://github.com/sublimehq/Packages/commit/4a3712b7e236f8c4b443282d97bad17f68df318c
# Technically there was a change in 4050, but nobody should be using that anymore.
# https://github.com/sublimehq/Packages/commit/7866273af18398bce324408ff23c7a22f30486c8
elif version < 4075:
override_extensions(DEFAULT_VALUE + ["csproj"], MODIFIED_VALUE + ["csproj"])
elif version >= 4075:
# The settings were move to the syntax file
# https://github.com/sublimehq/Packages/commit/73b16ff196d3cbaf7df2cf5807fda6ab68a2434e
remove_override()
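# On builds where the override applies (< 4075), the generated
# Packages/User/XML.sublime-settings ends up looking roughly like this
# (illustrative; the extension list gains "csproj" on builds >= 3153):
#
#     {
#         "hidden_extensions": ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"],
#         "package_dev.bootstrapped": [3126, 2017, 3, 13]
#     }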
|
equation.py | NYU-CDS-Capstone-FBSDE/DeepBSDE | 205 | 12761606
import numpy as np
import tensorflow as tf
class Equation(object):
"""Base class for defining PDE related function."""
def __init__(self, eqn_config):
self.dim = eqn_config.dim
self.total_time = eqn_config.total_time
self.num_time_interval = eqn_config.num_time_interval
self.delta_t = self.total_time / self.num_time_interval
self.sqrt_delta_t = np.sqrt(self.delta_t)
self.y_init = None
def sample(self, num_sample):
"""Sample forward SDE."""
raise NotImplementedError
def f_tf(self, t, x, y, z):
"""Generator function in the PDE."""
raise NotImplementedError
def g_tf(self, t, x):
"""Terminal condition of the PDE."""
raise NotImplementedError
class HJBLQ(Equation):
"""HJB equation in PNAS paper doi.org/10.1073/pnas.1718942115"""
def __init__(self, eqn_config):
super(HJBLQ, self).__init__(eqn_config)
self.x_init = np.zeros(self.dim)
self.sigma = np.sqrt(2.0)
self.lambd = 1.0
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return -self.lambd * tf.reduce_sum(tf.square(z), 1, keepdims=True)
def g_tf(self, t, x):
return tf.math.log((1 + tf.reduce_sum(tf.square(x), 1, keepdims=True)) / 2)
class AllenCahn(Equation):
"""Allen-Cahn equation in PNAS paper doi.org/10.1073/pnas.1718942115"""
def __init__(self, eqn_config):
super(AllenCahn, self).__init__(eqn_config)
self.x_init = np.zeros(self.dim)
self.sigma = np.sqrt(2.0)
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return y - tf.pow(y, 3)
def g_tf(self, t, x):
return 0.5 / (1 + 0.2 * tf.reduce_sum(tf.square(x), 1, keepdims=True))
class PricingDefaultRisk(Equation):
"""
Nonlinear Black-Scholes equation with default risk in PNAS paper
doi.org/10.1073/pnas.1718942115
"""
def __init__(self, eqn_config):
super(PricingDefaultRisk, self).__init__(eqn_config)
self.x_init = np.ones(self.dim) * 100.0
self.sigma = 0.2
self.rate = 0.02 # interest rate R
self.delta = 2.0 / 3
self.gammah = 0.2
self.gammal = 0.02
self.mu_bar = 0.02
self.vh = 50.0
self.vl = 70.0
self.slope = (self.gammah - self.gammal) / (self.vh - self.vl)
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = (1 + self.mu_bar * self.delta_t) * x_sample[:, :, i] + (
self.sigma * x_sample[:, :, i] * dw_sample[:, :, i])
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
piecewise_linear = tf.nn.relu(
tf.nn.relu(y - self.vh) * self.slope + self.gammah - self.gammal) + self.gammal
return (-(1 - self.delta) * piecewise_linear - self.rate) * y
def g_tf(self, t, x):
return tf.reduce_min(x, 1, keepdims=True)
class PricingDiffRate(Equation):
"""
Nonlinear Black-Scholes equation with different interest rates for borrowing and lending
in Section 4.4 of Comm. Math. Stat. paper doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(PricingDiffRate, self).__init__(eqn_config)
self.x_init = np.ones(self.dim) * 100
self.sigma = 0.2
self.mu_bar = 0.06
self.rl = 0.04
self.rb = 0.06
self.alpha = 1.0 / self.dim
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
factor = np.exp((self.mu_bar-(self.sigma**2)/2)*self.delta_t)
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = (factor * np.exp(self.sigma * dw_sample[:, :, i])) * x_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
temp = tf.reduce_sum(z, 1, keepdims=True) / self.sigma
return -self.rl * y - (self.mu_bar - self.rl) * temp + (
(self.rb - self.rl) * tf.maximum(temp - y, 0))
def g_tf(self, t, x):
temp = tf.reduce_max(x, 1, keepdims=True)
return tf.maximum(temp - 120, 0) - 2 * tf.maximum(temp - 150, 0)
class BurgersType(Equation):
"""
Multidimensional Burgers-type PDE in Section 4.5 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(BurgersType, self).__init__(eqn_config)
self.x_init = np.zeros(self.dim)
self.y_init = 1 - 1.0 / (1 + np.exp(0 + np.sum(self.x_init) / self.dim))
self.sigma = self.dim + 0.0
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + self.sigma * dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
return (y - (2 + self.dim) / 2.0 / self.dim) * tf.reduce_sum(z, 1, keepdims=True)
def g_tf(self, t, x):
return 1 - 1.0 / (1 + tf.exp(t + tf.reduce_sum(x, 1, keepdims=True) / self.dim))
class QuadraticGradient(Equation):
"""
An example PDE with quadratically growing derivatives in Section 4.6 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(QuadraticGradient, self).__init__(eqn_config)
self.alpha = 0.4
self.x_init = np.zeros(self.dim)
base = self.total_time + np.sum(np.square(self.x_init) / self.dim)
self.y_init = np.sin(np.power(base, self.alpha))
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
x_square = tf.reduce_sum(tf.square(x), 1, keepdims=True)
base = self.total_time - t + x_square / self.dim
base_alpha = tf.pow(base, self.alpha)
derivative = self.alpha * tf.pow(base, self.alpha - 1) * tf.cos(base_alpha)
term1 = tf.reduce_sum(tf.square(z), 1, keepdims=True)
term2 = -4.0 * (derivative ** 2) * x_square / (self.dim ** 2)
term3 = derivative
term4 = -0.5 * (
2.0 * derivative + 4.0 / (self.dim ** 2) * x_square * self.alpha * (
(self.alpha - 1) * tf.pow(base, self.alpha - 2) * tf.cos(base_alpha) - (
self.alpha * tf.pow(base, 2 * self.alpha - 2) * tf.sin(base_alpha)
)
)
)
return term1 + term2 + term3 + term4
def g_tf(self, t, x):
return tf.sin(
tf.pow(tf.reduce_sum(tf.square(x), 1, keepdims=True) / self.dim, self.alpha))
class ReactionDiffusion(Equation):
"""
Time-dependent reaction-diffusion-type example PDE in Section 4.7 of Comm. Math. Stat. paper
doi.org/10.1007/s40304-017-0117-6
"""
def __init__(self, eqn_config):
super(ReactionDiffusion, self).__init__(eqn_config)
self._kappa = 0.6
self.lambd = 1 / np.sqrt(self.dim)
self.x_init = np.zeros(self.dim)
self.y_init = 1 + self._kappa + np.sin(self.lambd * np.sum(self.x_init)) * np.exp(
-self.lambd * self.lambd * self.dim * self.total_time / 2)
def sample(self, num_sample):
dw_sample = np.random.normal(size=[num_sample, self.dim, self.num_time_interval]) * self.sqrt_delta_t
x_sample = np.zeros([num_sample, self.dim, self.num_time_interval + 1])
x_sample[:, :, 0] = np.ones([num_sample, self.dim]) * self.x_init
for i in range(self.num_time_interval):
x_sample[:, :, i + 1] = x_sample[:, :, i] + dw_sample[:, :, i]
return dw_sample, x_sample
def f_tf(self, t, x, y, z):
exp_term = tf.exp((self.lambd ** 2) * self.dim * (t - self.total_time) / 2)
sin_term = tf.sin(self.lambd * tf.reduce_sum(x, 1, keepdims=True))
temp = y - self._kappa - 1 - sin_term * exp_term
return tf.minimum(tf.constant(1.0, dtype=tf.float64), tf.square(temp))
def g_tf(self, t, x):
return 1 + self._kappa + tf.sin(self.lambd * tf.reduce_sum(x, 1, keepdims=True))
|
benchmark/megatron/benchmark_gpt_bert_one_case.py | yf225/alpa | 114 | 12761658 | <reponame>yf225/alpa
import argparse
import gc
from functools import partial
import os
import sys
import time
import numpy as np
from megatron.utils import average_losses_across_data_parallel_group
from megatron.model import BertModel, GPTModel
from megatron.model import ModelType
from megatron import mpu, initialize_megatron, get_args, get_timers
from megatron.training import train_step, setup_model_and_optimizer
import torch
import torch.nn.functional as F  # needed for F.cross_entropy in the BERT loss function below
from util import write_tsv, benchmark_func,\
compute_gpt_tflops, compute_gpt_parameter_count
GB = 1024 ** 3
def get_gpt_functions():
args = get_args()
micro_batch_size = args.micro_batch_size
seq_len = args.encoder_seq_length
def model_provider(pre_process=True, post_process=True):
model = GPTModel(
num_tokentypes=0,
parallel_output=True,
pre_process=pre_process,
post_process=post_process
)
return model
def loss_func(loss_mask, output_tensor):
losses = output_tensor.float()
loss_mask = loss_mask.view(-1).float()
loss = torch.sum(losses.view(-1) * loss_mask) / loss_mask.sum()
# Reduce loss for logging.
#averaged_loss = average_losses_across_data_parallel_group([loss])
averaged_loss = [0]
return loss, {'lm loss': averaged_loss[0]}
tokens = torch.ones((micro_batch_size, seq_len)).cuda().long()
labels = torch.ones((micro_batch_size, seq_len)).cuda().long()
loss_mask = torch.ones((micro_batch_size, seq_len)).cuda().int()
attention_mask = \
torch.ones(micro_batch_size, 1, seq_len, seq_len).cuda().bool()
position_ids = torch.ones((micro_batch_size, seq_len)).cuda().long()
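    # Synthetic all-ones batches stand in for a real data iterator, so the benchmark
    # measures compute and communication cost rather than data loading.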
def forward_step(data_iterator, model):
output_tensor = model(tokens, position_ids, attention_mask,
labels=labels)
return output_tensor, partial(loss_func, loss_mask)
return model_provider, loss_func, forward_step
def get_bert_functions():
args = get_args()
micro_batch_size = args.micro_batch_size
seq_len = args.encoder_seq_length
def model_provider(pre_process=True, post_process=True):
num_tokentypes = 2 if args.bert_binary_head else 0
model = BertModel(
num_tokentypes=num_tokentypes,
add_binary_head=args.bert_binary_head,
parallel_output=True,
pre_process=pre_process,
post_process=post_process)
return model
def loss_func(loss_mask, sentence_order, output_tensor):
lm_loss_, sop_logits = output_tensor
lm_loss_ = lm_loss_.float()
loss_mask = loss_mask.float()
lm_loss = torch.sum(
lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
if sop_logits is not None:
sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(),
sentence_order.view(-1),
ignore_index=-1)
sop_loss = sop_loss.float()
loss = lm_loss + sop_loss
#averaged_losses = average_losses_across_data_parallel_group(
# [lm_loss, sop_loss])
averaged_losses = [0, 0]
return loss, {'lm loss': averaged_losses[0],
'sop loss': averaged_losses[1]}
else:
loss = lm_loss
#averaged_losses = average_losses_across_data_parallel_group(
# [lm_loss])
averaged_losses = [0]
return loss, {'lm loss': averaged_losses[0]}
tokens = torch.ones((micro_batch_size, seq_len)).cuda().long()
padding_mask = \
torch.ones(micro_batch_size, seq_len).cuda().bool()
types = torch.ones((micro_batch_size, seq_len)).cuda().long()
lm_labels = torch.ones((micro_batch_size, seq_len)).cuda().long()
loss_mask = torch.ones((micro_batch_size, seq_len)).cuda().int()
sentence_order = None
    def forward_step(data_iterator, model):
        # Assigning to `types` inside this closure would make it a local name and raise
        # UnboundLocalError when the binary head is enabled, so use a separate variable.
        tokentype_ids = types if args.bert_binary_head else None
        output_tensor = model(tokens, padding_mask, tokentype_ids=tokentype_ids,
                              lm_labels=lm_labels)
        return output_tensor, partial(loss_func, loss_mask, sentence_order)
return model_provider, loss_func, forward_step
def benchmark_gpt_bert_one_case(benchmark_case, output_file_name):
# Model configs
(model_type, global_batch_size, seq_len, hidden_size, num_layers, num_heads,
vocab_size, num_micro_batches, parallel_mode, parallel_args) = benchmark_case
assert parallel_mode == "manual"
(prefer_reduce_scatter, use_remat, (dp, op, pp), force_batch_dim_mapping) = parallel_args
dp_size, tensor_mp_size, pipeline_mp_size = dp, op, pp
checkpoint_activations = use_remat
num_gpus = dp_size * tensor_mp_size * pipeline_mp_size
assert global_batch_size % (dp_size * num_micro_batches) == 0
micro_batch_size = global_batch_size // dp_size // num_micro_batches
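    # Each data-parallel replica runs num_micro_batches micro-batches of this size per
    # step, which together make up the global batch (see the assert above).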
# always use local DDP
ddp_impl = True
# Parallel configs
# Initialize megatron
sys.argv += ["--micro-batch-size", str(micro_batch_size)]
sys.argv += ["--tensor-model-parallel-size", str(tensor_mp_size)]
sys.argv += ["--pipeline-model-parallel-size", str(pipeline_mp_size)]
sys.argv += ["--global-batch-size", str(global_batch_size)]
sys.argv += ["--num-layers", str(num_layers)]
sys.argv += ["--hidden-size", str(hidden_size)]
sys.argv += ["--num-attention-heads", str(num_heads)]
sys.argv += ["--seq-length", str(seq_len)]
sys.argv += ["--max-position-embeddings", str(seq_len)]
sys.argv += ["--optimizer", "adam"]
sys.argv += ["--train-iters", "100"]
sys.argv += ["--lr", "0.00015"]
sys.argv += ["--bert-no-binary-head"]
sys.argv += ["--DDP-impl", "local" if ddp_impl else "torch"]
sys.argv += ["--fp16"]
sys.argv += ["--loss-scale", "8"]
if checkpoint_activations:
sys.argv += ["--checkpoint-activations"]
# sys.argv += ["--no-masked-softmax-fusion"]
# sys.argv += ["--no-async-tensor-model-parallel-allreduce"]
# sys.argv += ["--no-scatter-gather-tensors-in-pipeline"]
initialize_megatron()
args = get_args()
args.padded_vocab_size = vocab_size
rank = torch.distributed.get_rank()
# Check initialization
assert dp_size == mpu.get_data_parallel_world_size()
assert tensor_mp_size == mpu.get_tensor_model_parallel_world_size()
assert pipeline_mp_size == mpu.get_pipeline_model_parallel_world_size()
# Build model
if model_type == "gpt":
model_provider, loss_func, forward_step = get_gpt_functions()
elif model_type == "bert":
model_provider, loss_func, forward_step = get_bert_functions()
model, optimizer, lr_scheduler = setup_model_and_optimizer(model_provider,
model_type=ModelType.encoder_or_decoder)
parameter_count = compute_gpt_parameter_count(
num_layers, hidden_size, vocab_size)
def run_func():
train_step(forward_step, None, model, optimizer, lr_scheduler)
# Warmup and reset timers
run_func()
timers = get_timers()
names = list(timers.timers.keys())
for name in names:
timers(name).reset()
# Benchmark step time
repeat = 2
number = 1
costs = benchmark_func(run_func, sync_func=None,
warmup=0, repeat=repeat, number=number)
timers.log(names, normalizer=repeat * number)
# Print results
if rank == 0:
peak_mem = torch.cuda.max_memory_allocated(0)
tflops = compute_gpt_tflops(global_batch_size, seq_len, num_layers,
hidden_size, vocab_size,
torch.distributed.get_world_size(),
np.mean(costs))
tflops_ckpt = compute_gpt_tflops(global_batch_size, seq_len, num_layers,
hidden_size, vocab_size,
torch.distributed.get_world_size(),
np.mean(costs), True)
heads = ["Type", "Model Config", "Parallel Config", "P-mesh shape", "#Microbatch",
"Force DP", "Remat", "Mean Time", "Std Time", "#Params", "TFLOPs", "TFLOPs (ckpt)",
"Peak Mem"]
values = [model_type, str(benchmark_case[1:6]),
str((dp_size, tensor_mp_size, pipeline_mp_size)),
"N/A", str(num_micro_batches), "N/A",
str(checkpoint_activations), f"{np.mean(costs):.3f}", f"{np.std(costs):.3f}",
f"{parameter_count/1e9:.3f}", f"{tflops:.2f}", f"{tflops_ckpt:.2f}",
f"{peak_mem/GB:5.3f}"]
write_tsv(heads, values, f"{model_type}_megatron_{output_file_name}_rank{rank}.tsv")
print("Sleeping for 30 seconds before starting the next case. ")
time.sleep(30)
if __name__ == "__main__":
case = eval(sys.argv[-2])
output_file_name = sys.argv[-1]
del sys.argv[-1]
del sys.argv[-1]
benchmark_gpt_bert_one_case(case, output_file_name)
|
datasets/wiki_auto/wiki_auto.py | MitchellTesla/datasets | 10,608 | 12761664 | <reponame>MitchellTesla/datasets
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WikiAuto dataset for Text Simplification"""
import json
import datasets
_CITATION = """\
@inproceedings{acl/JiangMLZX20,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
editor = {<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Neural {CRF} Model for Sentence Alignment in Text Simplification},
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational
Linguistics, {ACL} 2020, Online, July 5-10, 2020},
pages = {7943--7960},
publisher = {Association for Computational Linguistics},
year = {2020},
url = {https://www.aclweb.org/anthology/2020.acl-main.709/}
}
"""
# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
WikiAuto provides a set of aligned sentences from English Wikipedia and Simple English Wikipedia
as a resource to train sentence simplification systems. The authors first crowd-sourced a set of manual alignments
between sentences in a subset of the Simple English Wikipedia and their corresponding versions in English Wikipedia
(this corresponds to the `manual` config), then trained a neural CRF system to predict these alignments.
The trained model was then applied to the other articles in Simple English Wikipedia with an English counterpart to
create a larger corpus of aligned sentences (corresponding to the `auto`, `auto_acl`, `auto_full_no_split`, and `auto_full_with_split` configs here).
"""
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = "CC-BY-SA 3.0"
# TODO: Add link to the official dataset URLs here
# The HuggingFace datasets library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
"manual": {
"train": "https://www.dropbox.com/sh/ohqaw41v48c7e5p/AACdl4UPKtu7CMMa-CJhz4G7a/wiki-manual/train.tsv?dl=1",
"dev": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/dev.tsv",
"test": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-manual/test.tsv",
},
"auto_acl": {
"normal": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/ACL2020/train.src",
"simple": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/ACL2020/train.dst",
},
"auto_full_no_split": {
"normal": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_no_split/train.src",
"simple": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_no_split/train.dst",
},
"auto_full_with_split": {
"normal": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.src",
"simple": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.dst",
},
"auto": {
"part_1": "https://www.dropbox.com/sh/ohqaw41v48c7e5p/AAATBDhU1zpdcT5x5WgO8DMaa/wiki-auto-all-data/wiki-auto-part-1-data.json?dl=1",
"part_2": "https://www.dropbox.com/sh/ohqaw41v48c7e5p/AAATgPkjo_tPt9z12vZxJ3MRa/wiki-auto-all-data/wiki-auto-part-2-data.json?dl=1",
},
}
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
class WikiAuto(datasets.GeneratorBasedBuilder):
"""WikiAuto dataset for sentence simplification"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="manual",
version=VERSION,
description="A set of 10K Wikipedia sentence pairs aligned by crowd workers.",
),
datasets.BuilderConfig(
name="auto_acl",
version=VERSION,
description="Automatically aligned and filtered sentence pairs used to train the ACL2020 system.",
),
datasets.BuilderConfig(
name="auto_full_no_split",
version=VERSION,
description="All automatically aligned sentence pairs without sentence splitting.",
),
datasets.BuilderConfig(
name="auto_full_with_split",
version=VERSION,
description="All automatically aligned sentence pairs with sentence splitting.",
),
datasets.BuilderConfig(
name="auto", version=VERSION, description="A large set of automatically aligned sentence pairs."
),
]
DEFAULT_CONFIG_NAME = "auto"
def _info(self):
if self.config.name == "manual": # This is the name of the configuration selected in BUILDER_CONFIGS above
features = datasets.Features(
{
"alignment_label": datasets.ClassLabel(names=["notAligned", "aligned", "partialAligned"]),
"normal_sentence_id": datasets.Value("string"),
"simple_sentence_id": datasets.Value("string"),
"normal_sentence": datasets.Value("string"),
"simple_sentence": datasets.Value("string"),
"gleu_score": datasets.Value("float32"),
}
)
elif (
self.config.name == "auto_acl"
or self.config.name == "auto_full_no_split"
or self.config.name == "auto_full_with_split"
):
features = datasets.Features(
{
"normal_sentence": datasets.Value("string"),
"simple_sentence": datasets.Value("string"),
}
)
else:
features = datasets.Features(
{
"example_id": datasets.Value("string"),
"normal": {
"normal_article_id": datasets.Value("int32"),
"normal_article_title": datasets.Value("string"),
"normal_article_url": datasets.Value("string"),
"normal_article_content": datasets.Sequence(
{
"normal_sentence_id": datasets.Value("string"),
"normal_sentence": datasets.Value("string"),
}
),
},
"simple": {
"simple_article_id": datasets.Value("int32"),
"simple_article_title": datasets.Value("string"),
"simple_article_url": datasets.Value("string"),
"simple_article_content": datasets.Sequence(
{
"simple_sentence_id": datasets.Value("string"),
"simple_sentence": datasets.Value("string"),
}
),
},
"paragraph_alignment": datasets.Sequence(
{
"normal_paragraph_id": datasets.Value("string"),
"simple_paragraph_id": datasets.Value("string"),
}
),
"sentence_alignment": datasets.Sequence(
{
"normal_sentence_id": datasets.Value("string"),
"simple_sentence_id": datasets.Value("string"),
}
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage="https://github.com/chaojiang06/wiki-auto",
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
my_urls = _URLs[self.config.name]
data_dir = dl_manager.download_and_extract(my_urls)
if self.config.name in ["manual", "auto"]:
return [
datasets.SplitGenerator(
name=spl,
gen_kwargs={
"filepaths": data_dir,
"split": spl,
},
)
for spl in data_dir
]
else:
return [
datasets.SplitGenerator(
name="full",
gen_kwargs={"filepaths": data_dir, "split": "full"},
)
]
def _generate_examples(self, filepaths, split):
if self.config.name == "manual":
keys = [
"alignment_label",
"simple_sentence_id",
"normal_sentence_id",
"simple_sentence",
"normal_sentence",
"gleu_score",
]
with open(filepaths[split], encoding="utf-8") as f:
for id_, line in enumerate(f):
values = line.strip().split("\t")
assert len(values) == 6, f"Not enough fields in ---- {line} --- {values}"
yield id_, dict(
[(k, val) if k != "gleu_score" else (k, float(val)) for k, val in zip(keys, values)]
)
elif (
self.config.name == "auto_acl"
or self.config.name == "auto_full_no_split"
or self.config.name == "auto_full_with_split"
):
with open(filepaths["normal"], encoding="utf-8") as fi:
with open(filepaths["simple"], encoding="utf-8") as fo:
for id_, (norm_se, simp_se) in enumerate(zip(fi, fo)):
yield id_, {
"normal_sentence": norm_se,
"simple_sentence": simp_se,
}
else:
dataset_dict = json.load(open(filepaths[split], encoding="utf-8"))
for id_, (eid, example_dict) in enumerate(dataset_dict.items()):
res = {
"example_id": eid,
"normal": {
"normal_article_id": example_dict["normal"]["id"],
"normal_article_title": example_dict["normal"]["title"],
"normal_article_url": example_dict["normal"]["url"],
"normal_article_content": {
"normal_sentence_id": [
sen_id for sen_id, sen_txt in example_dict["normal"]["content"].items()
],
"normal_sentence": [
sen_txt for sen_id, sen_txt in example_dict["normal"]["content"].items()
],
},
},
"simple": {
"simple_article_id": example_dict["simple"]["id"],
"simple_article_title": example_dict["simple"]["title"],
"simple_article_url": example_dict["simple"]["url"],
"simple_article_content": {
"simple_sentence_id": [
sen_id for sen_id, sen_txt in example_dict["simple"]["content"].items()
],
"simple_sentence": [
sen_txt for sen_id, sen_txt in example_dict["simple"]["content"].items()
],
},
},
"paragraph_alignment": {
"normal_paragraph_id": [
norm_id for simp_id, norm_id in example_dict.get("paragraph_alignment", [])
],
"simple_paragraph_id": [
simp_id for simp_id, norm_id in example_dict.get("paragraph_alignment", [])
],
},
"sentence_alignment": {
"normal_sentence_id": [
norm_id for simp_id, norm_id in example_dict.get("sentence_alignment", [])
],
"simple_sentence_id": [
simp_id for simp_id, norm_id in example_dict.get("sentence_alignment", [])
],
},
}
yield id_, res
|
chainer/_environment_check.py | zjzh/chainer | 3,705 | 12761669 | <reponame>zjzh/chainer
from __future__ import absolute_import
import os
import sys
import warnings
import numpy.distutils.system_info
import pkg_resources
import chainer
def _check_python_350():
if sys.version_info[:3] == (3, 5, 0):
if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
msg = """
Chainer does not work with Python 3.5.0.
We strongly recommend to use another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set 1 to CHAINER_PYTHON_350_FORCE environment variable."""
raise Exception(msg)
def _check_osx_numpy_backend():
if sys.platform != 'darwin':
return
blas_opt_info = numpy.distutils.system_info.get_info('blas_opt')
if blas_opt_info:
extra_link_args = blas_opt_info.get('extra_link_args')
if extra_link_args and '-Wl,Accelerate' in extra_link_args:
warnings.warn('''\
Accelerate has been detected as a NumPy backend library.
vecLib, which is a part of Accelerate, is known not to work correctly with Chainer.
We recommend using other BLAS libraries such as OpenBLAS.
For details of the issue, please see
https://docs.chainer.org/en/stable/tips.html#mnist-example-does-not-converge-in-cpu-mode-on-mac-os-x.
Please be aware that Mac OS X is not an officially supported OS.
''') # NOQA
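# Each entry of chainer._version._optional_dependencies is expected to provide the
# 'name', 'packages', 'specifier' and 'help' keys read below.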
def _check_optional_dependencies():
for dep in chainer._version._optional_dependencies:
name = dep['name']
pkgs = dep['packages']
spec = dep['specifier']
help = dep['help']
installed = False
for pkg in pkgs:
found = False
requirement = pkg
if os.environ.get('CHAINER_WARN_VERSION_MISMATCH', '1') == '1':
requirement = '{}{}'.format(pkg, spec)
try:
pkg_resources.require(requirement)
found = True
except pkg_resources.DistributionNotFound:
continue
except pkg_resources.VersionConflict:
msg = '''
--------------------------------------------------------------------------------
{name} ({pkg}) version {version} may not be compatible with this version of Chainer.
Please consider installing the supported version by running:
$ pip install '{requirement}'
See the following page for more details:
{help}
--------------------------------------------------------------------------------
''' # NOQA
warnings.warn(msg.format(
name=name, pkg=pkg,
version=pkg_resources.get_distribution(pkg).version,
requirement=requirement, help=help))
found = True
except Exception:
warnings.warn(
'Failed to check requirement: {}'.format(requirement))
break
if found:
if installed:
warnings.warn('''
--------------------------------------------------------------------------------
Multiple installations of the {name} package have been detected.
You should select only one package from {pkgs}.
Follow these steps to resolve this issue:
1. `pip list` to list {name} packages installed
2. `pip uninstall <package name>` to uninstall all {name} packages
3. `pip install <package name>` to install the proper one
--------------------------------------------------------------------------------
'''.format(name=name, pkgs=pkgs))
installed = True
def check():
_check_python_350()
_check_osx_numpy_backend()
_check_optional_dependencies()
|
contrib/share_driver_hooks/zaqar_notification_example_consumer.py | kpawar89/manila | 159 | 12761671 | #!/usr/bin/env python
#
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import signal
import sys
import time
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import timeutils
import six
opts = [
cfg.IntOpt(
"consume_interval",
default=5,
deprecated_name="sleep_between_consume_attempts",
help=("Time that script will sleep between requests for consuming "
"Zaqar messages in seconds."),
),
cfg.StrOpt(
"mount_dir",
default="/tmp",
help="Directory that will contain all mounted shares."
),
cfg.ListOpt(
"expected_ip_addresses",
default=[],
help=("List of IP addresses that are expected to be found in access "
"rules to trigger [un]mount operation for a share.")
),
]
CONF = cfg.CONF
def print_with_time(data):
time = six.text_type(timeutils.utcnow())
print(time + " " + six.text_type(data))
def print_pretty_dict(d):
pprint.pprint(d)
def pop_zaqar_messages(client, queues_names):
if not isinstance(queues_names, (list, set, tuple)):
queues_names = (queues_names, )
try:
user = client.conf['auth_opts']['options']['os_username']
project = client.conf['auth_opts']['options']['os_project_name']
messages = []
for queue_name in queues_names:
queue = client.queue(queue_name)
messages.extend([six.text_type(m.body) for m in queue.pop()])
print_with_time(
"Received %(len)s message[s] from '%(q)s' "
"queue using '%(u)s' user and '%(p)s' project." % {
'len': len(messages),
'q': queue_name,
'u': user,
'p': project,
}
)
return messages
except Exception as e:
print_with_time("Caught exception - %s" % e)
return []
def signal_handler(signal, frame):
print("")
print_with_time("Ctrl+C was pressed. Shutting down consumer.")
sys.exit(0)
def parse_str_to_dict(string):
if not isinstance(string, six.string_types):
return string
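    # Messages arrive as python-literal strings; eval() is used here for simplicity.
    # ast.literal_eval would be a stricter drop-in if the queue content is untrusted.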
result = eval(string)
return result
def handle_message(data):
"""Handles consumed message.
Expected structure of a message is following:
{'data': {
'access_rules': [
{
'access_id': u'b28268b9-36c6-40d3-a485-22534077328f',
'access_instance_id':
u'd137b2cb-f549-4141-9dd7-36b2789fb973',
'access_level': u'rw',
'access_state': u'active',
'access_to': u'7.7.7.7',
'access_type': u'ip',
}
],
'availability_zone': u'nova',
'export_locations': [u'127.0.0.1:/path/to/nfs/share'],
'is_allow_operation': True,
'share_id': u'053eae9a-726f-4f7e-8502-49d7b1adf290',
'share_instance_id': u'dc33e554-e0b9-40f5-9046-c198716d73a0',
'share_proto': u'NFS'
}}
"""
if 'data' in data.keys():
data = data['data']
valid_access = (
'access_rules' in data and len(data['access_rules']) == 1 and
data['access_rules'][0].get('access_type', '?').lower() == 'ip' and
data.get('share_proto', '?').lower() == 'nfs'
)
if valid_access:
is_allow_operation = data['is_allow_operation']
export_location = data['export_locations'][0]
if is_allow_operation:
mount_share(export_location, data['access_to'])
else:
unmount_share(export_location, data['access_to'])
else:
print_with_time('Do nothing with above message.')
def execute(cmd):
try:
print_with_time('Executing following command: \n%s' % cmd)
cmd = cmd.split()
stdout, stderr = processutils.execute(*cmd)
if stderr:
print_with_time('Got error: %s' % stderr)
return stdout, stderr
except Exception as e:
print_with_time('Got following error: %s' % e)
return False, True
def is_share_mounted(mount_point):
mounts, stderr = execute('mount')
return mount_point in mounts
def rule_affects_me(ip_or_cidr):
if '/' in ip_or_cidr:
net = netaddr.IPNetwork(ip_or_cidr)
for my_ip in CONF.zaqar.expected_ip_addresses:
if netaddr.IPAddress(my_ip) in net:
return True
else:
for my_ip in CONF.zaqar.expected_ip_addresses:
if my_ip == ip_or_cidr:
return True
return False
def mount_share(export_location, access_to):
data = {
'mount_point': os.path.join(CONF.zaqar.mount_dir,
export_location.split('/')[-1]),
'export_location': export_location,
}
if (rule_affects_me(access_to) and
not is_share_mounted(data['mount_point'])):
        print_with_time(
            "Mounting '%(export_location)s' share to %(mount_point)s." % data)
execute('sudo mkdir -p %(mount_point)s' % data)
stdout, stderr = execute(
'sudo mount.nfs %(export_location)s %(mount_point)s' % data)
if stderr:
print_with_time("Mount operation failed.")
else:
print_with_time("Mount operation went OK.")
def unmount_share(export_location, access_to):
if rule_affects_me(access_to) and is_share_mounted(export_location):
print_with_time("Unmounting '%(export_location)s' share.")
stdout, stderr = execute('sudo umount %s' % export_location)
if stderr:
print_with_time("Unmount operation failed.")
else:
print_with_time("Unmount operation went OK.")
def main():
# Register other local modules
cur = os.path.dirname(__file__)
pathtest = os.path.join(cur)
sys.path.append(pathtest)
# Init configuration
CONF(sys.argv[1:], project="manila_notifier", version=1.0)
CONF.register_opts(opts, group="zaqar")
# Import common config and Zaqar client
import zaqarclientwrapper
# Handle SIGINT
signal.signal(signal.SIGINT, signal_handler)
# Run consumer
print_with_time("Consumer was successfully run.")
while(True):
messages = pop_zaqar_messages(
zaqarclientwrapper.ZAQARCLIENT, CONF.zaqar.zaqar_queues)
if not messages:
message = ("No new messages in '%s' queue[s] "
"found." % ','.join(CONF.zaqar.zaqar_queues))
else:
message = "Got following messages:"
print_with_time(message)
for message in messages:
message = parse_str_to_dict(message)
print_pretty_dict(message)
handle_message(message)
time.sleep(CONF.zaqar.consume_interval)
if __name__ == '__main__':
main()
|
tests/test_remote_debug.py | codelv/enaml-native | 237 | 12761706 | """
Copyright (c) 2017, <NAME>.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on Oct 4, 2017
@author: jrm
"""
import sh
import sys
def main():
# Make sure instance is cleared
from enaml.application import Application
Application._instance = None
from enamlnative.android.app import AndroidApplication
app = AndroidApplication(
debug=True,
dev='remote', # "10.0.2.2" # or 'server'
load_view=load_view
)
app.timed_call(5000, run_gestures, app)
app.start()
def run_gestures(app):
for i in range(30):
#: Swipe to next page
t = i*2000
app.timed_call(t,
sh.adb, *'shell input swipe 250 300 -800 300'.split(), _bg=True)
#: Tap a few places
for j in range(4):
app.timed_call(t+i*200,
sh.adb, *'shell input tap 500 150'.split(), _bg=True)
app.timed_call(120000, app.stop)
def load_view(app):
import enaml
#: For debug purposes only!
app.widget.resetBridgeStats()
app.widget.resetBridgeCache()
with enaml.imports():
import view
if app.view:
reload(view)
app.view = view.ContentView()
#: Time how long it takes
app.show_view()
def test_remote_debug():
#sh.pip('install tornado --user'.split())
enaml_native = sh.Command('enaml-native')
enaml_native('start', '--remote-debugging', _bg=True)
#: Add
sys.path.append('src/apps/')
sys.path.append('src/')
#: Init remote nativehooks implementation
from enamlnative.core import remotehooks
remotehooks.init()
main()
|
.dev_scripts/benchmark/gather_train_benchmark_metric.py | kevin3314/mmtracking | 2,226 | 12761723 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import json
import os.path as osp
import mmcv
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlutils
from xlutils.copy import copy
except ImportError:
xlutils = None
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--excel', type=str, help='input path of excel to be recorded')
parser.add_argument(
'--ncol', type=int, help='Number of column to be modified or appended')
args = parser.parse_args()
return args
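# The txt file produced by benchmark_filter is expected to list one config path per
# line; only lines containing 'configs' are kept below.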
if __name__ == '__main__':
args = parse_args()
if args.excel:
assert args.ncol, 'Please specify "--excel" and "--ncol" ' \
'at the same time'
if xlrd is None:
raise RuntimeError(
'xlrd is not installed,'
'Please use “pip install xlrd==1.2.0” to install')
if xlutils is None:
raise RuntimeError(
'xlutils is not installed,'
'Please use “pip install xlutils==2.0.0” to install')
readbook = xlrd.open_workbook(args.excel)
root_path = args.root
all_results_dict = {}
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
model_cfgs = [_ for _ in model_cfgs if 'configs' in _]
for i, config in enumerate(model_cfgs):
config = config.strip()
if len(config) == 0:
continue
config_name = osp.split(config)[-1]
config_name = osp.splitext(config_name)[0]
result_path = osp.join(root_path, config_name)
if osp.exists(result_path):
# 1 read config and excel
cfg = mmcv.Config.fromfile(config)
total_epochs = cfg.total_epochs
# the first metric will be used to find the best ckpt
has_final_ckpt = True
if 'vid' in config:
eval_metrics = ['bbox_mAP_50']
elif 'mot' in config:
eval_metrics = ['MOTA', 'IDF1']
# tracktor and deepsort don't have ckpt.
has_final_ckpt = False
elif 'sot' in config:
eval_metrics = ['success', 'norm_precision', 'precision']
else:
raise NotImplementedError(
f'Not supported config: {config}')
if args.excel:
xlrw = copy(readbook)
if 'vid' in config:
sheet = readbook.sheet_by_name('vid')
table = xlrw.get_sheet('vid')
elif 'mot' in config:
sheet = readbook.sheet_by_name('mot')
table = xlrw.get_sheet('mot')
elif 'sot' in config:
sheet = readbook.sheet_by_name('sot')
table = xlrw.get_sheet('sot')
sheet_info = {}
for i in range(6, sheet.nrows):
sheet_info[sheet.row_values(i)[0]] = i
# 2 determine whether total_epochs ckpt exists
ckpt_path = f'epoch_{total_epochs}.pth'
if osp.exists(osp.join(result_path, ckpt_path)) or \
not has_final_ckpt:
log_json_path = list(
sorted(glob.glob(osp.join(result_path,
'*.log.json'))))[-1]
# 3 read metric
result_dict = dict()
with open(log_json_path, 'r') as f:
for line in f.readlines():
log_line = json.loads(line)
if 'mode' not in log_line.keys():
continue
if log_line['mode'] == 'val' or \
log_line['mode'] == 'test':
result_dict[f"epoch_{log_line['epoch']}"] = {
key: log_line[key]
for key in eval_metrics if key in log_line
}
# 4 find the best ckpt
best_epoch_results = dict()
for epoch in result_dict:
if len(best_epoch_results) == 0:
best_epoch_results = result_dict[epoch]
else:
if best_epoch_results[eval_metrics[
0]] < result_dict[epoch][eval_metrics[0]]:
best_epoch_results = result_dict[epoch]
for metric in best_epoch_results:
if 'success' in best_epoch_results:
performance = round(best_epoch_results[metric],
1)
else:
performance = round(
best_epoch_results[metric] * 100, 1)
best_epoch_results[metric] = performance
all_results_dict[config] = best_epoch_results
# update and append excel content
if args.excel:
performance = ''
for metric in best_epoch_results:
performance += f'{best_epoch_results[metric]}/'
row_num = sheet_info.get(config, None)
if row_num:
table.write(row_num, args.ncol, performance)
else:
table.write(sheet.nrows, 0, config)
table.write(sheet.nrows, args.ncol, performance)
                    filename, suffix = osp.splitext(args.excel)
                    xlrw.save(f'{filename}_o{suffix}')
                    readbook = xlrd.open_workbook(f'{filename}_o{suffix}')
else:
print(f'{config} not exist: {ckpt_path}')
else:
print(f'not exist: {config}')
    # 5 save or print results
print('===================================')
for config_name, metrics in all_results_dict.items():
print(config_name, metrics)
print('===================================')
if args.excel:
        print(f'>>> Output {filename}_o{suffix}')
|
gravity/migrations/0003_tiltbridge_mdns_id.py | fossabot/fermentrack | 114 | 12761732 | <reponame>fossabot/fermentrack
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-03-18 23:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('gravity', '0002_tilt_refactor'),
]
operations = [
# Converting from AlterField to RemoveField/AddField because of issues with Django 2.0+ migration:
# https://docs.djangoproject.com/en/3.0/releases/2.0/#foreign-key-constraints-are-now-enabled-on-sqlite
migrations.RemoveField(
model_name='tiltbridge',
name='api_key',
),
migrations.AddField(
model_name='tiltbridge',
name='mdns_id',
field=models.CharField(help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'", max_length=64, primary_key=True, serialize=False, validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z0-9]+$')]),
),
migrations.AlterField(
model_name='tiltbridge',
name='mdns_id',
field=models.CharField(default='tiltbridge', help_text="mDNS ID used by the TiltBridge to identify itself both on your network and to Fermentrack. NOTE - Prefix only - do not include '.local'", max_length=64, primary_key=True, serialize=False),
preserve_default=False,
),
]
|
saleor/menu/migrations/0009_remove_menu_json_content.py | elwoodxblues/saleor | 15,337 | 12761737 | # Generated by Django 2.0.8 on 2018-09-13 13:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("menu", "0008_menu_json_content_new")]
operations = [migrations.RemoveField(model_name="menu", name="json_content")]
|
src/tools/_predict.py | TensorFX/tensorfx | 204 | 12761738 | # Copyright 2016 TensorLab. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# _predict.py
# Implements PredictCommand.
import json
import os
import sys
import tensorflow as tf
import tensorfx as tfx
class PredictCommand(object):
"""Implements the tfx predict command to use a model to produce predictions.
"""
name = 'predict'
help = 'Produces predictions using a model.'
extra = False
@staticmethod
def build_parser(parser):
parser.add_argument('--model', metavar='path', type=str, required=True,
help='The path to a previously trained model.')
parser.add_argument('--input', metavar='path', type=str,
help='The path to a file with input instances. Uses stdin by default.')
parser.add_argument('--output', metavar='path', type=str,
help='The path to a file to write outputs to. Uses stdout by default.')
parser.add_argument('--batch-size', metavar='instances', type=int, default=10,
help='The number of instances to predict per batch.')
@staticmethod
def run(args):
# TODO: Figure out where to do JSON and TF initialization in more common way.
json.encoder.FLOAT_REPR = lambda f: ('%.5f' % f)
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf.logging.ERROR)
model = tfx.prediction.Model.load(args.model)
with TextSource(args.input, args.batch_size) as source, TextSink(args.output) as sink:
for instances in source:
predictions = model.predict(instances)
lines = map(lambda p: json.dumps(p, sort_keys=True), predictions)
sink.write(lines)
class TextSource(object):
def __init__(self, file=None, batch_size=1):
self._file = file
self._batch_size = batch_size
def __enter__(self):
self._stream = open(self._file, 'r') if self._file else sys.stdin
return self
def __exit__(self, type, value, traceback):
if self._stream and self._file:
self._stream.close()
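  # __iter__ groups input lines into batches of `batch_size` instances and yields any
  # remainder at EOF, so predictions are requested in bounded-size chunks.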
def __iter__(self):
instances = []
while True:
instance = self._stream.readline().strip()
if not instance:
# EOF
break
instances.append(instance)
if len(instances) == self._batch_size:
# A desired batch of instances is available
yield instances
instances = []
if instances:
yield instances
class TextSink(object):
def __init__(self, file=None):
self._file = file
def __enter__(self):
self._stream = open(self._file, 'w') if self._file else sys.stdout
return self
def __exit__(self, type, value, traceback):
if self._stream and self._file:
self._stream.close()
def write(self, lines):
for l in lines:
self._stream.write(l + '\n')
|
sources_non_forked/vim-visual-multi/test/tests/oO/commands.py | doitsu2014/vimrc | 2,083 | 12761748 | <filename>sources_non_forked/vim-visual-multi/test/tests/oO/commands.py<gh_stars>1000+
# insert CR, insert line above
keys(':setf vim\<CR>jw')
keys('4\<C-Down>')
keys('Ea')
keys('\<CR>')
keys('CARRYING OVER ')
keys('\<Esc>A')
keys('\<CR>')
keys('CR at EOL')
keys('\<Esc>k')
keys('O')
keys('above CR')
keys('\<Esc>\<Esc>')
|
analysis/paper_plot.py | MGheini/unify-parameter-efficient-tuning | 101 | 12761761 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
import os
from collections import defaultdict
labelsize = 16
legendsize = 14
mpl.rcParams['xtick.labelsize'] = labelsize
mpl.rcParams['ytick.labelsize'] = labelsize
mpl.rcParams['axes.labelsize'] = labelsize
mpl.rcParams['axes.titlesize'] = labelsize
mpl.rcParams['font.size'] = labelsize
plt.style.use('seaborn-deep')
# plt.rcParams.update({
# "text.usetex": True,
# "font.family": "sans-serif",
# "font.sans-serif": ["Helvetica"]})
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['text.usetex'] = True
colormap = plt.cm.gist_ncar
def plot_ax(ax, params, ys, legends, ylabel, full, title=None, add_legend=True):
labelsize = 20
legendsize = 20
mpl.rcParams['xtick.labelsize'] = labelsize
mpl.rcParams['ytick.labelsize'] = labelsize
mpl.rcParams['axes.labelsize'] = labelsize
mpl.rcParams['axes.titlesize'] = labelsize
mpl.rcParams['font.size'] = labelsize
color_base = ["blue", "red", "green", "tab:orange", "purple", "tab:cyan"]
markers = ["o", "v", "s", "*", "8"]
sorted_xs = list(set([x for xs in params for x in xs]))
sorted_xs = sorted(sorted_xs)
xticks = [format(xx) for xx in sorted_xs]
for ii, (x, y) in enumerate(zip(params[::-1], ys[::-1])):
ax.plot(x, y, c=color_base[ii], marker=markers[ii], ms=10, linewidth=3)
ax.set_xlim(ax.get_xlim()[0], 15)
p1 = ax.get_xlim()
p1 = [p1[0]-0.1, p1[1]+1.0]
p2 = [full, full]
ax.plot(p1, p2, "--", ms=6, c="black", linewidth=2)
# ax.set_xscale('log', basex=10)
legends = legends[::-1] + ["Full Fine-tuning", "Ours"]
if add_legend:
ax.legend(legends, loc="best", fontsize=legendsize)
# ax.set_xticks(sorted_xs, xticks)
if title is not None:
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
else:
ax.set(title=title, xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
ax.grid()
ax.set_facecolor("white")
def plot_intro():
color_base = ["blue", "purple", "green", "tab:orange", "red", "tab:cyan"]
# color_base = ["blue", "blue", "blue", "blue", "red", "tab:cyan"]
color_base = ["dodgerblue", "mediumvioletred", "olivedrab", "goldenrod", "firebrick", "tab:cyan"]
color_base = ["dodgerblue", "hotpink", "olivedrab", "goldenrod", "crimson", "tab:cyan"]
color_base = ["gray", "dodgerblue", "olivedrab", "hotpink", "crimson", "tab:cyan"]
markers = ["o", "v", "s", "*", "D"]
markers = ["o", "o", "o", "o", "D"]
fig, ax = plt.subplots(1, 1)
full = 21.94
legends = ["Full Fine-tuning", "BitFit", "PrefixTuning", "Adapter", "LoRA", "Ours"]
params = [0.08, 3.6, 12.3, 14.4, 6.7]
xsum = [17.32, 20.46, 20.98, 20.5, 21.9]
for ii, (param, r2) in enumerate(zip(params, xsum)):
ax.scatter(param, r2, c=color_base[ii], marker=markers[ii], edgecolor='black', linewidth=1, s=300)
ax.set_xlim(ax.get_xlim()[0], 15)
p1 = ax.get_xlim()
p1 = [p1[0]-0.1, p1[1]+1.0]
p2 = [full, full]
ax.plot(p1, p2, "--", ms=6, c="black", linewidth=2)
# ax.legend(legends, loc='best', fontsize=12)
ax.grid()
ax.set_facecolor("white")
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel="ROUGE-2")
fig.set_size_inches(5, 5)
fig.savefig("intro.pdf", bbox_inches='tight')
def compute_params(r):
base = 200 * 2 * 3 * 1024 * 12
base_params = 3.6
print(r * 1.0 / base * base_params)
return r * 1.0 / base * base_params
def format(n):
return r"{:.1f}%".format(n)
def plot_overview():
d, L = 1024, 12
# fig, axes = plt.subplots(2, 1)
# percentage of parameters
params_bitfit = [0.08]
# params_prompt = [compute_params(d * 1), compute_params(d * 30), compute_params(d * 200), compute_params(d * 300)]
params_prompt = [compute_params(d * 300)]
params_pt = [compute_params(1 * 2 * 3 * d * L), compute_params(30 * 2 * 3 * d * L),
compute_params(200 * 2 * 3 * d * L), compute_params(512 * 2 * 3 * d * L)]
params_hously_adapter_ffn_ho = [compute_params(30 * 2 * 2 * d * L),
compute_params(200 * 2 * 2 * d * L),
compute_params(512 * 2 * 2 * d * L), compute_params(1024 * 2 * 2 * d * L)]
params_lora_attn = [compute_params(1*4*3*d*L), compute_params(30*4*3*d*L), compute_params(200*4*3*d*L),
compute_params(400*4*3*d*L)]
params_lora_ffn = [compute_params(1*10*2*d*L), compute_params(102*10*2*d*L), compute_params(120*10*2*d*L)]
params_hously_adapter_attn_ho = [compute_params(1 * 2 * 3 * d * L), compute_params(30 * 2 * 3 * d * L),
compute_params(200 * 2 * 3 * d * L),
compute_params(512 * 2 * 3 * d * L), compute_params(1024 * 2 * 3 * d * L)]
# print("prompt: 300")
# print(params_prompt)
# print("pt: 1, 30, 200, 512")
# print(params_pt)
# print("ho/hi ffn: 1, 30, 200, 512, 1024")
# print(params_hously_adapter_ffn_ho)
# print("ho/hi attn: 1, 30, 200, 512, 1024")
# print(params_hously_adapter_attn_ho)
# print("lora attn: 1, 30, 200, 400")
# print(params_lora_attn)
# print("lora ffn: 1, 102, 120")
# print(params_lora_ffn)
# xsum
xsum_bitfit = [17.32]
# xsum_prompt = [5.33, 14, 15.49, 15.98] # 1, 30?, 200, 300
# xsum_prompt = [15.98] # 300
xsum_pt = [18.14, 20.01, 20.46, 20.40] # 1, 30, 200, 512
xsum_hously_adapter_ffn_ho = [17, 18.81, 20.4, 20.58, 20.98] # 1, 30, 200?, 512?, 1024?
xsum_hously_adapter_ffn_ho = [18.81, 20.4, 20.58, 20.98] # 1, 30, 200?, 512?, 1024?
xsum_lora_attn = [17.4, 19.59, 20.29, 20.5] # 1, 30, 200, 400
# mt
mt_bitfit = [26.4]
# mt_prompt = [6.0, 16.7, 21] # 1, 30, 200
# mt_prompt = [21] # 200
mt_pt = [30.2, 35.2, 35.6, 35.1] # 1, 30, 200, 512
mt_hously_adapter_ffn_ho = [24.3, 33.0, 35.6, 36.3, 36.7] # 1, 30, 200, 512, 1024
mt_hously_adapter_ffn_ho = [33.0, 35.6, 36.3, 36.7] # 1, 30, 200, 512, 1024
mt_lora_attn = [25.5, 34.2, 36.2, 36.6] # 1, 30, 200, 400
# legends = ["BitFit (bias)", "PromptTuning (input)", "PrefixTuning (attn)", "Adapter (ffn)", "LoRA (attn)"]
# plot_ax(axes[0], [params_bitfit, params_prompt, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
# [xsum_bitfit, xsum_prompt, xsum_pt, xsum_hously_adapter_ffn_ho, xsum_lora_attn], legends, "ROUGE-2", full=21.94, ours=21.90,
# title="(a) abstractive text summarization", add_legend=False)
# plot_ax(axes[1], [params_bitfit, params_prompt, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
# [mt_bitfit, mt_prompt, mt_pt, mt_hously_adapter_ffn_ho, mt_lora_attn], legends, "BLEU", full=37.3, ours=37.5,
# title="(b) machine translation")
fig, ax = plt.subplots(1, 1)
legends = ["BitFit", "PrefixTuning", "Adapter", "LoRA"]
plot_ax(ax, [params_bitfit, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
[xsum_bitfit, xsum_pt, xsum_hously_adapter_ffn_ho, xsum_lora_attn], legends, "XSum ROUGE-2", full=21.94,
title=None, add_legend=False)
fig.set_size_inches(5, 5)
fig.savefig("xsum_overview.pdf", bbox_inches='tight')
fig, ax = plt.subplots(1, 1)
plot_ax(ax, [params_bitfit, params_pt, params_hously_adapter_ffn_ho, params_lora_attn],
[mt_bitfit, mt_pt, mt_hously_adapter_ffn_ho, mt_lora_attn], legends, "MT BLEU", full=37.3,
title=None)
fig.set_size_inches(5,5)
fig.savefig("mt_overview.pdf", bbox_inches='tight')
def plot_table4():
color_base = ["blue", "red", "green", "tab:orange", "tab:cyan", "purple", ]
markers = ["o", "v", "s", "*", "D"]
fig, ax = plt.subplots(1, 1)
ylabel = "XSum ROUGE-2"
params_pt = [3.6, 9.2]
params_lora = [7.2]
params_adapter = [3.6, 9.2]
r2_pt = [20.46, 20.40]
r2_lora = [20.29]
r2_adapter = [20.31, 20.83]
ffn_params_lora = [6.1]
ffn_r2_lora = [21.31]
ffn_params_adapter = [2.4, 6.1, 12.3]
ffn_r2_adapter = [20.66, 20.98, 21.24]
ax.plot(params_pt, r2_pt, c=color_base[0], marker=markers[0], ms=10, linewidth=2)
ax.plot(params_adapter, r2_adapter, c=color_base[0], marker=markers[1], ms=10, linewidth=2)
ax.plot(params_lora, r2_lora, c=color_base[0], marker=markers[2], ms=10, linewidth=2)
ax.plot(ffn_params_adapter, ffn_r2_adapter, "--", c=color_base[1], marker=markers[1], ms=10, linewidth=2)
ax.plot(ffn_params_lora, ffn_r2_lora, "--", c=color_base[1], marker=markers[2], ms=10, linewidth=2)
# legends = ["attn-PT", "attn-PA", "attn-LoRA", "ffn-PA",
# "ffn-LoRA"]
# ax.legend(legends, loc="lower right", fontsize=12)
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
ax.grid()
ax.set_facecolor("white")
fig.set_size_inches(5, 3)
fig.savefig("xsum_modification_position.pdf", bbox_inches='tight')
fig, ax = plt.subplots(1, 1)
ylabel = "MT BLEU"
params_pt = [3.6, 9.2]
params_lora = [7.2]
params_adapter = [3.6, 9.2]
bleu_pt = [35.6, 35.1]
bleu_lora = [36.2]
bleu_adapter = [35.6, 36.2]
ffn_params_lora = [6.1]
ffn_params_adapter = [2.4, 6.1, 12.3]
ffn_bleu_lora = [36.5]
ffn_bleu_adapter = [36.4, 37.1, 37.3]
ax.plot(params_pt, bleu_pt, c=color_base[0], marker=markers[0], ms=10, linewidth=2)
ax.plot(params_adapter, bleu_adapter, c=color_base[0], marker=markers[1], ms=10, linewidth=2)
ax.plot(params_lora, bleu_lora, c=color_base[0], marker=markers[2], ms=10, linewidth=2)
ax.plot(ffn_params_adapter, ffn_bleu_adapter, "--", c=color_base[1], marker=markers[1], ms=10, linewidth=2)
ax.plot(ffn_params_lora, ffn_bleu_lora, "--", c=color_base[1], marker=markers[2], ms=10, linewidth=2)
# legends = ["attn-Prefix Tuning", "attn-Parallel Adapter", "attn-LoRA", "ffn-Parallel Adaptaer", "ffn-LoRA"]
# ax.legend(legends, loc="lower right", fontsize=12, bbox_to_anchor=(1.27, 0.005))
legends = ["Prefix (attn)", "PA (attn)", "LoRA (attn)", "PA (ffn)", "LoRA (ffn)"]
ax.legend(legends, loc="lower right", fontsize=12, bbox_to_anchor=(1.11, 0.00))
ax.set(xlabel=r"Fine-tuned Parameters (\%)", ylabel=ylabel)
ax.grid()
ax.set_facecolor("white")
fig.set_size_inches(5, 3)
fig.savefig("mt_modification_position.pdf", bbox_inches='tight')
# plot_overview()
plot_intro()
# plot_table4() |
fuzz_lightyear/settings.py | bbhunter/fuzz-lightyear | 169 | 12761816 | import random
from functools import lru_cache
from hypothesis import core
class Settings:
def __init__(self) -> None:
self.seed = random.getrandbits(128) # type: int
self.unicode_enabled = True # type: bool
self.enable_color = True # type: bool
@property
def seed(self) -> int:
return self._seed
@seed.setter
def seed(self, value: int) -> None:
self._seed = value
core.global_force_seed = value # type: ignore
random.seed(value)
@lru_cache(maxsize=1)
def get_settings() -> Settings:
return Settings()
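# get_settings() returns a process-wide singleton (lru_cache(maxsize=1)); assigning to
# its `seed` property reseeds both `random` and hypothesis' global forced seed.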
|
keras-bert-poetry-generator/model.py | ganfanhang/DeepLearningExamples | 274 | 12761823 | <filename>keras-bert-poetry-generator/model.py
# -*- coding: utf-8 -*-
# @File : model.py
# @Author : AaronJny
# @Time : 2019/12/25
# @Desc :
from bert4keras.models import build_transformer_model
import tensorflow as tf
from dataset import keep_words
import settings
model = build_transformer_model(settings.CONFIG_PATH, settings.CHECKPOINT_PATH, application='lm', keep_tokens=keep_words)
model.summary()
# Loss function: cross entropy for language modeling.
# The input tokens shifted by one position (from the second token on) serve as the
# targets (targets are raw token ids, not one-hot encoded).
y_true = model.input[0][:, 1:]
# Mask for the target positions
y_mask = model.get_layer('Embedding-Token').output_mask[:, 1:]
y_mask = tf.cast(y_mask, tf.float32)
# Predictions, truncated at the second-to-last position (inclusive)
y_pred = model.output[:, :-1]
cross_entropy = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
cross_entropy = tf.reduce_sum(cross_entropy * y_mask) / tf.reduce_sum(y_mask)
model.add_loss(cross_entropy)
model.compile(tf.keras.optimizers.Adam(1e-5))
|
news_collector/collector/apps.py | orehush/channels-examples | 1,311 | 12761825 | from django.apps import AppConfig
class CollectorConfig(AppConfig):
name = 'collector'
|
office365/sharepoint/social/socialRestActor.py | wreiner/Office365-REST-Python-Client | 544 | 12761827 | <filename>office365/sharepoint/social/socialRestActor.py<gh_stars>100-1000
from office365.runtime.client_object import ClientObject
class SocialRestActor(ClientObject):
pass
|
glance/tests/functional/db/migrations/test_pike_expand01.py | Steap/glance | 309 | 12761898 | <reponame>Steap/glance<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils
from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils
class TestPikeExpand01Mixin(test_migrations.AlembicMigrationsMixin):
artifacts_table_names = [
'artifact_blob_locations',
'artifact_properties',
'artifact_blobs',
'artifact_dependencies',
'artifact_tags',
'artifacts'
]
def _get_revisions(self, config):
return test_migrations.AlembicMigrationsMixin._get_revisions(
self, config, head='pike_expand01')
def _pre_upgrade_pike_expand01(self, engine):
# verify presence of the artifacts tables
for table_name in self.artifacts_table_names:
table = db_utils.get_table(engine, table_name)
self.assertIsNotNone(table)
def _check_pike_expand01(self, engine, data):
# should be no changes, so re-run pre-upgrade check
self._pre_upgrade_pike_expand01(engine)
class TestPikeExpand01MySQL(
TestPikeExpand01Mixin,
test_fixtures.OpportunisticDBTestMixin,
test_utils.BaseTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
|
venv/lib/python3.8/site-packages/statsmodels/multivariate/tests/test_ml_factor.py | johncollinsai/post-high-frequency-data | 6,931 | 12761911 | <reponame>johncollinsai/post-high-frequency-data
import numpy as np
from statsmodels.multivariate.factor import Factor
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
import warnings
# A small model for basic testing
def _toy():
uniq = np.r_[4, 9, 16]
load = np.asarray([[3, 1, 2], [2, 5, 8]]).T
par = np.r_[2, 3, 4, 3, 1, 2, 2, 5, 8]
corr = np.asarray([[1, .5, .25], [.5, 1, .5], [.25, .5, 1]])
return uniq, load, corr, par
def test_loglike():
uniq, load, corr, par = _toy()
fa = Factor(n_factor=2, corr=corr)
# Two ways of passing the parameters to loglike
ll1 = fa.loglike((load, uniq))
ll2 = fa.loglike(par)
assert_allclose(ll1, ll2)
def test_score():
uniq, load, corr, par = _toy()
fa = Factor(n_factor=2, corr=corr)
def f(par):
return fa.loglike(par)
par2 = np.r_[0.1, 0.2, 0.3, 0.4, 0.3, 0.1, 0.2, -0.2, 0, 0.8, 0.5, 0]
for pt in (par, par2):
g1 = approx_fprime(pt, f, 1e-8)
g2 = fa.score(pt)
assert_allclose(g1, g2, atol=1e-3)
def test_exact():
# Test if we can recover exact factor-structured matrices with
# default starting values.
np.random.seed(23324)
# Works for larger k_var but slow for routine testing.
for k_var in 5, 10, 25:
for n_factor in 1, 2, 3:
load = np.random.normal(size=(k_var, n_factor))
uniq = np.linspace(1, 2, k_var)
c = np.dot(load, load.T)
c.flat[::c.shape[0]+1] += uniq
s = np.sqrt(np.diag(c))
c /= np.outer(s, s)
fa = Factor(corr=c, n_factor=n_factor, method='ml')
rslt = fa.fit()
assert_allclose(rslt.fitted_cov, c, rtol=1e-4, atol=1e-4)
rslt.summary() # smoke test
def test_exact_em():
# Test if we can recover exact factor-structured matrices with
# default starting values using the EM algorithm.
np.random.seed(23324)
# Works for larger k_var but slow for routine testing.
for k_var in 5, 10, 25:
for n_factor in 1, 2, 3:
load = np.random.normal(size=(k_var, n_factor))
uniq = np.linspace(1, 2, k_var)
c = np.dot(load, load.T)
c.flat[::c.shape[0]+1] += uniq
s = np.sqrt(np.diag(c))
c /= np.outer(s, s)
fa = Factor(corr=c, n_factor=n_factor, method='ml')
load_e, uniq_e = fa._fit_ml_em(2000)
c_e = np.dot(load_e, load_e.T)
c_e.flat[::c_e.shape[0]+1] += uniq_e
assert_allclose(c_e, c, rtol=1e-4, atol=1e-4)
def test_fit_ml_em_random_state():
# Ensure Factor._fit_ml_em doesn't change numpy's singleton random state
# see #7357
T = 10
epsilon = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=T).T
initial = np.random.get_state()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message='Fitting did not converge')
Factor(endog=epsilon, n_factor=2, method='ml').fit()
final = np.random.get_state()
assert(initial[0] == final[0])
assert_equal(initial[1], final[1])
assert(initial[2:] == final[2:])
def test_em():
n_factor = 1
cor = np.asarray([[1, 0.5, 0.3], [0.5, 1, 0], [0.3, 0, 1]])
fa = Factor(corr=cor, n_factor=n_factor, method='ml')
rslt = fa.fit(opt={'gtol': 1e-3})
load_opt = rslt.loadings
uniq_opt = rslt.uniqueness
load_em, uniq_em = fa._fit_ml_em(1000)
cc = np.dot(load_em, load_em.T)
cc.flat[::cc.shape[0]+1] += uniq_em
assert_allclose(cc, rslt.fitted_cov, rtol=1e-2, atol=1e-2)
def test_1factor():
"""
# R code:
r = 0.4
p = 4
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
fa = factanal(covmat=cm, factors=1)
print(fa, digits=10)
"""
r = 0.4
p = 4
ii = np.arange(p)
cm = r ** np.abs(np.subtract.outer(ii, ii))
fa = Factor(corr=cm, n_factor=1, method='ml')
rslt = fa.fit()
if rslt.loadings[0, 0] < 0:
rslt.loadings[:, 0] *= -1
# R solution, but our likelihood is higher
# uniq = np.r_[0.8392472054, 0.5820958187, 0.5820958187, 0.8392472054]
# load = np.asarray([[0.4009399224, 0.6464550935, 0.6464550935,
# 0.4009399224]]).T
# l1 = fa.loglike(fa._pack(load, uniq))
# l2 = fa.loglike(fa._pack(rslt.loadings, rslt.uniqueness))
# So use a smoke test
uniq = np.r_[0.85290232, 0.60916033, 0.55382266, 0.82610666]
load = np.asarray([[0.38353316], [0.62517171], [0.66796508],
[0.4170052]])
assert_allclose(load, rslt.loadings, rtol=1e-3, atol=1e-3)
assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
assert_equal(rslt.df, 2)
def test_2factor():
"""
# R code:
r = 0.4
p = 6
ii = seq(0, p-1)
ii = outer(ii, ii, "-")
ii = abs(ii)
cm = r^ii
factanal(covmat=cm, factors=2)
"""
r = 0.4
p = 6
ii = np.arange(p)
cm = r ** np.abs(np.subtract.outer(ii, ii))
fa = Factor(corr=cm, n_factor=2, nobs=100, method='ml')
rslt = fa.fit()
for j in 0, 1:
if rslt.loadings[0, j] < 0:
rslt.loadings[:, j] *= -1
uniq = np.r_[0.782, 0.367, 0.696, 0.696, 0.367, 0.782]
assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
loads = [np.r_[0.323, 0.586, 0.519, 0.519, 0.586, 0.323],
np.r_[0.337, 0.538, 0.187, -0.187, -0.538, -0.337]]
for k in 0, 1:
if np.dot(loads[k], rslt.loadings[:, k]) < 0:
loads[k] *= -1
assert_allclose(loads[k], rslt.loadings[:, k], rtol=1e-3, atol=1e-3)
assert_equal(rslt.df, 4)
# Smoke test for standard errors
e = np.asarray([0.11056836, 0.05191071, 0.09836349,
0.09836349, 0.05191071, 0.11056836])
assert_allclose(rslt.uniq_stderr, e, atol=1e-4)
e = np.asarray([[0.08842151, 0.08842151], [0.06058582, 0.06058582],
[0.08339874, 0.08339874], [0.08339874, 0.08339874],
[0.06058582, 0.06058582], [0.08842151, 0.08842151]])
assert_allclose(rslt.load_stderr, e, atol=1e-4)
|
deeppy/dataset/stl10.py | purushothamgowthu/deeppy | 1,170 | 12761947 | import os
import numpy as np
import logging
from ..base import float_, int_
from .util import dataset_home, download, checksum, archive_extract, checkpoint
log = logging.getLogger(__name__)
_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
_SHA1 = 'b22ebbd7f3c4384ebc9ba3152939186d3750b902'
class STL10(object):
'''
The STL-10 dataset [1]
http://cs.stanford.edu/~acoates/stl10
References:
[1]: An Analysis of Single Layer Networks in Unsupervised Feature Learning,
<NAME>, <NAME>, <NAME>, AISTATS, 2011.
'''
def __init__(self):
self.name = 'stl10'
self.n_classes = 10
self.n_train = 5000
self.n_test = 8000
self.n_unlabeled = 100000
self.img_shape = (3, 96, 96)
self.data_dir = os.path.join(dataset_home, self.name)
self._npz_path = os.path.join(self.data_dir, 'stl10.npz')
self._install()
self._arrays, self.folds = self._load()
def arrays(self, dp_dtypes=False):
x_train, y_train, x_test, y_test, x_unlabeled = self._arrays
if dp_dtypes:
x_train = x_train.astype(float_)
y_train = y_train.astype(int_)
x_test = x_test.astype(float_)
y_test = y_test.astype(int_)
x_unlabeled = x_unlabeled.astype(float_)
return x_train, y_train, x_test, y_test, x_unlabeled
def _install(self):
checkpoint_file = os.path.join(self.data_dir, '__install_check')
with checkpoint(checkpoint_file) as exists:
if exists:
return
log.info('Downloading %s', _URL)
filepath = download(_URL, self.data_dir)
if _SHA1 != checksum(filepath, method='sha1'):
raise RuntimeError('Checksum mismatch for %s.' % _URL)
log.info('Unpacking %s', filepath)
archive_extract(filepath, self.data_dir)
unpack_dir = os.path.join(self.data_dir, 'stl10_binary')
log.info('Converting data to Numpy arrays')
filenames = ['train_X.bin', 'train_y.bin', 'test_X.bin',
'test_y.bin', 'unlabeled_X.bin']
def bin2numpy(filepath):
with open(filepath, 'rb') as f:
arr = np.fromfile(f, dtype=np.uint8)
if '_X' in filepath:
arr = np.reshape(arr, (-1,) + self.img_shape)
return arr
filepaths = [os.path.join(unpack_dir, f) for f in filenames]
x_train, y_train, x_test, y_test, x_unlabeled = map(bin2numpy,
filepaths)
folds = []
with open(os.path.join(unpack_dir, 'fold_indices.txt'), 'r') as f:
for line in f:
folds.append([int(s) for s in line.strip().split(' ')])
folds = np.array(folds)
with open(self._npz_path, 'wb') as f:
np.savez(f, x_train=x_train, y_train=y_train, x_test=x_test,
y_test=y_test, x_unlabeled=x_unlabeled, folds=folds)
def _load(self):
with open(self._npz_path, 'rb') as f:
dic = np.load(f)
return ((dic['x_train'], dic['y_train'], dic['x_test'],
dic['y_test'], dic['x_unlabeled']), dic['folds'])
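# Illustrative usage sketch (not part of the original module); it assumes the
# STL-10 archive can be downloaded and unpacked under `dataset_home` as above.
if __name__ == '__main__':
    dataset = STL10()
    x_train, y_train, x_test, y_test, x_unlabeled = dataset.arrays(dp_dtypes=True)
    # Each row of `folds` holds the training indices of one predefined fold.
    first_fold = dataset.folds[0]
    log.info('train %s, unlabeled %s, fold size %d',
             x_train.shape, x_unlabeled.shape, len(first_fold))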
|
eICU_tstr_evaluation.py | cliohong/RGAN | 585 | 12761955 |
import data_utils
import pandas as pd
import numpy as np
import tensorflow as tf
import math, random, itertools
import pickle
import time
import json
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc, precision_recall_curve
import copy
from scipy.stats import sem
print ("Starting TSTR experiment.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
# iterate over all dataset versions generated after running the GAN for 5 times
aurocs_all_runs = []
auprcs_all_runs = []
for oo in range(5):
print (oo)
# find the best "dataset epoch", meaning the GAN epoch that generated the dataset
# validation is only done in some of the tasks, and the others are considered unknown
# (use validation set to pick best GAN epoch, then get result on test set)
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
all_aurocs_exp = []
all_auprcs_exp = []
for nn in np.arange(50,1050,50):
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
all_aurocs = []
all_auprcs = []
# in case we want to train each random forest multiple times with each dataset
for exp_num in range(1):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(vali_seqs_r, vali_targets[:,col_num]))
preds = estimator.predict(vali_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=vali_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=vali_targets[:,col_num]))
preds = estimator.predict_proba(vali_seqs_r)
fpr, tpr, thresholds = roc_curve(vali_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(vali_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
all_aurocs.append(aurocs)
all_auprcs.append(auprcs)
all_aurocs_exp.append(all_aurocs)
all_auprcs_exp.append(all_auprcs)
#with open('all_aurocs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_aurocs_exp)
#with open('all_auprcs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_auprcs_exp)
best_idx = np.argmax(np.array(all_aurocs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1) + np.array(all_auprcs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1))
best = np.arange(50,1050,50)[best_idx]
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
print ("----------------------------")
aurocs_all_runs.append(aurocs)
auprcs_all_runs.append(auprcs)
allr = np.vstack(aurocs_all_runs)
allp = np.vstack(auprcs_all_runs)
tstr_aurocs_mean = allr.mean(axis=0)
tstr_aurocs_sem = sem(allr, axis=0)
tstr_auprcs_mean = allp.mean(axis=0)
tstr_auprcs_sem = sem(allp, axis=0)
# get AUROC/AUPRC for real, random data
print ("Experiment with real data.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
aurocs_all = []
auprcs_all = []
for i in range(5):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
aurocs_all.append(aurocs)
auprcs_all.append(auprcs)
real_aurocs_mean = np.array(aurocs_all).mean(axis=0)
real_aurocs_sem = sem(aurocs_all, axis=0)
real_auprcs_mean = np.array(auprcs_all).mean(axis=0)
real_auprcs_sem = sem(auprcs_all, axis=0)
print ("Experiment with random predictions.")
#random score
test_targets_random = copy.deepcopy(test_targets)
np.random.shuffle(test_targets_random)  # stdlib random.shuffle corrupts 2-D numpy arrays when swapping row views
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
accuracies.append(accuracy_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
precisions.append(precision_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
preds = np.random.rand(len(test_targets[:,col_num]))
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds)
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds)
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
random_aurocs = aurocs
random_auprcs = auprcs
print("Results")
print("------------")
print("------------")
print("TSTR")
print(tstr_aurocs_mean)
print(tstr_aurocs_sem)
print(tstr_auprcs_mean)
print(tstr_auprcs_sem)
print("------------")
print("Real")
print(real_aurocs_mean)
print(real_aurocs_sem)
print(real_auprcs_mean)
print(real_auprcs_sem)
print("------------")
print("Random")
print(random_aurocs)
print(random_auprcs) |
ee/api/chalicelib/blueprints/bp_ee_crons.py | nogamenofun98/openreplay | 3,614 | 12761965 |
from chalice import Blueprint
from chalice import Cron
from chalicelib import _overrides
app = Blueprint(__name__)
_overrides.chalice_app(app) |
notifications/email_constants.py | bfortuner/VOCdetect | 336 | 12761971 |
import config
import constants as c
WEBSITE_URL = config.KIBANA_URL
ADMIN_EMAIL = config.ADMIN_EMAIL
USER_EMAIL = config.USER_EMAIL
EMAIL_CHARSET = 'UTF-8'
HEADER="<html>"
FOOTER="</html>"
EXPERIMENT_STATUS_EMAIL_TEMPLATE="""
<p>Hello,</p>
<p>Your experiment has ended.</p>
<p><b>Name:</b> %s</p>
<p><b>Status:</b> %s</p>
<p><b>Status Msg:</b> %s</p>
<p><a href="%s">View Dashboard</a></p>
<p><b>Experiment Results:</b></p>
<p>%s</p>
<p><b>Experiment Config:</b></p>
<p>%s</p>
<p><b>Thanks,</b><br>
Team</p>
"""
EXPERIMENT_STATUS_EMAIL_BODY = (
HEADER + EXPERIMENT_STATUS_EMAIL_TEMPLATE + FOOTER
)
EXPERIMENT_STATUS_EMAIL ={
'subject' : 'New Experiment Results',
'body' : EXPERIMENT_STATUS_EMAIL_BODY
}
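# Illustrative sketch (hypothetical helper, not part of the original module):
# the body template above expects six values via %-formatting, in this order.
def render_experiment_email(name, status, status_msg, dashboard_url, results, config):
    return EXPERIMENT_STATUS_EMAIL['body'] % (
        name, status, status_msg, dashboard_url, results, config)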
|
logdevice/ops/ldops/exceptions.py | majra20/LogDevice | 1,831 | 12761984 |
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
ldops.exceptions
~~~~~~~~~~~
Contains LDOps-wide exceptions.
"""
class LDOpsError(Exception):
"""
Generic error in LDOps
"""
pass
class NodeNotFoundError(LDOpsError):
"""
Raised when node not found
"""
pass
class NodeIsNotASequencerError(LDOpsError):
"""
Raised when a node which is not a sequencer is used in a context
expecting that it is a sequencer
"""
pass
class NodeIsNotAStorageError(LDOpsError):
"""
Raised when a node which is not a storage node is used in a context
expecting that it is a storage node
"""
pass
|
QPT_client/Python/Lib/site-packages/qpt/memory.py | Scxw010516/Smart_container | 150 | 12761997 | # Author: <NAME>
# Datetime:2021/7/3
# Copyright belongs to the author.
# Please indicate the source for reprinting.
import platform
import os
from distutils.sysconfig import get_python_lib
from qpt.kernel.qlog import Logging
def init_wrapper(var=True):
def i_wrapper(func):
if var:
@property
def render(self):
if func.__name__ in self.memory:
out = self.memory[func.__name__]
else:
out = func(self)
self.memory[func.__name__] = out
return out
else:
def render(self, *args, **kwargs):
if func.__name__ in self.memory:
out = self.memory[func.__name__]
else:
out = func(self, *args, **kwargs)
self.memory[func.__name__] = out
return out
return render
return i_wrapper
class QPTMemory:
def __init__(self):
self.memory = dict()
def set_mem(self, name, variable):
self.memory[name] = variable
return variable
def free_mem(self, name):
self.memory.pop(name)
@init_wrapper()
def platform_bit(self):
arc = platform.machine()
Logging.debug(f"操作系统位数:{arc}")
return arc
@init_wrapper()
def platform_os(self):
p_os = platform.system()
Logging.debug(f"操作系统类型:{p_os}")
return p_os
@init_wrapper()
def site_packages_path(self):
site_package_path = os.path.abspath(get_python_lib())
return site_package_path
@init_wrapper()
def pip_tool(self):
from qpt.kernel.qinterpreter import PipTools
pip_tools = PipTools()
return pip_tools
@init_wrapper()
def get_win32con(self):
import win32con
return win32con
@init_wrapper()
def get_win32api(self):
import win32api
return win32api
@init_wrapper(var=False)
def get_env_vars(self, work_dir="."):
return get_env_vars(work_dir)
QPT_MEMORY = QPTMemory()
def check_bit():
arc = QPT_MEMORY.platform_bit
assert "64" in arc, "当前QPT不支持32位操作系统"
def check_os():
p_os = QPT_MEMORY.platform_os
assert "Windows" in p_os, "当前QPT只支持Windows系统"
IGNORE_ENV_FIELD = ["conda", "Conda", "Python", "python"]
def get_env_vars(work_dir="."):
"""
Get the dictionary of environment variables to be set for the current run
:param work_dir:
:return: dict
"""
env_vars = dict()
# Set PATH ENV
path_env = os.environ.get("PATH").split(";")
pre_add_env = os.path.abspath("./Python/Lib/site-packages") + ";" + \
os.path.abspath("./Python/Lib") + ";" + \
os.path.abspath("./Python/Lib/ext") + ";" + \
os.path.abspath("./Python") + ";" + \
os.path.abspath("./Python/Scripts") + ";"
for pe in path_env:
if pe:
add_flag = True
for ief in IGNORE_ENV_FIELD:
if ief in pe:
add_flag = False
break
if add_flag:
pre_add_env += pe + ";"
env_vars["PATH"] = pre_add_env + \
"%SYSTEMROOT%/System32/WindowsPowerShell/v1.0;" + \
"C:/Windows/System32/WindowsPowerShell/v1.0;" + \
"%ProgramFiles%/WindowsPowerShell/Modules;" + \
"%SystemRoot%/system32/WindowsPowerShell/v1.0/Modules;" + \
f"{os.path.join(os.path.abspath(work_dir), 'opt/CUDA')};"
# Set PYTHON PATH ENV
env_vars["PYTHONPATH"] = os.path.abspath("./Python/Lib/site-packages") + ";" + \
work_dir + ";" + \
os.path.abspath("./Python")
os_env = os.environ.copy()
os_env.update(env_vars)
if QPT_MODE and QPT_MODE.lower() == "debug":
Logging.debug(msg="The environment variables recognized by Python are as follows:\n" +
"".join([_ek + ":" + _e_v + " \n" for _ek, _ev in env_vars.items()
for _e_v in _ev.split(";")]))
return os_env
PYTHON_IGNORE_DIRS = [".idea", ".git", ".github", "venv"]
# Python packages to ignore
IGNORE_PACKAGES = ["virtualenv", "pip", "setuptools", "cpython"]
# QPT run mode: Run/Debug
QPT_MODE = os.getenv("QPT_MODE")
# Run mode detected by QPT: Run / local Run - intended to warn developers and avoid package bloat
QPT_RUN_MODE = None
class CheckRun:
@staticmethod
def make_run_file(configs_path):
with open(os.path.join(configs_path, "run_act.lock"), "w") as f:
f.write("Run Done")
@staticmethod
def check_run_file(configs_path):
global QPT_RUN_MODE
if QPT_RUN_MODE is None:
QPT_RUN_MODE = os.path.exists(os.path.join(configs_path, "run_act.lock"))
return QPT_RUN_MODE
def check_all():
# Check the operating system
check_os()
# Check the architecture bitness
check_bit()
check_all()
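# Illustrative sketch (not part of the original module): init_wrapper memoizes
# results under the wrapped function's name, so repeated accesses on QPT_MEMORY
# reuse the first computed value until free_mem() drops it.
if __name__ == '__main__':
    Logging.debug(QPT_MEMORY.platform_os)   # computed once and cached
    Logging.debug(QPT_MEMORY.platform_os)   # served from QPT_MEMORY.memory
    QPT_MEMORY.free_mem('platform_os')      # evict the cached entry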
|
Packs/ShiftManagement/Scripts/GetAwayUsers/GetAwayUsers_test.py | sorkan/content | 799 | 12762010 | import io
import json
from copy import deepcopy
import GetAwayUsers
import demistomock as demisto
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
away_user_data = util_load_json('test_data/away_user.json')
def test_script_valid(mocker):
"""
Given:
When:
- Calling to GetAwayUsers Script.
Then:
- Ensure expected outputs are returned.
"""
from GetAwayUsers import main
return_results_mock = mocker.patch.object(GetAwayUsers, 'return_results')
away_user = away_user_data
not_away_user = deepcopy(away_user_data)
not_away_user['isAway'] = False
mocker.patch.object(demisto, 'executeCommand', return_value=[{'Type': '1', 'Contents': [away_user, not_away_user]}])
main()
command_results = return_results_mock.call_args[0][0]
assert command_results.outputs == [{'email': '',
'id': 'admin',
'name': 'Admin',
'phone': '+650-123456',
'roles': {'demisto': ['Administrator']},
'username': 'admin'}]
def test_script_invalid(mocker):
"""
Given:
When:
- Calling to GetAwayUsers Script. Error during the demisto.executeCommand to getUsers.
Then:
- Ensure error is returned.
"""
from GetAwayUsers import main
error_entry_type: int = 4
mocker.patch.object(GetAwayUsers, 'return_error')
mocker.patch.object(demisto, 'error')
away_user = away_user_data
not_away_user = deepcopy(away_user_data)
not_away_user['isAway'] = False
mocker.patch.object(demisto, 'executeCommand',
return_value=[{'Type': error_entry_type, 'Contents': [away_user, not_away_user]}])
main()
assert GetAwayUsers.return_error.called
|
datasets/SOT/seed/Impl/TrackingNet.py | zhangzhengde0225/SwinTrack | 143 | 12762019 | import os
from datasets.types.data_split import DataSplit
from datasets.SOT.constructor.base_interface import SingleObjectTrackingDatasetConstructor
import numpy as np
def construct_TrackingNet(constructor: SingleObjectTrackingDatasetConstructor, seed):
root_path = seed.root_path
data_type = seed.data_split
enable_set_ids = seed.enable_set_ids
sequence_name_class_map_file_path = seed.sequence_name_class_map_file_path
if data_type != DataSplit.Training and enable_set_ids is not None:
raise Exception("unsupported configuration")
sequence_name_class_map = {}
if sequence_name_class_map_file_path is None:
sequence_name_class_map_file_path = os.path.join(os.path.dirname(__file__), 'data_specs', 'trackingnet_sequence_classes_map.txt')
for line in open(sequence_name_class_map_file_path, 'r', encoding='utf-8'):
line = line.strip()
name, category = line.split('\t')
sequence_name_class_map[name] = category
categories = set(sequence_name_class_map.values())
category_id_name_map = {i: v for i, v in enumerate(categories)}
category_name_id_map = {v: i for i, v in enumerate(categories)}
if enable_set_ids is not None:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in enable_set_ids]
else:
trackingNetSubsets = []
if data_type & DataSplit.Training:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in range(12)]
if data_type & DataSplit.Testing:
trackingNetSubsets.append('TEST')
sequence_list = []
for subset in trackingNetSubsets:
subset_path = os.path.join(root_path, subset)
frames_path = os.path.join(subset_path, 'frames')
anno_path = os.path.join(subset_path, 'anno')
bounding_box_annotation_files = os.listdir(anno_path)
bounding_box_annotation_files = [bounding_box_annotation_file for bounding_box_annotation_file in
bounding_box_annotation_files if bounding_box_annotation_file.endswith('.txt')]
bounding_box_annotation_files.sort()
sequences = [sequence[:-4] for sequence in bounding_box_annotation_files]
for sequence, bounding_box_annotation_file in zip(sequences, bounding_box_annotation_files):
sequence_image_path = os.path.join(frames_path, sequence)
bounding_box_annotation_file_path = os.path.join(anno_path, bounding_box_annotation_file)
sequence_list.append((sequence, sequence_image_path, bounding_box_annotation_file_path))
constructor.set_category_id_name_map(category_id_name_map)
constructor.set_total_number_of_sequences(len(sequence_list))
for sequence, sequence_image_path, sequence_bounding_box_annotation_file_path in sequence_list:
with constructor.new_sequence(category_name_id_map[sequence_name_class_map[sequence]]) as sequence_constructor:
sequence_constructor.set_name(sequence)
bounding_boxes = np.loadtxt(sequence_bounding_box_annotation_file_path, dtype=float, delimiter=',')
images = os.listdir(sequence_image_path)
images = [image for image in images if image.endswith('.jpg')]
if bounding_boxes.ndim == 2:
is_testing_sequence = False
assert len(images) == len(bounding_boxes)
else:
is_testing_sequence = True
assert bounding_boxes.ndim == 1 and bounding_boxes.shape[0] == 4
for i in range(len(images)):
image_file_name = '{}.jpg'.format(i)
image_file_path = os.path.join(sequence_image_path, image_file_name)
with sequence_constructor.new_frame() as frame_constructor:
frame_constructor.set_path(image_file_path)
if is_testing_sequence:
if i == 0:
frame_constructor.set_bounding_box(bounding_boxes.tolist())
else:
frame_constructor.set_bounding_box(bounding_boxes[i].tolist())
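# Directory layout implied by the code above (illustrative comment, not part of
# the original file): <root_path>/TRAIN_0 .. TRAIN_11 and TEST, each holding
# anno/<sequence>.txt (one 4-value, comma-separated bounding box per line) and
# frames/<sequence>/0.jpg, 1.jpg, ... numbered frame images.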
|
msticpy/config/ce_keyvault.py | kubajir/msticpy | 820 | 12762021 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Key Vault component edit."""
from .._version import VERSION
from .ce_simple_settings import CESimpleSettings
__version__ = VERSION
__author__ = "<NAME>"
class CEKeyVault(CESimpleSettings):
"""Key Vault settings edit component."""
_DESCRIPTION = "Key Vault Setup"
_COMP_PATH = "KeyVault"
_HELP_TEXT = """
Set the parameters for your Key Vault here to store secret
values such as API Keys.<br>
Check <b>UseKeyring</b> if you have Keyring installed and want to be
able to cache the secrets locally. (Note: keyring is not supported
by default on many Linux distributions)<br>
The first five items are mandatory.<br>
The value for <b>Authority</b> should be set to the Azure Cloud that you use.<br>
Options are:
<ul>
<li>global (Commercial Azure cloud)</li>
<li>usgov (US Government cloud)</li>
<li>cn (China national cloud)</li>
<li>de (German national cloud)</li>
</ul>
The default is "global".<br>
"""
_HELP_URI = {
"Key Vault Settings": (
"https://msticpy.readthedocs.io/en/latest/getting_started/"
+ "msticpyconfig.html#specifying-secrets-as-key-vault-secrets"
)
}
|
fonts/romfonts/vga1_8x8.py | slabua/st7789py_mpy | 153 | 12762029 | """converted from vga_8x8.bin """
WIDTH = 8
HEIGHT = 8
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x3c\x3c\x18\x18\x00\x18\x00'\
b'\x66\x66\x24\x00\x00\x00\x00\x00'\
b'\x6c\x6c\xfe\x6c\xfe\x6c\x6c\x00'\
b'\x18\x3e\x60\x3c\x06\x7c\x18\x00'\
b'\x00\xc6\xcc\x18\x30\x66\xc6\x00'\
b'\x38\x6c\x38\x76\xdc\xcc\x76\x00'\
b'\x18\x18\x30\x00\x00\x00\x00\x00'\
b'\x0c\x18\x30\x30\x30\x18\x0c\x00'\
b'\x30\x18\x0c\x0c\x0c\x18\x30\x00'\
b'\x00\x66\x3c\xff\x3c\x66\x00\x00'\
b'\x00\x18\x18\x7e\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x30'\
b'\x00\x00\x00\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x00'\
b'\x06\x0c\x18\x30\x60\xc0\x80\x00'\
b'\x38\x6c\xc6\xd6\xc6\x6c\x38\x00'\
b'\x18\x38\x18\x18\x18\x18\x7e\x00'\
b'\x7c\xc6\x06\x1c\x30\x66\xfe\x00'\
b'\x7c\xc6\x06\x3c\x06\xc6\x7c\x00'\
b'\x1c\x3c\x6c\xcc\xfe\x0c\x1e\x00'\
b'\xfe\xc0\xc0\xfc\x06\xc6\x7c\x00'\
b'\x38\x60\xc0\xfc\xc6\xc6\x7c\x00'\
b'\xfe\xc6\x0c\x18\x30\x30\x30\x00'\
b'\x7c\xc6\xc6\x7c\xc6\xc6\x7c\x00'\
b'\x7c\xc6\xc6\x7e\x06\x0c\x78\x00'\
b'\x00\x18\x18\x00\x00\x18\x18\x00'\
b'\x00\x18\x18\x00\x00\x18\x18\x30'\
b'\x06\x0c\x18\x30\x18\x0c\x06\x00'\
b'\x00\x00\x7e\x00\x00\x7e\x00\x00'\
b'\x60\x30\x18\x0c\x18\x30\x60\x00'\
b'\x7c\xc6\x0c\x18\x18\x00\x18\x00'\
b'\x7c\xc6\xde\xde\xde\xc0\x78\x00'\
b'\x38\x6c\xc6\xfe\xc6\xc6\xc6\x00'\
b'\xfc\x66\x66\x7c\x66\x66\xfc\x00'\
b'\x3c\x66\xc0\xc0\xc0\x66\x3c\x00'\
b'\xf8\x6c\x66\x66\x66\x6c\xf8\x00'\
b'\xfe\x62\x68\x78\x68\x62\xfe\x00'\
b'\xfe\x62\x68\x78\x68\x60\xf0\x00'\
b'\x3c\x66\xc0\xc0\xce\x66\x3a\x00'\
b'\xc6\xc6\xc6\xfe\xc6\xc6\xc6\x00'\
b'\x3c\x18\x18\x18\x18\x18\x3c\x00'\
b'\x1e\x0c\x0c\x0c\xcc\xcc\x78\x00'\
b'\xe6\x66\x6c\x78\x6c\x66\xe6\x00'\
b'\xf0\x60\x60\x60\x62\x66\xfe\x00'\
b'\xc6\xee\xfe\xfe\xd6\xc6\xc6\x00'\
b'\xc6\xe6\xf6\xde\xce\xc6\xc6\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00'\
b'\xfc\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xce\x7c\x0e'\
b'\xfc\x66\x66\x7c\x6c\x66\xe6\x00'\
b'\x3c\x66\x30\x18\x0c\x66\x3c\x00'\
b'\x7e\x7e\x5a\x18\x18\x18\x3c\x00'\
b'\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00'\
b'\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00'\
b'\xc6\xc6\xc6\xd6\xd6\xfe\x6c\x00'\
b'\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00'\
b'\x66\x66\x66\x3c\x18\x18\x3c\x00'\
b'\xfe\xc6\x8c\x18\x32\x66\xfe\x00'\
b'\x3c\x30\x30\x30\x30\x30\x3c\x00'\
b'\xc0\x60\x30\x18\x0c\x06\x02\x00'\
b'\x3c\x0c\x0c\x0c\x0c\x0c\x3c\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff'\
b'\x30\x18\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x0c\x7c\xcc\x76\x00'\
b'\xe0\x60\x7c\x66\x66\x66\xdc\x00'\
b'\x00\x00\x7c\xc6\xc0\xc6\x7c\x00'\
b'\x1c\x0c\x7c\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\x7c\xc6\xfe\xc0\x7c\x00'\
b'\x3c\x66\x60\xf8\x60\x60\xf0\x00'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\xf8'\
b'\xe0\x60\x6c\x76\x66\x66\xe6\x00'\
b'\x18\x00\x38\x18\x18\x18\x3c\x00'\
b'\x06\x00\x06\x06\x06\x66\x66\x3c'\
b'\xe0\x60\x66\x6c\x78\x6c\xe6\x00'\
b'\x38\x18\x18\x18\x18\x18\x3c\x00'\
b'\x00\x00\xec\xfe\xd6\xd6\xd6\x00'\
b'\x00\x00\xdc\x66\x66\x66\x66\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\x00'\
b'\x00\x00\xdc\x66\x66\x7c\x60\xf0'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\x1e'\
b'\x00\x00\xdc\x76\x60\x60\xf0\x00'\
b'\x00\x00\x7e\xc0\x7c\x06\xfc\x00'\
b'\x30\x30\xfc\x30\x30\x36\x1c\x00'\
b'\x00\x00\xcc\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x00'\
b'\x00\x00\xc6\xd6\xd6\xfe\x6c\x00'\
b'\x00\x00\xc6\x6c\x38\x6c\xc6\x00'\
b'\x00\x00\xc6\xc6\xc6\x7e\x06\xfc'\
b'\x00\x00\x7e\x4c\x18\x32\x7e\x00'\
b'\x0e\x18\x18\x70\x18\x18\x0e\x00'\
b'\x18\x18\x18\x18\x18\x18\x18\x00'\
b'\x70\x18\x18\x0e\x18\x18\x70\x00'\
b'\x76\xdc\x00\x00\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\x00'
FONT = memoryview(_FONT)
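# Illustrative sketch (not part of the original module): each printable ASCII
# character owns HEIGHT consecutive row bytes starting at ord(FIRST), with the
# most significant bit of every row byte being the leftmost pixel.
def glyph(ch):
    # Rows for one character; valid for FIRST <= ord(ch) <= LAST.
    offset = (ord(ch) - FIRST) * HEIGHT
    return FONT[offset:offset + HEIGHT]
if __name__ == '__main__':
    # Example: dump 'A' as ASCII art, one '#' per set bit.
    for row in glyph('A'):
        print(''.join('#' if row & (0x80 >> bit) else '.' for bit in range(WIDTH)))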
|
examples/data/norm_feature.py | leilin-research/Time-series-prediction | 552 | 12762031 |
import os
import joblib
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
class FeatureNorm(object):
def __init__(self, type='minmax'):
self.type = type
def __call__(self, x, mode='train', model_dir='../weights', name='scaler'):
assert len(x.shape) == 2, "Input rank for FeatureNorm should be 2"
if self.type == 'standard':
scaler = StandardScaler()
elif self.type == 'minmax':
scaler = MinMaxScaler()
else:
raise ValueError("Unsupported norm type yet: {}".format(self.type))
if mode == 'train':
scaler.fit(x)
joblib.dump(scaler, os.path.join(model_dir, name+'.pkl'))
else:
scaler = joblib.load(os.path.join(model_dir, name+'.pkl'))
output = scaler.transform(x)
try:
return pd.DataFrame(output, index=x.index, columns=x.columns)
except:
return output
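# Illustrative usage sketch (not part of the original module); it assumes a
# writable '../weights' directory so the fitted scaler can be saved and reused.
if __name__ == '__main__':
    df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
    norm = FeatureNorm(type='minmax')
    train_scaled = norm(df, mode='train', model_dir='../weights', name='scaler')
    test_scaled = norm(df, mode='test', model_dir='../weights', name='scaler')
    print(train_scaled)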
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/elastic_pool_performance_level_capability_py3.py | Mannan2812/azure-cli-extensions | 207 | 12762065 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ElasticPoolPerformanceLevelCapability(Model):
"""The Elastic Pool performance level capability.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar performance_level: The performance level for the pool.
:vartype performance_level:
~azure.mgmt.sql.models.PerformanceLevelCapability
:ivar sku: The sku.
:vartype sku: ~azure.mgmt.sql.models.Sku
:ivar supported_license_types: List of supported license types.
:vartype supported_license_types:
list[~azure.mgmt.sql.models.LicenseTypeCapability]
:ivar max_database_count: The maximum number of databases supported.
:vartype max_database_count: int
:ivar included_max_size: The included (free) max size for this performance
level.
:vartype included_max_size: ~azure.mgmt.sql.models.MaxSizeCapability
:ivar supported_max_sizes: The list of supported max sizes.
:vartype supported_max_sizes:
list[~azure.mgmt.sql.models.MaxSizeRangeCapability]
:ivar supported_per_database_max_sizes: The list of supported per database
max sizes.
:vartype supported_per_database_max_sizes:
list[~azure.mgmt.sql.models.MaxSizeRangeCapability]
:ivar supported_per_database_max_performance_levels: The list of supported
per database max performance levels.
:vartype supported_per_database_max_performance_levels:
list[~azure.mgmt.sql.models.ElasticPoolPerDatabaseMaxPerformanceLevelCapability]
:ivar status: The status of the capability. Possible values include:
'Visible', 'Available', 'Default', 'Disabled'
:vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
:param reason: The reason for the capability not being available.
:type reason: str
"""
_validation = {
'performance_level': {'readonly': True},
'sku': {'readonly': True},
'supported_license_types': {'readonly': True},
'max_database_count': {'readonly': True},
'included_max_size': {'readonly': True},
'supported_max_sizes': {'readonly': True},
'supported_per_database_max_sizes': {'readonly': True},
'supported_per_database_max_performance_levels': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'performance_level': {'key': 'performanceLevel', 'type': 'PerformanceLevelCapability'},
'sku': {'key': 'sku', 'type': 'Sku'},
'supported_license_types': {'key': 'supportedLicenseTypes', 'type': '[LicenseTypeCapability]'},
'max_database_count': {'key': 'maxDatabaseCount', 'type': 'int'},
'included_max_size': {'key': 'includedMaxSize', 'type': 'MaxSizeCapability'},
'supported_max_sizes': {'key': 'supportedMaxSizes', 'type': '[MaxSizeRangeCapability]'},
'supported_per_database_max_sizes': {'key': 'supportedPerDatabaseMaxSizes', 'type': '[MaxSizeRangeCapability]'},
'supported_per_database_max_performance_levels': {'key': 'supportedPerDatabaseMaxPerformanceLevels', 'type': '[ElasticPoolPerDatabaseMaxPerformanceLevelCapability]'},
'status': {'key': 'status', 'type': 'CapabilityStatus'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(self, *, reason: str=None, **kwargs) -> None:
super(ElasticPoolPerformanceLevelCapability, self).__init__(**kwargs)
self.performance_level = None
self.sku = None
self.supported_license_types = None
self.max_database_count = None
self.included_max_size = None
self.supported_max_sizes = None
self.supported_per_database_max_sizes = None
self.supported_per_database_max_performance_levels = None
self.status = None
self.reason = reason
|
spectral/io/__init__.py | wwlswj/spectral | 398 | 12762083 |
from __future__ import absolute_import, division, print_function, unicode_literals
from .spyfile import SpyFile
from ..io import aviris
from ..io import erdas
from ..io import envi
|
tests/test_provider_hashicorp_aws.py | mjuenema/python-terrascript | 507 | 12762092 |
# tests/test_provider_hashicorp_aws.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:12:25 UTC)
def test_provider_import():
import terrascript.provider.hashicorp.aws
def test_resource_import():
from terrascript.resource.hashicorp.aws import aws_accessanalyzer_analyzer
from terrascript.resource.hashicorp.aws import aws_acm_certificate
from terrascript.resource.hashicorp.aws import aws_acm_certificate_validation
from terrascript.resource.hashicorp.aws import aws_acmpca_certificate
from terrascript.resource.hashicorp.aws import aws_acmpca_certificate_authority
from terrascript.resource.hashicorp.aws import (
aws_acmpca_certificate_authority_certificate,
)
from terrascript.resource.hashicorp.aws import aws_alb
from terrascript.resource.hashicorp.aws import aws_alb_listener
from terrascript.resource.hashicorp.aws import aws_alb_listener_certificate
from terrascript.resource.hashicorp.aws import aws_alb_listener_rule
from terrascript.resource.hashicorp.aws import aws_alb_target_group
from terrascript.resource.hashicorp.aws import aws_alb_target_group_attachment
from terrascript.resource.hashicorp.aws import aws_ami
from terrascript.resource.hashicorp.aws import aws_ami_copy
from terrascript.resource.hashicorp.aws import aws_ami_from_instance
from terrascript.resource.hashicorp.aws import aws_ami_launch_permission
from terrascript.resource.hashicorp.aws import aws_amplify_app
from terrascript.resource.hashicorp.aws import aws_amplify_backend_environment
from terrascript.resource.hashicorp.aws import aws_amplify_branch
from terrascript.resource.hashicorp.aws import aws_amplify_domain_association
from terrascript.resource.hashicorp.aws import aws_amplify_webhook
from terrascript.resource.hashicorp.aws import aws_api_gateway_account
from terrascript.resource.hashicorp.aws import aws_api_gateway_api_key
from terrascript.resource.hashicorp.aws import aws_api_gateway_authorizer
from terrascript.resource.hashicorp.aws import aws_api_gateway_base_path_mapping
from terrascript.resource.hashicorp.aws import aws_api_gateway_client_certificate
from terrascript.resource.hashicorp.aws import aws_api_gateway_deployment
from terrascript.resource.hashicorp.aws import aws_api_gateway_documentation_part
from terrascript.resource.hashicorp.aws import aws_api_gateway_documentation_version
from terrascript.resource.hashicorp.aws import aws_api_gateway_domain_name
from terrascript.resource.hashicorp.aws import aws_api_gateway_gateway_response
from terrascript.resource.hashicorp.aws import aws_api_gateway_integration
from terrascript.resource.hashicorp.aws import aws_api_gateway_integration_response
from terrascript.resource.hashicorp.aws import aws_api_gateway_method
from terrascript.resource.hashicorp.aws import aws_api_gateway_method_response
from terrascript.resource.hashicorp.aws import aws_api_gateway_method_settings
from terrascript.resource.hashicorp.aws import aws_api_gateway_model
from terrascript.resource.hashicorp.aws import aws_api_gateway_request_validator
from terrascript.resource.hashicorp.aws import aws_api_gateway_resource
from terrascript.resource.hashicorp.aws import aws_api_gateway_rest_api
from terrascript.resource.hashicorp.aws import aws_api_gateway_rest_api_policy
from terrascript.resource.hashicorp.aws import aws_api_gateway_stage
from terrascript.resource.hashicorp.aws import aws_api_gateway_usage_plan
from terrascript.resource.hashicorp.aws import aws_api_gateway_usage_plan_key
from terrascript.resource.hashicorp.aws import aws_api_gateway_vpc_link
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_api
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_api_mapping
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_authorizer
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_deployment
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_domain_name
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_integration
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_integration_response
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_model
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_route
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_route_response
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_stage
from terrascript.resource.hashicorp.aws import aws_apigatewayv2_vpc_link
from terrascript.resource.hashicorp.aws import aws_app_cookie_stickiness_policy
from terrascript.resource.hashicorp.aws import aws_appautoscaling_policy
from terrascript.resource.hashicorp.aws import aws_appautoscaling_scheduled_action
from terrascript.resource.hashicorp.aws import aws_appautoscaling_target
from terrascript.resource.hashicorp.aws import aws_appconfig_application
from terrascript.resource.hashicorp.aws import aws_appconfig_configuration_profile
from terrascript.resource.hashicorp.aws import aws_appconfig_deployment
from terrascript.resource.hashicorp.aws import aws_appconfig_deployment_strategy
from terrascript.resource.hashicorp.aws import aws_appconfig_environment
from terrascript.resource.hashicorp.aws import (
aws_appconfig_hosted_configuration_version,
)
from terrascript.resource.hashicorp.aws import aws_appmesh_gateway_route
from terrascript.resource.hashicorp.aws import aws_appmesh_mesh
from terrascript.resource.hashicorp.aws import aws_appmesh_route
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_gateway
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_node
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_router
from terrascript.resource.hashicorp.aws import aws_appmesh_virtual_service
from terrascript.resource.hashicorp.aws import (
aws_apprunner_auto_scaling_configuration_version,
)
from terrascript.resource.hashicorp.aws import aws_apprunner_connection
from terrascript.resource.hashicorp.aws import (
aws_apprunner_custom_domain_association,
)
from terrascript.resource.hashicorp.aws import aws_apprunner_service
from terrascript.resource.hashicorp.aws import aws_appstream_fleet
from terrascript.resource.hashicorp.aws import aws_appstream_stack
from terrascript.resource.hashicorp.aws import aws_appsync_api_key
from terrascript.resource.hashicorp.aws import aws_appsync_datasource
from terrascript.resource.hashicorp.aws import aws_appsync_function
from terrascript.resource.hashicorp.aws import aws_appsync_graphql_api
from terrascript.resource.hashicorp.aws import aws_appsync_resolver
from terrascript.resource.hashicorp.aws import aws_athena_database
from terrascript.resource.hashicorp.aws import aws_athena_named_query
from terrascript.resource.hashicorp.aws import aws_athena_workgroup
from terrascript.resource.hashicorp.aws import aws_autoscaling_attachment
from terrascript.resource.hashicorp.aws import aws_autoscaling_group
from terrascript.resource.hashicorp.aws import aws_autoscaling_group_tag
from terrascript.resource.hashicorp.aws import aws_autoscaling_lifecycle_hook
from terrascript.resource.hashicorp.aws import aws_autoscaling_notification
from terrascript.resource.hashicorp.aws import aws_autoscaling_policy
from terrascript.resource.hashicorp.aws import aws_autoscaling_schedule
from terrascript.resource.hashicorp.aws import aws_autoscalingplans_scaling_plan
from terrascript.resource.hashicorp.aws import aws_backup_global_settings
from terrascript.resource.hashicorp.aws import aws_backup_plan
from terrascript.resource.hashicorp.aws import aws_backup_region_settings
from terrascript.resource.hashicorp.aws import aws_backup_selection
from terrascript.resource.hashicorp.aws import aws_backup_vault
from terrascript.resource.hashicorp.aws import aws_backup_vault_notifications
from terrascript.resource.hashicorp.aws import aws_backup_vault_policy
from terrascript.resource.hashicorp.aws import aws_batch_compute_environment
from terrascript.resource.hashicorp.aws import aws_batch_job_definition
from terrascript.resource.hashicorp.aws import aws_batch_job_queue
from terrascript.resource.hashicorp.aws import aws_budgets_budget
from terrascript.resource.hashicorp.aws import aws_budgets_budget_action
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_group
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_logging
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_origination
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_streaming
from terrascript.resource.hashicorp.aws import aws_chime_voice_connector_termination
from terrascript.resource.hashicorp.aws import aws_cloud9_environment_ec2
from terrascript.resource.hashicorp.aws import aws_cloudformation_stack
from terrascript.resource.hashicorp.aws import aws_cloudformation_stack_set
from terrascript.resource.hashicorp.aws import aws_cloudformation_stack_set_instance
from terrascript.resource.hashicorp.aws import aws_cloudformation_type
from terrascript.resource.hashicorp.aws import aws_cloudfront_cache_policy
from terrascript.resource.hashicorp.aws import aws_cloudfront_distribution
from terrascript.resource.hashicorp.aws import aws_cloudfront_function
from terrascript.resource.hashicorp.aws import aws_cloudfront_key_group
from terrascript.resource.hashicorp.aws import (
aws_cloudfront_monitoring_subscription,
)
from terrascript.resource.hashicorp.aws import aws_cloudfront_origin_access_identity
from terrascript.resource.hashicorp.aws import aws_cloudfront_origin_request_policy
from terrascript.resource.hashicorp.aws import aws_cloudfront_public_key
from terrascript.resource.hashicorp.aws import aws_cloudfront_realtime_log_config
from terrascript.resource.hashicorp.aws import aws_cloudhsm_v2_cluster
from terrascript.resource.hashicorp.aws import aws_cloudhsm_v2_hsm
from terrascript.resource.hashicorp.aws import aws_cloudtrail
from terrascript.resource.hashicorp.aws import aws_cloudwatch_composite_alarm
from terrascript.resource.hashicorp.aws import aws_cloudwatch_dashboard
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_api_destination
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_archive
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_bus
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_bus_policy
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_connection
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_permission
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_rule
from terrascript.resource.hashicorp.aws import aws_cloudwatch_event_target
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_destination
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_destination_policy
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_group
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_metric_filter
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_resource_policy
from terrascript.resource.hashicorp.aws import aws_cloudwatch_log_stream
from terrascript.resource.hashicorp.aws import (
aws_cloudwatch_log_subscription_filter,
)
from terrascript.resource.hashicorp.aws import aws_cloudwatch_metric_alarm
from terrascript.resource.hashicorp.aws import aws_cloudwatch_metric_stream
from terrascript.resource.hashicorp.aws import aws_cloudwatch_query_definition
from terrascript.resource.hashicorp.aws import aws_codeartifact_domain
from terrascript.resource.hashicorp.aws import (
aws_codeartifact_domain_permissions_policy,
)
from terrascript.resource.hashicorp.aws import aws_codeartifact_repository
from terrascript.resource.hashicorp.aws import (
aws_codeartifact_repository_permissions_policy,
)
from terrascript.resource.hashicorp.aws import aws_codebuild_project
from terrascript.resource.hashicorp.aws import aws_codebuild_report_group
from terrascript.resource.hashicorp.aws import aws_codebuild_source_credential
from terrascript.resource.hashicorp.aws import aws_codebuild_webhook
from terrascript.resource.hashicorp.aws import aws_codecommit_repository
from terrascript.resource.hashicorp.aws import aws_codecommit_trigger
from terrascript.resource.hashicorp.aws import aws_codedeploy_app
from terrascript.resource.hashicorp.aws import aws_codedeploy_deployment_config
from terrascript.resource.hashicorp.aws import aws_codedeploy_deployment_group
from terrascript.resource.hashicorp.aws import aws_codepipeline
from terrascript.resource.hashicorp.aws import aws_codepipeline_webhook
from terrascript.resource.hashicorp.aws import aws_codestarconnections_connection
from terrascript.resource.hashicorp.aws import aws_codestarconnections_host
from terrascript.resource.hashicorp.aws import (
aws_codestarnotifications_notification_rule,
)
from terrascript.resource.hashicorp.aws import aws_cognito_identity_pool
from terrascript.resource.hashicorp.aws import (
aws_cognito_identity_pool_roles_attachment,
)
from terrascript.resource.hashicorp.aws import aws_cognito_identity_provider
from terrascript.resource.hashicorp.aws import aws_cognito_resource_server
from terrascript.resource.hashicorp.aws import aws_cognito_user_group
from terrascript.resource.hashicorp.aws import aws_cognito_user_pool
from terrascript.resource.hashicorp.aws import aws_cognito_user_pool_client
from terrascript.resource.hashicorp.aws import aws_cognito_user_pool_domain
from terrascript.resource.hashicorp.aws import (
aws_cognito_user_pool_ui_customization,
)
from terrascript.resource.hashicorp.aws import aws_config_aggregate_authorization
from terrascript.resource.hashicorp.aws import aws_config_config_rule
from terrascript.resource.hashicorp.aws import aws_config_configuration_aggregator
from terrascript.resource.hashicorp.aws import aws_config_configuration_recorder
from terrascript.resource.hashicorp.aws import (
aws_config_configuration_recorder_status,
)
from terrascript.resource.hashicorp.aws import aws_config_conformance_pack
from terrascript.resource.hashicorp.aws import aws_config_delivery_channel
from terrascript.resource.hashicorp.aws import (
aws_config_organization_conformance_pack,
)
from terrascript.resource.hashicorp.aws import aws_config_organization_custom_rule
from terrascript.resource.hashicorp.aws import aws_config_organization_managed_rule
from terrascript.resource.hashicorp.aws import aws_config_remediation_configuration
from terrascript.resource.hashicorp.aws import aws_connect_contact_flow
from terrascript.resource.hashicorp.aws import aws_connect_instance
from terrascript.resource.hashicorp.aws import aws_cur_report_definition
from terrascript.resource.hashicorp.aws import aws_customer_gateway
from terrascript.resource.hashicorp.aws import aws_datapipeline_pipeline
from terrascript.resource.hashicorp.aws import aws_datasync_agent
from terrascript.resource.hashicorp.aws import aws_datasync_location_efs
from terrascript.resource.hashicorp.aws import (
aws_datasync_location_fsx_windows_file_system,
)
from terrascript.resource.hashicorp.aws import aws_datasync_location_nfs
from terrascript.resource.hashicorp.aws import aws_datasync_location_s3
from terrascript.resource.hashicorp.aws import aws_datasync_location_smb
from terrascript.resource.hashicorp.aws import aws_datasync_task
from terrascript.resource.hashicorp.aws import aws_dax_cluster
from terrascript.resource.hashicorp.aws import aws_dax_parameter_group
from terrascript.resource.hashicorp.aws import aws_dax_subnet_group
from terrascript.resource.hashicorp.aws import aws_db_cluster_snapshot
from terrascript.resource.hashicorp.aws import aws_db_event_subscription
from terrascript.resource.hashicorp.aws import aws_db_instance
from terrascript.resource.hashicorp.aws import aws_db_instance_role_association
from terrascript.resource.hashicorp.aws import aws_db_option_group
from terrascript.resource.hashicorp.aws import aws_db_parameter_group
from terrascript.resource.hashicorp.aws import aws_db_proxy
from terrascript.resource.hashicorp.aws import aws_db_proxy_default_target_group
from terrascript.resource.hashicorp.aws import aws_db_proxy_endpoint
from terrascript.resource.hashicorp.aws import aws_db_proxy_target
from terrascript.resource.hashicorp.aws import aws_db_security_group
from terrascript.resource.hashicorp.aws import aws_db_snapshot
from terrascript.resource.hashicorp.aws import aws_db_subnet_group
from terrascript.resource.hashicorp.aws import aws_default_network_acl
from terrascript.resource.hashicorp.aws import aws_default_route_table
from terrascript.resource.hashicorp.aws import aws_default_security_group
from terrascript.resource.hashicorp.aws import aws_default_subnet
from terrascript.resource.hashicorp.aws import aws_default_vpc
from terrascript.resource.hashicorp.aws import aws_default_vpc_dhcp_options
from terrascript.resource.hashicorp.aws import aws_devicefarm_project
from terrascript.resource.hashicorp.aws import (
aws_directory_service_conditional_forwarder,
)
from terrascript.resource.hashicorp.aws import aws_directory_service_directory
from terrascript.resource.hashicorp.aws import (
aws_directory_service_log_subscription,
)
from terrascript.resource.hashicorp.aws import aws_dlm_lifecycle_policy
from terrascript.resource.hashicorp.aws import aws_dms_certificate
from terrascript.resource.hashicorp.aws import aws_dms_endpoint
from terrascript.resource.hashicorp.aws import aws_dms_event_subscription
from terrascript.resource.hashicorp.aws import aws_dms_replication_instance
from terrascript.resource.hashicorp.aws import aws_dms_replication_subnet_group
from terrascript.resource.hashicorp.aws import aws_dms_replication_task
from terrascript.resource.hashicorp.aws import aws_docdb_cluster
from terrascript.resource.hashicorp.aws import aws_docdb_cluster_instance
from terrascript.resource.hashicorp.aws import aws_docdb_cluster_parameter_group
from terrascript.resource.hashicorp.aws import aws_docdb_cluster_snapshot
from terrascript.resource.hashicorp.aws import aws_docdb_subnet_group
from terrascript.resource.hashicorp.aws import aws_dx_bgp_peer
from terrascript.resource.hashicorp.aws import aws_dx_connection
from terrascript.resource.hashicorp.aws import aws_dx_connection_association
from terrascript.resource.hashicorp.aws import aws_dx_gateway
from terrascript.resource.hashicorp.aws import aws_dx_gateway_association
from terrascript.resource.hashicorp.aws import aws_dx_gateway_association_proposal
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_private_virtual_interface,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_private_virtual_interface_accepter,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_public_virtual_interface,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_public_virtual_interface_accepter,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_transit_virtual_interface,
)
from terrascript.resource.hashicorp.aws import (
aws_dx_hosted_transit_virtual_interface_accepter,
)
from terrascript.resource.hashicorp.aws import aws_dx_lag
from terrascript.resource.hashicorp.aws import aws_dx_private_virtual_interface
from terrascript.resource.hashicorp.aws import aws_dx_public_virtual_interface
from terrascript.resource.hashicorp.aws import aws_dx_transit_virtual_interface
from terrascript.resource.hashicorp.aws import aws_dynamodb_global_table
from terrascript.resource.hashicorp.aws import (
aws_dynamodb_kinesis_streaming_destination,
)
from terrascript.resource.hashicorp.aws import aws_dynamodb_table
from terrascript.resource.hashicorp.aws import aws_dynamodb_table_item
from terrascript.resource.hashicorp.aws import aws_dynamodb_tag
from terrascript.resource.hashicorp.aws import aws_ebs_default_kms_key
from terrascript.resource.hashicorp.aws import aws_ebs_encryption_by_default
from terrascript.resource.hashicorp.aws import aws_ebs_snapshot
from terrascript.resource.hashicorp.aws import aws_ebs_snapshot_copy
from terrascript.resource.hashicorp.aws import aws_ebs_snapshot_import
from terrascript.resource.hashicorp.aws import aws_ebs_volume
from terrascript.resource.hashicorp.aws import aws_ec2_availability_zone_group
from terrascript.resource.hashicorp.aws import aws_ec2_capacity_reservation
from terrascript.resource.hashicorp.aws import aws_ec2_carrier_gateway
from terrascript.resource.hashicorp.aws import aws_ec2_client_vpn_authorization_rule
from terrascript.resource.hashicorp.aws import aws_ec2_client_vpn_endpoint
from terrascript.resource.hashicorp.aws import (
aws_ec2_client_vpn_network_association,
)
from terrascript.resource.hashicorp.aws import aws_ec2_client_vpn_route
from terrascript.resource.hashicorp.aws import aws_ec2_fleet
from terrascript.resource.hashicorp.aws import aws_ec2_local_gateway_route
from terrascript.resource.hashicorp.aws import (
aws_ec2_local_gateway_route_table_vpc_association,
)
from terrascript.resource.hashicorp.aws import aws_ec2_managed_prefix_list
from terrascript.resource.hashicorp.aws import aws_ec2_managed_prefix_list_entry
from terrascript.resource.hashicorp.aws import aws_ec2_tag
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_filter
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_filter_rule
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_session
from terrascript.resource.hashicorp.aws import aws_ec2_traffic_mirror_target
from terrascript.resource.hashicorp.aws import aws_ec2_transit_gateway
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_peering_attachment,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_peering_attachment_accepter,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_prefix_list_reference,
)
from terrascript.resource.hashicorp.aws import aws_ec2_transit_gateway_route
from terrascript.resource.hashicorp.aws import aws_ec2_transit_gateway_route_table
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_route_table_association,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_route_table_propagation,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_vpc_attachment,
)
from terrascript.resource.hashicorp.aws import (
aws_ec2_transit_gateway_vpc_attachment_accepter,
)
from terrascript.resource.hashicorp.aws import aws_ecr_lifecycle_policy
from terrascript.resource.hashicorp.aws import aws_ecr_registry_policy
from terrascript.resource.hashicorp.aws import aws_ecr_replication_configuration
from terrascript.resource.hashicorp.aws import aws_ecr_repository
from terrascript.resource.hashicorp.aws import aws_ecr_repository_policy
from terrascript.resource.hashicorp.aws import aws_ecrpublic_repository
from terrascript.resource.hashicorp.aws import aws_ecs_capacity_provider
from terrascript.resource.hashicorp.aws import aws_ecs_cluster
from terrascript.resource.hashicorp.aws import aws_ecs_service
from terrascript.resource.hashicorp.aws import aws_ecs_tag
from terrascript.resource.hashicorp.aws import aws_ecs_task_definition
from terrascript.resource.hashicorp.aws import aws_efs_access_point
from terrascript.resource.hashicorp.aws import aws_efs_backup_policy
from terrascript.resource.hashicorp.aws import aws_efs_file_system
from terrascript.resource.hashicorp.aws import aws_efs_file_system_policy
from terrascript.resource.hashicorp.aws import aws_efs_mount_target
from terrascript.resource.hashicorp.aws import aws_egress_only_internet_gateway
from terrascript.resource.hashicorp.aws import aws_eip
from terrascript.resource.hashicorp.aws import aws_eip_association
from terrascript.resource.hashicorp.aws import aws_eks_addon
from terrascript.resource.hashicorp.aws import aws_eks_cluster
from terrascript.resource.hashicorp.aws import aws_eks_fargate_profile
from terrascript.resource.hashicorp.aws import aws_eks_identity_provider_config
from terrascript.resource.hashicorp.aws import aws_eks_node_group
from terrascript.resource.hashicorp.aws import aws_elastic_beanstalk_application
from terrascript.resource.hashicorp.aws import (
aws_elastic_beanstalk_application_version,
)
from terrascript.resource.hashicorp.aws import (
aws_elastic_beanstalk_configuration_template,
)
from terrascript.resource.hashicorp.aws import aws_elastic_beanstalk_environment
from terrascript.resource.hashicorp.aws import aws_elasticache_cluster
from terrascript.resource.hashicorp.aws import (
aws_elasticache_global_replication_group,
)
from terrascript.resource.hashicorp.aws import aws_elasticache_parameter_group
from terrascript.resource.hashicorp.aws import aws_elasticache_replication_group
from terrascript.resource.hashicorp.aws import aws_elasticache_security_group
from terrascript.resource.hashicorp.aws import aws_elasticache_subnet_group
from terrascript.resource.hashicorp.aws import aws_elasticache_user
from terrascript.resource.hashicorp.aws import aws_elasticache_user_group
from terrascript.resource.hashicorp.aws import aws_elasticsearch_domain
from terrascript.resource.hashicorp.aws import aws_elasticsearch_domain_policy
from terrascript.resource.hashicorp.aws import aws_elasticsearch_domain_saml_options
from terrascript.resource.hashicorp.aws import aws_elastictranscoder_pipeline
from terrascript.resource.hashicorp.aws import aws_elastictranscoder_preset
from terrascript.resource.hashicorp.aws import aws_elb
from terrascript.resource.hashicorp.aws import aws_elb_attachment
from terrascript.resource.hashicorp.aws import aws_emr_cluster
from terrascript.resource.hashicorp.aws import aws_emr_instance_fleet
from terrascript.resource.hashicorp.aws import aws_emr_instance_group
from terrascript.resource.hashicorp.aws import aws_emr_managed_scaling_policy
from terrascript.resource.hashicorp.aws import aws_emr_security_configuration
from terrascript.resource.hashicorp.aws import aws_flow_log
from terrascript.resource.hashicorp.aws import aws_fms_admin_account
from terrascript.resource.hashicorp.aws import aws_fms_policy
from terrascript.resource.hashicorp.aws import aws_fsx_backup
from terrascript.resource.hashicorp.aws import aws_fsx_lustre_file_system
from terrascript.resource.hashicorp.aws import aws_fsx_ontap_file_system
from terrascript.resource.hashicorp.aws import aws_fsx_windows_file_system
from terrascript.resource.hashicorp.aws import aws_gamelift_alias
from terrascript.resource.hashicorp.aws import aws_gamelift_build
from terrascript.resource.hashicorp.aws import aws_gamelift_fleet
from terrascript.resource.hashicorp.aws import aws_gamelift_game_session_queue
from terrascript.resource.hashicorp.aws import aws_glacier_vault
from terrascript.resource.hashicorp.aws import aws_glacier_vault_lock
from terrascript.resource.hashicorp.aws import aws_globalaccelerator_accelerator
from terrascript.resource.hashicorp.aws import aws_globalaccelerator_endpoint_group
from terrascript.resource.hashicorp.aws import aws_globalaccelerator_listener
from terrascript.resource.hashicorp.aws import aws_glue_catalog_database
from terrascript.resource.hashicorp.aws import aws_glue_catalog_table
from terrascript.resource.hashicorp.aws import aws_glue_classifier
from terrascript.resource.hashicorp.aws import aws_glue_connection
from terrascript.resource.hashicorp.aws import aws_glue_crawler
from terrascript.resource.hashicorp.aws import (
aws_glue_data_catalog_encryption_settings,
)
from terrascript.resource.hashicorp.aws import aws_glue_dev_endpoint
from terrascript.resource.hashicorp.aws import aws_glue_job
from terrascript.resource.hashicorp.aws import aws_glue_ml_transform
from terrascript.resource.hashicorp.aws import aws_glue_partition
from terrascript.resource.hashicorp.aws import aws_glue_registry
from terrascript.resource.hashicorp.aws import aws_glue_resource_policy
from terrascript.resource.hashicorp.aws import aws_glue_schema
from terrascript.resource.hashicorp.aws import aws_glue_security_configuration
from terrascript.resource.hashicorp.aws import aws_glue_trigger
from terrascript.resource.hashicorp.aws import aws_glue_user_defined_function
from terrascript.resource.hashicorp.aws import aws_glue_workflow
from terrascript.resource.hashicorp.aws import aws_guardduty_detector
from terrascript.resource.hashicorp.aws import aws_guardduty_filter
from terrascript.resource.hashicorp.aws import aws_guardduty_invite_accepter
from terrascript.resource.hashicorp.aws import aws_guardduty_ipset
from terrascript.resource.hashicorp.aws import aws_guardduty_member
from terrascript.resource.hashicorp.aws import (
aws_guardduty_organization_admin_account,
)
from terrascript.resource.hashicorp.aws import (
aws_guardduty_organization_configuration,
)
from terrascript.resource.hashicorp.aws import aws_guardduty_publishing_destination
from terrascript.resource.hashicorp.aws import aws_guardduty_threatintelset
from terrascript.resource.hashicorp.aws import aws_iam_access_key
from terrascript.resource.hashicorp.aws import aws_iam_account_alias
from terrascript.resource.hashicorp.aws import aws_iam_account_password_policy
from terrascript.resource.hashicorp.aws import aws_iam_group
from terrascript.resource.hashicorp.aws import aws_iam_group_membership
from terrascript.resource.hashicorp.aws import aws_iam_group_policy
from terrascript.resource.hashicorp.aws import aws_iam_group_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_instance_profile
from terrascript.resource.hashicorp.aws import aws_iam_openid_connect_provider
from terrascript.resource.hashicorp.aws import aws_iam_policy
from terrascript.resource.hashicorp.aws import aws_iam_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_role
from terrascript.resource.hashicorp.aws import aws_iam_role_policy
from terrascript.resource.hashicorp.aws import aws_iam_role_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_saml_provider
from terrascript.resource.hashicorp.aws import aws_iam_server_certificate
from terrascript.resource.hashicorp.aws import aws_iam_service_linked_role
from terrascript.resource.hashicorp.aws import aws_iam_user
from terrascript.resource.hashicorp.aws import aws_iam_user_group_membership
from terrascript.resource.hashicorp.aws import aws_iam_user_login_profile
from terrascript.resource.hashicorp.aws import aws_iam_user_policy
from terrascript.resource.hashicorp.aws import aws_iam_user_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iam_user_ssh_key
from terrascript.resource.hashicorp.aws import aws_imagebuilder_component
from terrascript.resource.hashicorp.aws import (
aws_imagebuilder_distribution_configuration,
)
from terrascript.resource.hashicorp.aws import aws_imagebuilder_image
from terrascript.resource.hashicorp.aws import aws_imagebuilder_image_pipeline
from terrascript.resource.hashicorp.aws import aws_imagebuilder_image_recipe
from terrascript.resource.hashicorp.aws import (
aws_imagebuilder_infrastructure_configuration,
)
from terrascript.resource.hashicorp.aws import aws_inspector_assessment_target
from terrascript.resource.hashicorp.aws import aws_inspector_assessment_template
from terrascript.resource.hashicorp.aws import aws_inspector_resource_group
from terrascript.resource.hashicorp.aws import aws_instance
from terrascript.resource.hashicorp.aws import aws_internet_gateway
from terrascript.resource.hashicorp.aws import aws_iot_certificate
from terrascript.resource.hashicorp.aws import aws_iot_policy
from terrascript.resource.hashicorp.aws import aws_iot_policy_attachment
from terrascript.resource.hashicorp.aws import aws_iot_role_alias
from terrascript.resource.hashicorp.aws import aws_iot_thing
from terrascript.resource.hashicorp.aws import aws_iot_thing_principal_attachment
from terrascript.resource.hashicorp.aws import aws_iot_thing_type
from terrascript.resource.hashicorp.aws import aws_iot_topic_rule
from terrascript.resource.hashicorp.aws import aws_key_pair
from terrascript.resource.hashicorp.aws import aws_kinesis_analytics_application
from terrascript.resource.hashicorp.aws import aws_kinesis_firehose_delivery_stream
from terrascript.resource.hashicorp.aws import aws_kinesis_stream
from terrascript.resource.hashicorp.aws import aws_kinesis_stream_consumer
from terrascript.resource.hashicorp.aws import aws_kinesis_video_stream
from terrascript.resource.hashicorp.aws import aws_kinesisanalyticsv2_application
from terrascript.resource.hashicorp.aws import (
aws_kinesisanalyticsv2_application_snapshot,
)
from terrascript.resource.hashicorp.aws import aws_kms_alias
from terrascript.resource.hashicorp.aws import aws_kms_ciphertext
from terrascript.resource.hashicorp.aws import aws_kms_external_key
from terrascript.resource.hashicorp.aws import aws_kms_grant
from terrascript.resource.hashicorp.aws import aws_kms_key
from terrascript.resource.hashicorp.aws import aws_lakeformation_data_lake_settings
from terrascript.resource.hashicorp.aws import aws_lakeformation_permissions
from terrascript.resource.hashicorp.aws import aws_lakeformation_resource
from terrascript.resource.hashicorp.aws import aws_lambda_alias
from terrascript.resource.hashicorp.aws import aws_lambda_code_signing_config
from terrascript.resource.hashicorp.aws import aws_lambda_event_source_mapping
from terrascript.resource.hashicorp.aws import aws_lambda_function
from terrascript.resource.hashicorp.aws import (
aws_lambda_function_event_invoke_config,
)
from terrascript.resource.hashicorp.aws import aws_lambda_layer_version
from terrascript.resource.hashicorp.aws import aws_lambda_permission
from terrascript.resource.hashicorp.aws import (
aws_lambda_provisioned_concurrency_config,
)
from terrascript.resource.hashicorp.aws import aws_launch_configuration
from terrascript.resource.hashicorp.aws import aws_launch_template
from terrascript.resource.hashicorp.aws import aws_lb
from terrascript.resource.hashicorp.aws import aws_lb_cookie_stickiness_policy
from terrascript.resource.hashicorp.aws import aws_lb_listener
from terrascript.resource.hashicorp.aws import aws_lb_listener_certificate
from terrascript.resource.hashicorp.aws import aws_lb_listener_rule
from terrascript.resource.hashicorp.aws import aws_lb_ssl_negotiation_policy
from terrascript.resource.hashicorp.aws import aws_lb_target_group
from terrascript.resource.hashicorp.aws import aws_lb_target_group_attachment
from terrascript.resource.hashicorp.aws import aws_lex_bot
from terrascript.resource.hashicorp.aws import aws_lex_bot_alias
from terrascript.resource.hashicorp.aws import aws_lex_intent
from terrascript.resource.hashicorp.aws import aws_lex_slot_type
from terrascript.resource.hashicorp.aws import aws_licensemanager_association
from terrascript.resource.hashicorp.aws import (
aws_licensemanager_license_configuration,
)
from terrascript.resource.hashicorp.aws import aws_lightsail_domain
from terrascript.resource.hashicorp.aws import aws_lightsail_instance
from terrascript.resource.hashicorp.aws import aws_lightsail_instance_public_ports
from terrascript.resource.hashicorp.aws import aws_lightsail_key_pair
from terrascript.resource.hashicorp.aws import aws_lightsail_static_ip
from terrascript.resource.hashicorp.aws import aws_lightsail_static_ip_attachment
from terrascript.resource.hashicorp.aws import (
aws_load_balancer_backend_server_policy,
)
from terrascript.resource.hashicorp.aws import aws_load_balancer_listener_policy
from terrascript.resource.hashicorp.aws import aws_load_balancer_policy
from terrascript.resource.hashicorp.aws import aws_macie2_account
from terrascript.resource.hashicorp.aws import aws_macie2_classification_job
from terrascript.resource.hashicorp.aws import aws_macie2_custom_data_identifier
from terrascript.resource.hashicorp.aws import aws_macie2_findings_filter
from terrascript.resource.hashicorp.aws import aws_macie2_invitation_accepter
from terrascript.resource.hashicorp.aws import aws_macie2_member
from terrascript.resource.hashicorp.aws import aws_macie2_organization_admin_account
from terrascript.resource.hashicorp.aws import aws_macie_member_account_association
from terrascript.resource.hashicorp.aws import aws_macie_s3_bucket_association
from terrascript.resource.hashicorp.aws import aws_main_route_table_association
from terrascript.resource.hashicorp.aws import aws_media_convert_queue
from terrascript.resource.hashicorp.aws import aws_media_package_channel
from terrascript.resource.hashicorp.aws import aws_media_store_container
from terrascript.resource.hashicorp.aws import aws_media_store_container_policy
from terrascript.resource.hashicorp.aws import aws_mq_broker
from terrascript.resource.hashicorp.aws import aws_mq_configuration
from terrascript.resource.hashicorp.aws import aws_msk_cluster
from terrascript.resource.hashicorp.aws import aws_msk_configuration
from terrascript.resource.hashicorp.aws import aws_msk_scram_secret_association
from terrascript.resource.hashicorp.aws import aws_mwaa_environment
from terrascript.resource.hashicorp.aws import aws_nat_gateway
from terrascript.resource.hashicorp.aws import aws_neptune_cluster
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_endpoint
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_instance
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_parameter_group
from terrascript.resource.hashicorp.aws import aws_neptune_cluster_snapshot
from terrascript.resource.hashicorp.aws import aws_neptune_event_subscription
from terrascript.resource.hashicorp.aws import aws_neptune_parameter_group
from terrascript.resource.hashicorp.aws import aws_neptune_subnet_group
from terrascript.resource.hashicorp.aws import aws_network_acl
from terrascript.resource.hashicorp.aws import aws_network_acl_rule
from terrascript.resource.hashicorp.aws import aws_network_interface
from terrascript.resource.hashicorp.aws import aws_network_interface_attachment
from terrascript.resource.hashicorp.aws import aws_network_interface_sg_attachment
from terrascript.resource.hashicorp.aws import aws_networkfirewall_firewall
from terrascript.resource.hashicorp.aws import aws_networkfirewall_firewall_policy
from terrascript.resource.hashicorp.aws import (
aws_networkfirewall_logging_configuration,
)
from terrascript.resource.hashicorp.aws import aws_networkfirewall_resource_policy
from terrascript.resource.hashicorp.aws import aws_networkfirewall_rule_group
from terrascript.resource.hashicorp.aws import aws_opsworks_application
from terrascript.resource.hashicorp.aws import aws_opsworks_custom_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_ganglia_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_haproxy_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_instance
from terrascript.resource.hashicorp.aws import aws_opsworks_java_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_memcached_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_mysql_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_nodejs_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_permission
from terrascript.resource.hashicorp.aws import aws_opsworks_php_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_rails_app_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_rds_db_instance
from terrascript.resource.hashicorp.aws import aws_opsworks_stack
from terrascript.resource.hashicorp.aws import aws_opsworks_static_web_layer
from terrascript.resource.hashicorp.aws import aws_opsworks_user_profile
from terrascript.resource.hashicorp.aws import aws_organizations_account
from terrascript.resource.hashicorp.aws import (
aws_organizations_delegated_administrator,
)
from terrascript.resource.hashicorp.aws import aws_organizations_organization
from terrascript.resource.hashicorp.aws import aws_organizations_organizational_unit
from terrascript.resource.hashicorp.aws import aws_organizations_policy
from terrascript.resource.hashicorp.aws import aws_organizations_policy_attachment
from terrascript.resource.hashicorp.aws import aws_pinpoint_adm_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_apns_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_apns_sandbox_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_apns_voip_channel
from terrascript.resource.hashicorp.aws import (
aws_pinpoint_apns_voip_sandbox_channel,
)
from terrascript.resource.hashicorp.aws import aws_pinpoint_app
from terrascript.resource.hashicorp.aws import aws_pinpoint_baidu_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_email_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_event_stream
from terrascript.resource.hashicorp.aws import aws_pinpoint_gcm_channel
from terrascript.resource.hashicorp.aws import aws_pinpoint_sms_channel
from terrascript.resource.hashicorp.aws import aws_placement_group
from terrascript.resource.hashicorp.aws import aws_prometheus_workspace
from terrascript.resource.hashicorp.aws import aws_proxy_protocol_policy
from terrascript.resource.hashicorp.aws import aws_qldb_ledger
from terrascript.resource.hashicorp.aws import aws_quicksight_group
from terrascript.resource.hashicorp.aws import aws_quicksight_group_membership
from terrascript.resource.hashicorp.aws import aws_quicksight_user
from terrascript.resource.hashicorp.aws import aws_ram_principal_association
from terrascript.resource.hashicorp.aws import aws_ram_resource_association
from terrascript.resource.hashicorp.aws import aws_ram_resource_share
from terrascript.resource.hashicorp.aws import aws_ram_resource_share_accepter
from terrascript.resource.hashicorp.aws import aws_rds_cluster
from terrascript.resource.hashicorp.aws import aws_rds_cluster_endpoint
from terrascript.resource.hashicorp.aws import aws_rds_cluster_instance
from terrascript.resource.hashicorp.aws import aws_rds_cluster_parameter_group
from terrascript.resource.hashicorp.aws import aws_rds_cluster_role_association
from terrascript.resource.hashicorp.aws import aws_rds_global_cluster
from terrascript.resource.hashicorp.aws import aws_redshift_cluster
from terrascript.resource.hashicorp.aws import aws_redshift_event_subscription
from terrascript.resource.hashicorp.aws import aws_redshift_parameter_group
from terrascript.resource.hashicorp.aws import aws_redshift_security_group
from terrascript.resource.hashicorp.aws import aws_redshift_snapshot_copy_grant
from terrascript.resource.hashicorp.aws import aws_redshift_snapshot_schedule
from terrascript.resource.hashicorp.aws import (
aws_redshift_snapshot_schedule_association,
)
from terrascript.resource.hashicorp.aws import aws_redshift_subnet_group
from terrascript.resource.hashicorp.aws import aws_resourcegroups_group
from terrascript.resource.hashicorp.aws import aws_route
from terrascript.resource.hashicorp.aws import aws_route53_delegation_set
from terrascript.resource.hashicorp.aws import aws_route53_health_check
from terrascript.resource.hashicorp.aws import aws_route53_hosted_zone_dnssec
from terrascript.resource.hashicorp.aws import aws_route53_key_signing_key
from terrascript.resource.hashicorp.aws import aws_route53_query_log
from terrascript.resource.hashicorp.aws import aws_route53_record
from terrascript.resource.hashicorp.aws import aws_route53_resolver_dnssec_config
from terrascript.resource.hashicorp.aws import aws_route53_resolver_endpoint
from terrascript.resource.hashicorp.aws import aws_route53_resolver_firewall_config
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_firewall_domain_list,
)
from terrascript.resource.hashicorp.aws import aws_route53_resolver_firewall_rule
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_firewall_rule_group,
)
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_firewall_rule_group_association,
)
from terrascript.resource.hashicorp.aws import aws_route53_resolver_query_log_config
from terrascript.resource.hashicorp.aws import (
aws_route53_resolver_query_log_config_association,
)
from terrascript.resource.hashicorp.aws import aws_route53_resolver_rule
from terrascript.resource.hashicorp.aws import aws_route53_resolver_rule_association
from terrascript.resource.hashicorp.aws import (
aws_route53_vpc_association_authorization,
)
from terrascript.resource.hashicorp.aws import aws_route53_zone
from terrascript.resource.hashicorp.aws import aws_route53_zone_association
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_cluster,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_control_panel,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_routing_control,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoverycontrolconfig_safety_rule,
)
from terrascript.resource.hashicorp.aws import aws_route53recoveryreadiness_cell
from terrascript.resource.hashicorp.aws import (
aws_route53recoveryreadiness_readiness_check,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoveryreadiness_recovery_group,
)
from terrascript.resource.hashicorp.aws import (
aws_route53recoveryreadiness_resource_set,
)
from terrascript.resource.hashicorp.aws import aws_route_table
from terrascript.resource.hashicorp.aws import aws_route_table_association
from terrascript.resource.hashicorp.aws import aws_s3_access_point
from terrascript.resource.hashicorp.aws import aws_s3_account_public_access_block
from terrascript.resource.hashicorp.aws import aws_s3_bucket
from terrascript.resource.hashicorp.aws import aws_s3_bucket_analytics_configuration
from terrascript.resource.hashicorp.aws import aws_s3_bucket_inventory
from terrascript.resource.hashicorp.aws import aws_s3_bucket_metric
from terrascript.resource.hashicorp.aws import aws_s3_bucket_notification
from terrascript.resource.hashicorp.aws import aws_s3_bucket_object
from terrascript.resource.hashicorp.aws import aws_s3_bucket_ownership_controls
from terrascript.resource.hashicorp.aws import aws_s3_bucket_policy
from terrascript.resource.hashicorp.aws import aws_s3_bucket_public_access_block
from terrascript.resource.hashicorp.aws import aws_s3_object_copy
from terrascript.resource.hashicorp.aws import aws_s3control_bucket
from terrascript.resource.hashicorp.aws import (
aws_s3control_bucket_lifecycle_configuration,
)
from terrascript.resource.hashicorp.aws import aws_s3control_bucket_policy
from terrascript.resource.hashicorp.aws import aws_s3outposts_endpoint
from terrascript.resource.hashicorp.aws import aws_sagemaker_app
from terrascript.resource.hashicorp.aws import aws_sagemaker_app_image_config
from terrascript.resource.hashicorp.aws import aws_sagemaker_code_repository
from terrascript.resource.hashicorp.aws import aws_sagemaker_device_fleet
from terrascript.resource.hashicorp.aws import aws_sagemaker_domain
from terrascript.resource.hashicorp.aws import aws_sagemaker_endpoint
from terrascript.resource.hashicorp.aws import aws_sagemaker_endpoint_configuration
from terrascript.resource.hashicorp.aws import aws_sagemaker_feature_group
from terrascript.resource.hashicorp.aws import aws_sagemaker_flow_definition
from terrascript.resource.hashicorp.aws import aws_sagemaker_human_task_ui
from terrascript.resource.hashicorp.aws import aws_sagemaker_image
from terrascript.resource.hashicorp.aws import aws_sagemaker_image_version
from terrascript.resource.hashicorp.aws import aws_sagemaker_model
from terrascript.resource.hashicorp.aws import aws_sagemaker_model_package_group
from terrascript.resource.hashicorp.aws import aws_sagemaker_notebook_instance
from terrascript.resource.hashicorp.aws import (
aws_sagemaker_notebook_instance_lifecycle_configuration,
)
from terrascript.resource.hashicorp.aws import aws_sagemaker_user_profile
from terrascript.resource.hashicorp.aws import aws_sagemaker_workforce
from terrascript.resource.hashicorp.aws import aws_sagemaker_workteam
from terrascript.resource.hashicorp.aws import aws_schemas_discoverer
from terrascript.resource.hashicorp.aws import aws_schemas_registry
from terrascript.resource.hashicorp.aws import aws_schemas_schema
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret_policy
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret_rotation
from terrascript.resource.hashicorp.aws import aws_secretsmanager_secret_version
from terrascript.resource.hashicorp.aws import aws_security_group
from terrascript.resource.hashicorp.aws import aws_security_group_rule
from terrascript.resource.hashicorp.aws import aws_securityhub_account
from terrascript.resource.hashicorp.aws import aws_securityhub_action_target
from terrascript.resource.hashicorp.aws import aws_securityhub_insight
from terrascript.resource.hashicorp.aws import aws_securityhub_invite_accepter
from terrascript.resource.hashicorp.aws import aws_securityhub_member
from terrascript.resource.hashicorp.aws import (
aws_securityhub_organization_admin_account,
)
from terrascript.resource.hashicorp.aws import (
aws_securityhub_organization_configuration,
)
from terrascript.resource.hashicorp.aws import aws_securityhub_product_subscription
from terrascript.resource.hashicorp.aws import aws_securityhub_standards_control
from terrascript.resource.hashicorp.aws import (
aws_securityhub_standards_subscription,
)
from terrascript.resource.hashicorp.aws import (
aws_serverlessapplicationrepository_cloudformation_stack,
)
from terrascript.resource.hashicorp.aws import aws_service_discovery_http_namespace
from terrascript.resource.hashicorp.aws import aws_service_discovery_instance
from terrascript.resource.hashicorp.aws import (
aws_service_discovery_private_dns_namespace,
)
from terrascript.resource.hashicorp.aws import (
aws_service_discovery_public_dns_namespace,
)
from terrascript.resource.hashicorp.aws import aws_service_discovery_service
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_budget_resource_association,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_constraint
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_organizations_access,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_portfolio
from terrascript.resource.hashicorp.aws import aws_servicecatalog_portfolio_share
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_principal_portfolio_association,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_product
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_product_portfolio_association,
)
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_provisioned_product,
)
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_provisioning_artifact,
)
from terrascript.resource.hashicorp.aws import aws_servicecatalog_service_action
from terrascript.resource.hashicorp.aws import aws_servicecatalog_tag_option
from terrascript.resource.hashicorp.aws import (
aws_servicecatalog_tag_option_resource_association,
)
from terrascript.resource.hashicorp.aws import aws_servicequotas_service_quota
from terrascript.resource.hashicorp.aws import aws_ses_active_receipt_rule_set
from terrascript.resource.hashicorp.aws import aws_ses_configuration_set
from terrascript.resource.hashicorp.aws import aws_ses_domain_dkim
from terrascript.resource.hashicorp.aws import aws_ses_domain_identity
from terrascript.resource.hashicorp.aws import aws_ses_domain_identity_verification
from terrascript.resource.hashicorp.aws import aws_ses_domain_mail_from
from terrascript.resource.hashicorp.aws import aws_ses_email_identity
from terrascript.resource.hashicorp.aws import aws_ses_event_destination
from terrascript.resource.hashicorp.aws import aws_ses_identity_notification_topic
from terrascript.resource.hashicorp.aws import aws_ses_identity_policy
from terrascript.resource.hashicorp.aws import aws_ses_receipt_filter
from terrascript.resource.hashicorp.aws import aws_ses_receipt_rule
from terrascript.resource.hashicorp.aws import aws_ses_receipt_rule_set
from terrascript.resource.hashicorp.aws import aws_ses_template
from terrascript.resource.hashicorp.aws import aws_sfn_activity
from terrascript.resource.hashicorp.aws import aws_sfn_state_machine
from terrascript.resource.hashicorp.aws import aws_shield_protection
from terrascript.resource.hashicorp.aws import aws_shield_protection_group
from terrascript.resource.hashicorp.aws import aws_signer_signing_job
from terrascript.resource.hashicorp.aws import aws_signer_signing_profile
from terrascript.resource.hashicorp.aws import aws_signer_signing_profile_permission
from terrascript.resource.hashicorp.aws import aws_simpledb_domain
from terrascript.resource.hashicorp.aws import aws_snapshot_create_volume_permission
from terrascript.resource.hashicorp.aws import aws_sns_platform_application
from terrascript.resource.hashicorp.aws import aws_sns_sms_preferences
from terrascript.resource.hashicorp.aws import aws_sns_topic
from terrascript.resource.hashicorp.aws import aws_sns_topic_policy
from terrascript.resource.hashicorp.aws import aws_sns_topic_subscription
from terrascript.resource.hashicorp.aws import aws_spot_datafeed_subscription
from terrascript.resource.hashicorp.aws import aws_spot_fleet_request
from terrascript.resource.hashicorp.aws import aws_spot_instance_request
from terrascript.resource.hashicorp.aws import aws_sqs_queue
from terrascript.resource.hashicorp.aws import aws_sqs_queue_policy
from terrascript.resource.hashicorp.aws import aws_ssm_activation
from terrascript.resource.hashicorp.aws import aws_ssm_association
from terrascript.resource.hashicorp.aws import aws_ssm_document
from terrascript.resource.hashicorp.aws import aws_ssm_maintenance_window
from terrascript.resource.hashicorp.aws import aws_ssm_maintenance_window_target
from terrascript.resource.hashicorp.aws import aws_ssm_maintenance_window_task
from terrascript.resource.hashicorp.aws import aws_ssm_parameter
from terrascript.resource.hashicorp.aws import aws_ssm_patch_baseline
from terrascript.resource.hashicorp.aws import aws_ssm_patch_group
from terrascript.resource.hashicorp.aws import aws_ssm_resource_data_sync
from terrascript.resource.hashicorp.aws import aws_ssoadmin_account_assignment
from terrascript.resource.hashicorp.aws import (
aws_ssoadmin_managed_policy_attachment,
)
from terrascript.resource.hashicorp.aws import aws_ssoadmin_permission_set
from terrascript.resource.hashicorp.aws import (
aws_ssoadmin_permission_set_inline_policy,
)
from terrascript.resource.hashicorp.aws import aws_storagegateway_cache
from terrascript.resource.hashicorp.aws import (
aws_storagegateway_cached_iscsi_volume,
)
from terrascript.resource.hashicorp.aws import (
aws_storagegateway_file_system_association,
)
from terrascript.resource.hashicorp.aws import aws_storagegateway_gateway
from terrascript.resource.hashicorp.aws import aws_storagegateway_nfs_file_share
from terrascript.resource.hashicorp.aws import aws_storagegateway_smb_file_share
from terrascript.resource.hashicorp.aws import (
aws_storagegateway_stored_iscsi_volume,
)
from terrascript.resource.hashicorp.aws import aws_storagegateway_tape_pool
from terrascript.resource.hashicorp.aws import aws_storagegateway_upload_buffer
from terrascript.resource.hashicorp.aws import aws_storagegateway_working_storage
from terrascript.resource.hashicorp.aws import aws_subnet
from terrascript.resource.hashicorp.aws import aws_swf_domain
from terrascript.resource.hashicorp.aws import aws_synthetics_canary
from terrascript.resource.hashicorp.aws import aws_timestreamwrite_database
from terrascript.resource.hashicorp.aws import aws_timestreamwrite_table
from terrascript.resource.hashicorp.aws import aws_transfer_access
from terrascript.resource.hashicorp.aws import aws_transfer_server
from terrascript.resource.hashicorp.aws import aws_transfer_ssh_key
from terrascript.resource.hashicorp.aws import aws_transfer_user
from terrascript.resource.hashicorp.aws import aws_volume_attachment
from terrascript.resource.hashicorp.aws import aws_vpc
from terrascript.resource.hashicorp.aws import aws_vpc_dhcp_options
from terrascript.resource.hashicorp.aws import aws_vpc_dhcp_options_association
from terrascript.resource.hashicorp.aws import aws_vpc_endpoint
from terrascript.resource.hashicorp.aws import (
aws_vpc_endpoint_connection_notification,
)
from terrascript.resource.hashicorp.aws import (
aws_vpc_endpoint_route_table_association,
)
from terrascript.resource.hashicorp.aws import aws_vpc_endpoint_service
from terrascript.resource.hashicorp.aws import (
aws_vpc_endpoint_service_allowed_principal,
)
from terrascript.resource.hashicorp.aws import aws_vpc_endpoint_subnet_association
from terrascript.resource.hashicorp.aws import aws_vpc_ipv4_cidr_block_association
from terrascript.resource.hashicorp.aws import aws_vpc_peering_connection
from terrascript.resource.hashicorp.aws import aws_vpc_peering_connection_accepter
from terrascript.resource.hashicorp.aws import aws_vpc_peering_connection_options
from terrascript.resource.hashicorp.aws import aws_vpn_connection
from terrascript.resource.hashicorp.aws import aws_vpn_connection_route
from terrascript.resource.hashicorp.aws import aws_vpn_gateway
from terrascript.resource.hashicorp.aws import aws_vpn_gateway_attachment
from terrascript.resource.hashicorp.aws import aws_vpn_gateway_route_propagation
from terrascript.resource.hashicorp.aws import aws_waf_byte_match_set
from terrascript.resource.hashicorp.aws import aws_waf_geo_match_set
from terrascript.resource.hashicorp.aws import aws_waf_ipset
from terrascript.resource.hashicorp.aws import aws_waf_rate_based_rule
from terrascript.resource.hashicorp.aws import aws_waf_regex_match_set
from terrascript.resource.hashicorp.aws import aws_waf_regex_pattern_set
from terrascript.resource.hashicorp.aws import aws_waf_rule
from terrascript.resource.hashicorp.aws import aws_waf_rule_group
from terrascript.resource.hashicorp.aws import aws_waf_size_constraint_set
from terrascript.resource.hashicorp.aws import aws_waf_sql_injection_match_set
from terrascript.resource.hashicorp.aws import aws_waf_web_acl
from terrascript.resource.hashicorp.aws import aws_waf_xss_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_byte_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_geo_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_ipset
from terrascript.resource.hashicorp.aws import aws_wafregional_rate_based_rule
from terrascript.resource.hashicorp.aws import aws_wafregional_regex_match_set
from terrascript.resource.hashicorp.aws import aws_wafregional_regex_pattern_set
from terrascript.resource.hashicorp.aws import aws_wafregional_rule
from terrascript.resource.hashicorp.aws import aws_wafregional_rule_group
from terrascript.resource.hashicorp.aws import aws_wafregional_size_constraint_set
from terrascript.resource.hashicorp.aws import (
aws_wafregional_sql_injection_match_set,
)
from terrascript.resource.hashicorp.aws import aws_wafregional_web_acl
from terrascript.resource.hashicorp.aws import aws_wafregional_web_acl_association
from terrascript.resource.hashicorp.aws import aws_wafregional_xss_match_set
from terrascript.resource.hashicorp.aws import aws_wafv2_ip_set
from terrascript.resource.hashicorp.aws import aws_wafv2_regex_pattern_set
from terrascript.resource.hashicorp.aws import aws_wafv2_rule_group
from terrascript.resource.hashicorp.aws import aws_wafv2_web_acl
from terrascript.resource.hashicorp.aws import aws_wafv2_web_acl_association
from terrascript.resource.hashicorp.aws import (
aws_wafv2_web_acl_logging_configuration,
)
from terrascript.resource.hashicorp.aws import aws_worklink_fleet
from terrascript.resource.hashicorp.aws import (
aws_worklink_website_certificate_authority_association,
)
from terrascript.resource.hashicorp.aws import aws_workspaces_directory
from terrascript.resource.hashicorp.aws import aws_workspaces_ip_group
from terrascript.resource.hashicorp.aws import aws_workspaces_workspace
from terrascript.resource.hashicorp.aws import aws_xray_encryption_config
from terrascript.resource.hashicorp.aws import aws_xray_group
from terrascript.resource.hashicorp.aws import aws_xray_sampling_rule
def test_datasource_import():
from terrascript.data.hashicorp.aws import aws_acm_certificate
from terrascript.data.hashicorp.aws import aws_acmpca_certificate
from terrascript.data.hashicorp.aws import aws_acmpca_certificate_authority
from terrascript.data.hashicorp.aws import aws_alb
from terrascript.data.hashicorp.aws import aws_alb_listener
from terrascript.data.hashicorp.aws import aws_alb_target_group
from terrascript.data.hashicorp.aws import aws_ami
from terrascript.data.hashicorp.aws import aws_ami_ids
from terrascript.data.hashicorp.aws import aws_api_gateway_api_key
from terrascript.data.hashicorp.aws import aws_api_gateway_domain_name
from terrascript.data.hashicorp.aws import aws_api_gateway_resource
from terrascript.data.hashicorp.aws import aws_api_gateway_rest_api
from terrascript.data.hashicorp.aws import aws_api_gateway_vpc_link
from terrascript.data.hashicorp.aws import aws_apigatewayv2_api
from terrascript.data.hashicorp.aws import aws_apigatewayv2_apis
from terrascript.data.hashicorp.aws import aws_appmesh_mesh
from terrascript.data.hashicorp.aws import aws_appmesh_virtual_service
from terrascript.data.hashicorp.aws import aws_arn
from terrascript.data.hashicorp.aws import aws_autoscaling_group
from terrascript.data.hashicorp.aws import aws_autoscaling_groups
from terrascript.data.hashicorp.aws import aws_availability_zone
from terrascript.data.hashicorp.aws import aws_availability_zones
from terrascript.data.hashicorp.aws import aws_backup_plan
from terrascript.data.hashicorp.aws import aws_backup_selection
from terrascript.data.hashicorp.aws import aws_backup_vault
from terrascript.data.hashicorp.aws import aws_batch_compute_environment
from terrascript.data.hashicorp.aws import aws_batch_job_queue
from terrascript.data.hashicorp.aws import aws_billing_service_account
from terrascript.data.hashicorp.aws import aws_caller_identity
from terrascript.data.hashicorp.aws import aws_canonical_user_id
from terrascript.data.hashicorp.aws import aws_cloudformation_export
from terrascript.data.hashicorp.aws import aws_cloudformation_stack
from terrascript.data.hashicorp.aws import aws_cloudformation_type
from terrascript.data.hashicorp.aws import aws_cloudfront_cache_policy
from terrascript.data.hashicorp.aws import aws_cloudfront_distribution
from terrascript.data.hashicorp.aws import aws_cloudfront_function
from terrascript.data.hashicorp.aws import (
aws_cloudfront_log_delivery_canonical_user_id,
)
from terrascript.data.hashicorp.aws import aws_cloudfront_origin_request_policy
from terrascript.data.hashicorp.aws import aws_cloudhsm_v2_cluster
from terrascript.data.hashicorp.aws import aws_cloudtrail_service_account
from terrascript.data.hashicorp.aws import aws_cloudwatch_event_connection
from terrascript.data.hashicorp.aws import aws_cloudwatch_event_source
from terrascript.data.hashicorp.aws import aws_cloudwatch_log_group
from terrascript.data.hashicorp.aws import aws_cloudwatch_log_groups
from terrascript.data.hashicorp.aws import aws_codeartifact_authorization_token
from terrascript.data.hashicorp.aws import aws_codeartifact_repository_endpoint
from terrascript.data.hashicorp.aws import aws_codecommit_repository
from terrascript.data.hashicorp.aws import aws_codestarconnections_connection
from terrascript.data.hashicorp.aws import aws_cognito_user_pools
from terrascript.data.hashicorp.aws import aws_connect_contact_flow
from terrascript.data.hashicorp.aws import aws_connect_instance
from terrascript.data.hashicorp.aws import aws_cur_report_definition
from terrascript.data.hashicorp.aws import aws_customer_gateway
from terrascript.data.hashicorp.aws import aws_db_cluster_snapshot
from terrascript.data.hashicorp.aws import aws_db_event_categories
from terrascript.data.hashicorp.aws import aws_db_instance
from terrascript.data.hashicorp.aws import aws_db_snapshot
from terrascript.data.hashicorp.aws import aws_db_subnet_group
from terrascript.data.hashicorp.aws import aws_default_tags
from terrascript.data.hashicorp.aws import aws_directory_service_directory
from terrascript.data.hashicorp.aws import aws_docdb_engine_version
from terrascript.data.hashicorp.aws import aws_docdb_orderable_db_instance
from terrascript.data.hashicorp.aws import aws_dx_connection
from terrascript.data.hashicorp.aws import aws_dx_gateway
from terrascript.data.hashicorp.aws import aws_dx_location
from terrascript.data.hashicorp.aws import aws_dx_locations
from terrascript.data.hashicorp.aws import aws_dynamodb_table
from terrascript.data.hashicorp.aws import aws_ebs_default_kms_key
from terrascript.data.hashicorp.aws import aws_ebs_encryption_by_default
from terrascript.data.hashicorp.aws import aws_ebs_snapshot
from terrascript.data.hashicorp.aws import aws_ebs_snapshot_ids
from terrascript.data.hashicorp.aws import aws_ebs_volume
from terrascript.data.hashicorp.aws import aws_ebs_volumes
from terrascript.data.hashicorp.aws import aws_ec2_coip_pool
from terrascript.data.hashicorp.aws import aws_ec2_coip_pools
from terrascript.data.hashicorp.aws import aws_ec2_instance_type
from terrascript.data.hashicorp.aws import aws_ec2_instance_type_offering
from terrascript.data.hashicorp.aws import aws_ec2_instance_type_offerings
from terrascript.data.hashicorp.aws import aws_ec2_local_gateway
from terrascript.data.hashicorp.aws import aws_ec2_local_gateway_route_table
from terrascript.data.hashicorp.aws import aws_ec2_local_gateway_route_tables
from terrascript.data.hashicorp.aws import aws_ec2_local_gateway_virtual_interface
from terrascript.data.hashicorp.aws import (
aws_ec2_local_gateway_virtual_interface_group,
)
from terrascript.data.hashicorp.aws import (
aws_ec2_local_gateway_virtual_interface_groups,
)
from terrascript.data.hashicorp.aws import aws_ec2_local_gateways
from terrascript.data.hashicorp.aws import aws_ec2_managed_prefix_list
from terrascript.data.hashicorp.aws import aws_ec2_spot_price
from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway
from terrascript.data.hashicorp.aws import (
aws_ec2_transit_gateway_dx_gateway_attachment,
)
from terrascript.data.hashicorp.aws import (
aws_ec2_transit_gateway_peering_attachment,
)
from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_route_table
from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_route_tables
from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_vpc_attachment
from terrascript.data.hashicorp.aws import aws_ec2_transit_gateway_vpn_attachment
from terrascript.data.hashicorp.aws import aws_ecr_authorization_token
from terrascript.data.hashicorp.aws import aws_ecr_image
from terrascript.data.hashicorp.aws import aws_ecr_repository
from terrascript.data.hashicorp.aws import aws_ecs_cluster
from terrascript.data.hashicorp.aws import aws_ecs_container_definition
from terrascript.data.hashicorp.aws import aws_ecs_service
from terrascript.data.hashicorp.aws import aws_ecs_task_definition
from terrascript.data.hashicorp.aws import aws_efs_access_point
from terrascript.data.hashicorp.aws import aws_efs_access_points
from terrascript.data.hashicorp.aws import aws_efs_file_system
from terrascript.data.hashicorp.aws import aws_efs_mount_target
from terrascript.data.hashicorp.aws import aws_eip
from terrascript.data.hashicorp.aws import aws_eks_addon
from terrascript.data.hashicorp.aws import aws_eks_cluster
from terrascript.data.hashicorp.aws import aws_eks_cluster_auth
from terrascript.data.hashicorp.aws import aws_eks_clusters
from terrascript.data.hashicorp.aws import aws_eks_node_group
from terrascript.data.hashicorp.aws import aws_eks_node_groups
from terrascript.data.hashicorp.aws import aws_elastic_beanstalk_application
from terrascript.data.hashicorp.aws import aws_elastic_beanstalk_hosted_zone
from terrascript.data.hashicorp.aws import aws_elastic_beanstalk_solution_stack
from terrascript.data.hashicorp.aws import aws_elasticache_cluster
from terrascript.data.hashicorp.aws import aws_elasticache_replication_group
from terrascript.data.hashicorp.aws import aws_elasticache_user
from terrascript.data.hashicorp.aws import aws_elasticsearch_domain
from terrascript.data.hashicorp.aws import aws_elb
from terrascript.data.hashicorp.aws import aws_elb_hosted_zone_id
from terrascript.data.hashicorp.aws import aws_elb_service_account
from terrascript.data.hashicorp.aws import aws_globalaccelerator_accelerator
from terrascript.data.hashicorp.aws import aws_glue_connection
from terrascript.data.hashicorp.aws import aws_glue_data_catalog_encryption_settings
from terrascript.data.hashicorp.aws import aws_glue_script
from terrascript.data.hashicorp.aws import aws_guardduty_detector
from terrascript.data.hashicorp.aws import aws_iam_account_alias
from terrascript.data.hashicorp.aws import aws_iam_group
from terrascript.data.hashicorp.aws import aws_iam_instance_profile
from terrascript.data.hashicorp.aws import aws_iam_policy
from terrascript.data.hashicorp.aws import aws_iam_policy_document
from terrascript.data.hashicorp.aws import aws_iam_role
from terrascript.data.hashicorp.aws import aws_iam_roles
from terrascript.data.hashicorp.aws import aws_iam_server_certificate
from terrascript.data.hashicorp.aws import aws_iam_session_context
from terrascript.data.hashicorp.aws import aws_iam_user
from terrascript.data.hashicorp.aws import aws_iam_users
from terrascript.data.hashicorp.aws import aws_identitystore_group
from terrascript.data.hashicorp.aws import aws_identitystore_user
from terrascript.data.hashicorp.aws import aws_imagebuilder_component
from terrascript.data.hashicorp.aws import (
aws_imagebuilder_distribution_configuration,
)
from terrascript.data.hashicorp.aws import aws_imagebuilder_image
from terrascript.data.hashicorp.aws import aws_imagebuilder_image_pipeline
from terrascript.data.hashicorp.aws import aws_imagebuilder_image_recipe
from terrascript.data.hashicorp.aws import (
aws_imagebuilder_infrastructure_configuration,
)
from terrascript.data.hashicorp.aws import aws_inspector_rules_packages
from terrascript.data.hashicorp.aws import aws_instance
from terrascript.data.hashicorp.aws import aws_instances
from terrascript.data.hashicorp.aws import aws_internet_gateway
from terrascript.data.hashicorp.aws import aws_iot_endpoint
from terrascript.data.hashicorp.aws import aws_ip_ranges
from terrascript.data.hashicorp.aws import aws_kinesis_stream
from terrascript.data.hashicorp.aws import aws_kinesis_stream_consumer
from terrascript.data.hashicorp.aws import aws_kms_alias
from terrascript.data.hashicorp.aws import aws_kms_ciphertext
from terrascript.data.hashicorp.aws import aws_kms_key
from terrascript.data.hashicorp.aws import aws_kms_public_key
from terrascript.data.hashicorp.aws import aws_kms_secret
from terrascript.data.hashicorp.aws import aws_kms_secrets
from terrascript.data.hashicorp.aws import aws_lakeformation_data_lake_settings
from terrascript.data.hashicorp.aws import aws_lakeformation_permissions
from terrascript.data.hashicorp.aws import aws_lakeformation_resource
from terrascript.data.hashicorp.aws import aws_lambda_alias
from terrascript.data.hashicorp.aws import aws_lambda_code_signing_config
from terrascript.data.hashicorp.aws import aws_lambda_function
from terrascript.data.hashicorp.aws import aws_lambda_invocation
from terrascript.data.hashicorp.aws import aws_lambda_layer_version
from terrascript.data.hashicorp.aws import aws_launch_configuration
from terrascript.data.hashicorp.aws import aws_launch_template
from terrascript.data.hashicorp.aws import aws_lb
from terrascript.data.hashicorp.aws import aws_lb_listener
from terrascript.data.hashicorp.aws import aws_lb_target_group
from terrascript.data.hashicorp.aws import aws_lex_bot
from terrascript.data.hashicorp.aws import aws_lex_bot_alias
from terrascript.data.hashicorp.aws import aws_lex_intent
from terrascript.data.hashicorp.aws import aws_lex_slot_type
from terrascript.data.hashicorp.aws import aws_mq_broker
from terrascript.data.hashicorp.aws import aws_msk_broker_nodes
from terrascript.data.hashicorp.aws import aws_msk_cluster
from terrascript.data.hashicorp.aws import aws_msk_configuration
from terrascript.data.hashicorp.aws import aws_msk_kafka_version
from terrascript.data.hashicorp.aws import aws_nat_gateway
from terrascript.data.hashicorp.aws import aws_neptune_engine_version
from terrascript.data.hashicorp.aws import aws_neptune_orderable_db_instance
from terrascript.data.hashicorp.aws import aws_network_acls
from terrascript.data.hashicorp.aws import aws_network_interface
from terrascript.data.hashicorp.aws import aws_network_interfaces
from terrascript.data.hashicorp.aws import (
aws_organizations_delegated_administrators,
)
from terrascript.data.hashicorp.aws import aws_organizations_delegated_services
from terrascript.data.hashicorp.aws import aws_organizations_organization
from terrascript.data.hashicorp.aws import aws_organizations_organizational_units
from terrascript.data.hashicorp.aws import aws_outposts_outpost
from terrascript.data.hashicorp.aws import aws_outposts_outpost_instance_type
from terrascript.data.hashicorp.aws import aws_outposts_outpost_instance_types
from terrascript.data.hashicorp.aws import aws_outposts_outposts
from terrascript.data.hashicorp.aws import aws_outposts_site
from terrascript.data.hashicorp.aws import aws_outposts_sites
from terrascript.data.hashicorp.aws import aws_partition
from terrascript.data.hashicorp.aws import aws_prefix_list
from terrascript.data.hashicorp.aws import aws_pricing_product
from terrascript.data.hashicorp.aws import aws_qldb_ledger
from terrascript.data.hashicorp.aws import aws_ram_resource_share
from terrascript.data.hashicorp.aws import aws_rds_certificate
from terrascript.data.hashicorp.aws import aws_rds_cluster
from terrascript.data.hashicorp.aws import aws_rds_engine_version
from terrascript.data.hashicorp.aws import aws_rds_orderable_db_instance
from terrascript.data.hashicorp.aws import aws_redshift_cluster
from terrascript.data.hashicorp.aws import aws_redshift_orderable_cluster
from terrascript.data.hashicorp.aws import aws_redshift_service_account
from terrascript.data.hashicorp.aws import aws_region
from terrascript.data.hashicorp.aws import aws_regions
from terrascript.data.hashicorp.aws import aws_resourcegroupstaggingapi_resources
from terrascript.data.hashicorp.aws import aws_route
from terrascript.data.hashicorp.aws import aws_route53_delegation_set
from terrascript.data.hashicorp.aws import aws_route53_resolver_endpoint
from terrascript.data.hashicorp.aws import aws_route53_resolver_rule
from terrascript.data.hashicorp.aws import aws_route53_resolver_rules
from terrascript.data.hashicorp.aws import aws_route53_zone
from terrascript.data.hashicorp.aws import aws_route_table
from terrascript.data.hashicorp.aws import aws_route_tables
from terrascript.data.hashicorp.aws import aws_s3_bucket
from terrascript.data.hashicorp.aws import aws_s3_bucket_object
from terrascript.data.hashicorp.aws import aws_s3_bucket_objects
from terrascript.data.hashicorp.aws import aws_sagemaker_prebuilt_ecr_image
from terrascript.data.hashicorp.aws import aws_secretsmanager_secret
from terrascript.data.hashicorp.aws import aws_secretsmanager_secret_rotation
from terrascript.data.hashicorp.aws import aws_secretsmanager_secret_version
from terrascript.data.hashicorp.aws import aws_security_group
from terrascript.data.hashicorp.aws import aws_security_groups
from terrascript.data.hashicorp.aws import (
aws_serverlessapplicationrepository_application,
)
from terrascript.data.hashicorp.aws import aws_service_discovery_dns_namespace
from terrascript.data.hashicorp.aws import aws_servicecatalog_constraint
from terrascript.data.hashicorp.aws import aws_servicecatalog_launch_paths
from terrascript.data.hashicorp.aws import aws_servicecatalog_portfolio
from terrascript.data.hashicorp.aws import aws_servicecatalog_portfolio_constraints
from terrascript.data.hashicorp.aws import aws_servicecatalog_product
from terrascript.data.hashicorp.aws import aws_servicequotas_service
from terrascript.data.hashicorp.aws import aws_servicequotas_service_quota
from terrascript.data.hashicorp.aws import aws_sfn_activity
from terrascript.data.hashicorp.aws import aws_sfn_state_machine
from terrascript.data.hashicorp.aws import aws_signer_signing_job
from terrascript.data.hashicorp.aws import aws_signer_signing_profile
from terrascript.data.hashicorp.aws import aws_sns_topic
from terrascript.data.hashicorp.aws import aws_sqs_queue
from terrascript.data.hashicorp.aws import aws_ssm_document
from terrascript.data.hashicorp.aws import aws_ssm_parameter
from terrascript.data.hashicorp.aws import aws_ssm_patch_baseline
from terrascript.data.hashicorp.aws import aws_ssoadmin_instances
from terrascript.data.hashicorp.aws import aws_ssoadmin_permission_set
from terrascript.data.hashicorp.aws import aws_storagegateway_local_disk
from terrascript.data.hashicorp.aws import aws_subnet
from terrascript.data.hashicorp.aws import aws_subnet_ids
from terrascript.data.hashicorp.aws import aws_subnets
from terrascript.data.hashicorp.aws import aws_transfer_server
from terrascript.data.hashicorp.aws import aws_vpc
from terrascript.data.hashicorp.aws import aws_vpc_dhcp_options
from terrascript.data.hashicorp.aws import aws_vpc_endpoint
from terrascript.data.hashicorp.aws import aws_vpc_endpoint_service
from terrascript.data.hashicorp.aws import aws_vpc_peering_connection
from terrascript.data.hashicorp.aws import aws_vpc_peering_connections
from terrascript.data.hashicorp.aws import aws_vpcs
from terrascript.data.hashicorp.aws import aws_vpn_gateway
from terrascript.data.hashicorp.aws import aws_waf_ipset
from terrascript.data.hashicorp.aws import aws_waf_rate_based_rule
from terrascript.data.hashicorp.aws import aws_waf_rule
from terrascript.data.hashicorp.aws import aws_waf_web_acl
from terrascript.data.hashicorp.aws import aws_wafregional_ipset
from terrascript.data.hashicorp.aws import aws_wafregional_rate_based_rule
from terrascript.data.hashicorp.aws import aws_wafregional_rule
from terrascript.data.hashicorp.aws import aws_wafregional_web_acl
from terrascript.data.hashicorp.aws import aws_wafv2_ip_set
from terrascript.data.hashicorp.aws import aws_wafv2_regex_pattern_set
from terrascript.data.hashicorp.aws import aws_wafv2_rule_group
from terrascript.data.hashicorp.aws import aws_wafv2_web_acl
from terrascript.data.hashicorp.aws import aws_workspaces_bundle
from terrascript.data.hashicorp.aws import aws_workspaces_directory
from terrascript.data.hashicorp.aws import aws_workspaces_image
from terrascript.data.hashicorp.aws import aws_workspaces_workspace
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.hashicorp.aws
#
# t = terrascript.provider.hashicorp.aws.aws()
# s = str(t)
#
# assert 'https://github.com/hashicorp/terraform-provider-aws' in s
# assert '3.60.0' in s
|
UnityPy/classes/PPtr.py | yvsdrop/UnityPy | 313 | 12762095 | from ..files import ObjectReader
from ..streams import EndianBinaryWriter
from ..helpers import ImportHelper
from .. import files
from ..enums import FileType, ClassIDType
import os
from .. import environment
def save_ptr(obj, writer: EndianBinaryWriter):
if isinstance(obj, PPtr):
writer.write_int(obj.file_id)
else:
        writer.write_int(0)  # it's usually 0
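    # Serialized file format versions below 14 store path_id as a 32-bit int; newer versions use 64 bits.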
if obj._version < 14:
writer.write_int(obj.path_id)
else:
writer.write_long(obj.path_id)
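# Cache of SerializedFile managers for external files loaded without a parent container, keyed by file name.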
cached_managers = dict()
class PPtr:
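    """Serialized pointer to another Unity object, identified by (file_id, path_id) and resolved lazily."""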
def __init__(self, reader: ObjectReader):
self._version = reader.version2
self.index = -2
self.file_id = reader.read_int()
self.path_id = reader.read_int() if self._version < 14 else reader.read_long()
self.assets_file = reader.assets_file
self._obj = None
def save(self, writer: EndianBinaryWriter):
save_ptr(self, writer)
def get_obj(self):
        if self._obj is not None:
return self._obj
manager = None
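        # file_id == 0 means the target lives in this assets file;
        # a positive file_id is a 1-based index into this file's externals list.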
if self.file_id == 0:
manager = self.assets_file
elif self.file_id > 0 and self.file_id - 1 < len(self.assets_file.externals):
if self.index == -2:
external_name = self.assets_file.externals[self.file_id - 1].name
parent = self.assets_file.parent
if parent is not None:
if external_name in parent.files:
manager = parent.files[external_name]
elif external_name.upper() in parent.files:
manager = parent.files[external_name.upper()]
else:
while not isinstance(parent, environment.Environment):
parent = parent.parent
if parent.path:
path = parent.path
                            dir_files = os.listdir(path)
                            if external_name in dir_files:
parent.load_files([os.path.join(path, external_name)])
manager = parent.files[external_name]
else:
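                    # No parent container: fall back to ImportHelper and memoize the resulting SerializedFile in cached_managers.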
if external_name not in cached_managers:
typ, reader = ImportHelper.check_file_type(external_name)
if typ == FileType.AssetsFile:
cached_managers[external_name] = files.SerializedFile(reader)
if external_name in cached_managers:
manager = cached_managers[external_name]
if manager and self.path_id in manager.objects:
self._obj = manager.objects[self.path_id]
else:
self._obj = None
return self._obj
def __getattr__(self, key):
obj = self.get_obj()
if obj is None:
if key == "type":
return ClassIDType.UnknownType
raise AttributeError(key)
return getattr(obj, key)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self._obj.__class__.__repr__(self.get_obj()) if self.get_obj() else "Not Found")
    def __bool__(self):
        return bool(self.get_obj())
|
tests/test_bql.py | almartin82/bayeslite | 964 | 12762100 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import StringIO
import apsw
import pytest
import struct
import bayeslite
import bayeslite.ast as ast
import bayeslite.compiler as compiler
import bayeslite.core as core
import bayeslite.guess as guess
import bayeslite.backends.troll_rng as troll
import bayeslite.parse as parse
from bayeslite.exception import BQLError
from bayeslite.math_util import relerr
from bayeslite.backends.cgpm_backend import CGPM_Backend
from bayeslite.util import cursor_value
import test_core
import test_csv
from stochastic import stochastic
def bql2sql(string, setup=None):
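    """Compile a BQL string to SQL against a throwaway in-memory database.

    The database is seeded with the t1 table and a p1 population so that
    population and variable references resolve; `setup`, if given, runs
    extra statements first.  Returns the generated SQL text.
    """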
with bayeslite.bayesdb_open(':memory:') as bdb:
test_core.t1_schema(bdb)
test_core.t1_data(bdb)
bdb.execute('''
create population p1 for t1 (
id ignore;
label nominal;
age numerical;
weight numerical
)
''')
if setup is not None:
setup(bdb)
phrases = parse.parse_bql_string(string)
out = compiler.Output(0, {}, ())
for phrase in phrases:
assert ast.is_query(phrase)
compiler.compile_query(bdb, phrase, out)
out.write(';')
return out.getvalue()
# XXX Kludgey mess. Please reorganize.
def bql2sqlparam(string):
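    """Like bql2sql, but for parametrized queries; parameters are left as
    numbered ?N placeholders in the generated SQL."""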
with bayeslite.bayesdb_open(':memory:') as bdb:
test_core.t1_schema(bdb)
test_core.t1_data(bdb)
bdb.execute('''
create population p1 for t1 (
id ignore;
label nominal;
age numerical;
weight numerical
)
''')
phrases = parse.parse_bql_string(string)
out0 = StringIO.StringIO()
for phrase in phrases:
out = None
if isinstance(phrase, ast.Parametrized):
bindings = (None,) * phrase.n_numpar
out = compiler.Output(phrase.n_numpar, phrase.nampar_map,
bindings)
phrase = phrase.phrase
else:
out = StringIO.StringIO()
assert ast.is_query(phrase)
compiler.compile_query(bdb, phrase, out)
# XXX Do something about the parameters.
out0.write(out.getvalue())
out0.write(';')
return out0.getvalue()
def bql_execute(bdb, string, bindings=()):
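    """Execute a BQL string with the given bindings and return rows as tuples."""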
return map(tuple, bdb.execute(string, bindings))
def empty(cursor):
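    """Assert that `cursor` has an empty description and yields no rows."""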
assert cursor is not None
assert cursor.description is not None
assert len(cursor.description) == 0
with pytest.raises(StopIteration):
cursor.next()
def test_trivial_population():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
# XXX if (not) exists
bdb.execute('''
create population p for t (
guess stattypes of (*);
age numerical
)
''')
bdb.execute('drop population p')
def test_population_invalid_numerical():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
with pytest.raises(BQLError):
bdb.execute('''
create population p for t (
guess stattypes of (*);
gender numerical
)
''')
def test_population_invalid_numerical_alterpop_addvar():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
bdb.execute('''
create population p for t (
guess stattypes of (*);
ignore gender
)
''')
with pytest.raises(BQLError):
bdb.execute('alter population p add variable gender numerical')
bdb.execute('drop population p')
def test_population_invalid_numerical_alterpop_stattype():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
bdb.execute('''
create population p for t (
guess stattypes of (*);
gender nominal
)
''')
with pytest.raises(BQLError):
bdb.execute('''
alter population p set stattype of gender to numerical
''')
bdb.execute('drop population p')
def test_similarity_identity():
with test_core.t1() as (bdb, population_id, _generator_id):
bdb.execute('initialize 6 models for p1_cc;')
rowids = bdb.sql_execute('select rowid from t1')
for rowid in rowids:
c = bdb.execute('''
estimate similarity of (rowid=?) to (rowid=?)
in the context of age by p1
''', (rowid[0], rowid[0])).fetchall()
assert len(c) == 1
assert c[0][0] == 1
def test_predictive_relevance():
assert bql2sql('''
estimate predictive relevance
of (label = 'Uganda')
to existing rows (rowid < 4)
and hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'Europe', "weight" = 7)
)
in the context of "weight"
by p1
''') == \
'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \
'(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\
'\'[1, 2, 3]\', 3, '\
'2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'
assert bql2sql('''
estimate predictive relevance
of (label = 'mumble')
to existing rows (label = 'frotz' or age <= 4)
in the context of "label"
by p1
''') == \
'SELECT bql_row_predictive_relevance(1, NULL, NULL, ' \
'(SELECT _rowid_ FROM "t1" WHERE ("label" = \'mumble\')), '\
'\'[5, 8]\', 1);'
assert bql2sql('''
estimate label,
predictive relevance
to hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'hunf', "weight" = 7)
)
in the context of "age",
_rowid_ + 1
from p1
''') == \
'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\
'\'[]\', 2, 2, 82, 3, 14, NULL, 2, 74, 1, \'hunf\', 3, 7, NULL), '\
'("_rowid_" + 1) FROM "t1";'
    # A query whose row expression matches no rows should still compile.
assert bql2sql('''
estimate label,
predictive relevance to existing rows (rowid < 0)
in the context of "age"
from p1
''') == \
'SELECT "label", bql_row_predictive_relevance(1, NULL, NULL, _rowid_, '\
'\'[]\', 2) FROM "t1";'
# When using `BY`, require OF to be specified.
with pytest.raises(BQLError):
bql2sql('''
estimate predictive relevance
to hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'Europe', "weight" = 7)
)
in the context of "age"
by p1
''')
# When using `FROM`, require OF to be unspecified.
with pytest.raises(BQLError):
bql2sql('''
estimate predictive relevance
of (name = 'mansour')
to hypothetical rows with values (
("age" = 82, "weight" = 14)
)
in the context of "age"
from p1
''')
assert bql2sql('''
estimate label from p1
where
(predictive relevance to existing rows (label = 'quux' and age < 5)
in the context of "weight") > 1
order by
predictive relevance
to hypothetical rows with values ((label='zot'))
in the context of "age"
''') == \
'SELECT "label" FROM "t1" WHERE '\
'(bql_row_predictive_relevance(1, NULL, NULL, '\
'_rowid_, \'[5]\', 3) > 1) '\
'ORDER BY bql_row_predictive_relevance(1, NULL, NULL, '\
'_rowid_, \'[]\', 2, 1, \'zot\', NULL);'
@stochastic(max_runs=2, min_passes=1)
def test_conditional_probability(seed):
with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id):
bdb.execute('drop generator p1_cc')
bdb.execute('drop population p1')
bdb.execute('''
create population p1 for t1 (
ignore id, label;
set stattype of age to numerical;
set stattype of weight to numerical
)
''')
bdb.execute('''
create generator p1_cond_prob_cc for p1;
''')
bdb.execute('initialize 1 model for p1_cond_prob_cc')
bdb.execute('alter generator p1_cond_prob_cc '
'ensure variables * dependent')
bdb.execute('analyze p1_cond_prob_cc for 1 iteration')
q0 = 'estimate probability density of age = 8 by p1'
q1 = 'estimate probability density of age = 8 given () by p1'
age_is_8 = bdb.execute(q0).fetchvalue()
assert age_is_8 == bdb.execute(q1).fetchvalue()
q2 = 'estimate probability density of age = 8 given (weight = 16)' \
' by p1'
age_is_8_given_weight_is_16 = bdb.execute(q2).fetchvalue()
assert age_is_8 < age_is_8_given_weight_is_16
probs = bdb.execute(
'estimate probability density of value 8 given (weight = 16)'
' from columns of p1 where v.name != \'weight\'').fetchall()
assert [(age_is_8_given_weight_is_16,)] == probs
@stochastic(max_runs=2, min_passes=1)
def test_joint_probability(seed):
with test_core.t1(seed=seed) as (bdb, _population_id, _generator_id):
bdb.execute('initialize 10 models for p1_cc')
bdb.execute('analyze p1_cc for 10 iterations')
q0 = 'estimate probability density of age = 8 by p1'
q1 = 'estimate probability density of (age = 8) by p1'
assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue()
q1 = 'estimate probability density of (age = 8) given () by p1'
assert bdb.execute(q0).fetchvalue() == bdb.execute(q1).fetchvalue()
q2 = 'estimate probability density of age = 8 given (weight = 16)' \
' by p1'
assert bdb.execute(q0).fetchvalue() < bdb.execute(q2).fetchvalue()
q0 = 'estimate probability density of age = 8 by p1'
q1 = 'estimate probability density of (age = 8, weight = 16) by p1'
assert bdb.execute(q1).fetchvalue() < bdb.execute(q0).fetchvalue()
q2 = 'estimate probability density of (age = 8, weight = 16)' \
" given (label = 'mumble') by p1"
assert bdb.execute(q1).fetchvalue() < bdb.execute(q2).fetchvalue()
def test_badbql():
with test_core.t1() as (bdb, _population_id, _generator_id):
with pytest.raises(ValueError):
bdb.execute('')
with pytest.raises(ValueError):
bdb.execute(';')
with pytest.raises(ValueError):
bdb.execute('select 0; select 1')
def test_select_trivial():
assert bql2sql('select null;') == 'SELECT NULL;'
assert bql2sql("select 'x';") == "SELECT 'x';"
assert bql2sql("select 'x''y';") == "SELECT 'x''y';"
assert bql2sql('select "x";') == 'SELECT "x";'
assert bql2sql('select "x""y";') == 'SELECT "x""y";'
assert bql2sql('select 0;') == 'SELECT 0;'
assert bql2sql('select 0.;') == 'SELECT 0.0;'
assert bql2sql('select .0;') == 'SELECT 0.0;'
assert bql2sql('select 0.0;') == 'SELECT 0.0;'
assert bql2sql('select 1e0;') == 'SELECT 1.0;'
assert bql2sql('select 1e+1;') == 'SELECT 10.0;'
assert bql2sql('select 1e-1;') == 'SELECT 0.1;'
assert bql2sql('select -1e+1;') == 'SELECT (- 10.0);'
assert bql2sql('select +1e-1;') == 'SELECT (+ 0.1);'
assert bql2sql('select SQRT(1-EXP(-2*value)) FROM bm_mi;') == \
'SELECT "SQRT"((1 - "EXP"(((- 2) * "value")))) FROM "bm_mi";'
assert bql2sql('select .1e0;') == 'SELECT 0.1;'
assert bql2sql('select 1.e10;') == 'SELECT 10000000000.0;'
assert bql2sql('select all 0;') == 'SELECT 0;'
assert bql2sql('select distinct 0;') == 'SELECT DISTINCT 0;'
assert bql2sql('select 0 as z;') == 'SELECT 0 AS "z";'
assert bql2sql('select * from t;') == 'SELECT * FROM "t";'
assert bql2sql('select t.* from t;') == 'SELECT "t".* FROM "t";'
assert bql2sql('select c from t;') == 'SELECT "c" FROM "t";'
assert bql2sql('select c as d from t;') == 'SELECT "c" AS "d" FROM "t";'
assert bql2sql('select t.c as d from t;') == \
'SELECT "t"."c" AS "d" FROM "t";'
assert bql2sql('select t.c as d, p as q, x from t;') == \
'SELECT "t"."c" AS "d", "p" AS "q", "x" FROM "t";'
assert bql2sql('select * from t, u;') == 'SELECT * FROM "t", "u";'
assert bql2sql('select * from t as u;') == 'SELECT * FROM "t" AS "u";'
assert bql2sql('select * from (select 0);') == 'SELECT * FROM (SELECT 0);'
assert bql2sql('select t.c from (select d as c from u) as t;') == \
'SELECT "t"."c" FROM (SELECT "d" AS "c" FROM "u") AS "t";'
assert bql2sql('select * where x;') == 'SELECT * WHERE "x";'
assert bql2sql('select * from t where x;') == \
'SELECT * FROM "t" WHERE "x";'
assert bql2sql('select * group by x;') == 'SELECT * GROUP BY "x";'
assert bql2sql('select * from t where x group by y;') == \
'SELECT * FROM "t" WHERE "x" GROUP BY "y";'
assert bql2sql('select * from t where x group by y, z;') == \
'SELECT * FROM "t" WHERE "x" GROUP BY "y", "z";'
assert bql2sql('select * from t where x group by y having sum(z) < 1') == \
'SELECT * FROM "t" WHERE "x" GROUP BY "y" HAVING ("sum"("z") < 1);'
assert bql2sql('select * order by x;') == 'SELECT * ORDER BY "x";'
assert bql2sql('select * order by x asc;') == 'SELECT * ORDER BY "x";'
assert bql2sql('select * order by x desc;') == \
'SELECT * ORDER BY "x" DESC;'
assert bql2sql('select * order by x, y;') == 'SELECT * ORDER BY "x", "y";'
assert bql2sql('select * order by x desc, y;') == \
'SELECT * ORDER BY "x" DESC, "y";'
assert bql2sql('select * order by x, y asc;') == \
'SELECT * ORDER BY "x", "y";'
assert bql2sql('select * limit 32;') == 'SELECT * LIMIT 32;'
assert bql2sql('select * limit 32 offset 16;') == \
'SELECT * LIMIT 32 OFFSET 16;'
assert bql2sql('select * limit 16, 32;') == 'SELECT * LIMIT 32 OFFSET 16;'
assert bql2sql('select (select0);') == 'SELECT "select0";'
assert bql2sql('select (select 0);') == 'SELECT (SELECT 0);'
assert bql2sql('select f(f(), f(x), y);') == \
'SELECT "f"("f"(), "f"("x"), "y");'
assert bql2sql('select a and b or c or not d is e is not f like j;') == \
'SELECT ((("a" AND "b") OR "c") OR' \
+ ' (NOT ((("d" IS "e") IS NOT "f") LIKE "j")));'
assert bql2sql('select a like b not like c like d escape e;') == \
'SELECT ((("a" LIKE "b") NOT LIKE "c") LIKE "d" ESCAPE "e");'
assert bql2sql('select a like b escape c glob d not glob e;') == \
'SELECT ((("a" LIKE "b" ESCAPE "c") GLOB "d") NOT GLOB "e");'
assert bql2sql('select a not glob b glob c escape d;') == \
'SELECT (("a" NOT GLOB "b") GLOB "c" ESCAPE "d");'
assert bql2sql('select a glob b escape c regexp e not regexp f;') == \
'SELECT ((("a" GLOB "b" ESCAPE "c") REGEXP "e") NOT REGEXP "f");'
assert bql2sql('select a not regexp b regexp c escape d;') == \
'SELECT (("a" NOT REGEXP "b") REGEXP "c" ESCAPE "d");'
assert bql2sql('select a regexp b escape c not regexp d escape e;') == \
'SELECT (("a" REGEXP "b" ESCAPE "c") NOT REGEXP "d" ESCAPE "e");'
assert bql2sql('select a not regexp b escape c match e not match f;') == \
'SELECT ((("a" NOT REGEXP "b" ESCAPE "c") MATCH "e") NOT MATCH "f");'
assert bql2sql('select a not match b match c escape d;') == \
'SELECT (("a" NOT MATCH "b") MATCH "c" ESCAPE "d");'
assert bql2sql('select a match b escape c not match d escape e;') == \
'SELECT (("a" MATCH "b" ESCAPE "c") NOT MATCH "d" ESCAPE "e");'
assert bql2sql('select a not match b escape c between d and e;') == \
'SELECT (("a" NOT MATCH "b" ESCAPE "c") BETWEEN "d" AND "e");'
assert bql2sql('select a between b and c and d;') == \
'SELECT (("a" BETWEEN "b" AND "c") AND "d");'
assert bql2sql('select a like b like c escape d between e and f;') == \
'SELECT ((("a" LIKE "b") LIKE "c" ESCAPE "d") BETWEEN "e" AND "f");'
assert bql2sql('select a between b and c not between d and e;') == \
'SELECT (("a" BETWEEN "b" AND "c") NOT BETWEEN "d" AND "e");'
assert bql2sql('select a not between b and c in (select f);') == \
'SELECT (("a" NOT BETWEEN "b" AND "c") IN (SELECT "f"));'
assert bql2sql('select a in (select b) and c not in (select d);') == \
'SELECT (("a" IN (SELECT "b")) AND ("c" NOT IN (SELECT "d")));'
assert bql2sql("select a in (1 + 2, '3') and b not in (select c);") == \
'SELECT (("a" IN ((1 + 2), \'3\')) AND ("b" NOT IN (SELECT "c")));'
assert bql2sql('select a in (select b) isnull notnull!=c<>d<e<=f>g;') == \
'SELECT ((((("a" IN (SELECT "b")) ISNULL) NOTNULL) != "c") !=' \
+ ' ((("d" < "e") <= "f") > "g"));'
assert bql2sql('select a>b>=c<<d>>e&f|g+h-i*j/k;') == \
'SELECT (("a" > "b") >= (((("c" << "d") >> "e") & "f") |' \
+ ' (("g" + "h") - (("i" * "j") / "k"))));'
assert bql2sql('select a/b%c||~~d collate e collate\'f\'||1;') == \
'SELECT (("a" / "b") % (("c" || (((~ (~ "d")) COLLATE "e")' \
+ ' COLLATE "f")) || 1));'
assert bql2sql('select cast(f(x) as binary blob);') == \
'SELECT CAST("f"("x") AS "binary" "blob");'
assert bql2sql('select cast(42 as varint(73));') == \
'SELECT CAST(42 AS "varint"(73));'
assert bql2sql('select cast(f(x, y, z) as varchar(12 ,34));') == \
'SELECT CAST("f"("x", "y", "z") AS "varchar"(12, 34));'
assert bql2sql('select exists (select a) and not exists (select b);') == \
'SELECT (EXISTS (SELECT "a") AND (NOT EXISTS (SELECT "b")));'
assert bql2sql('select case when a - b then c else d end from t;') == \
'SELECT CASE WHEN ("a" - "b") THEN "c" ELSE "d" END FROM "t";'
assert bql2sql('select case f(a) when b + c then d else e end from t;') \
== \
'SELECT CASE "f"("a") WHEN ("b" + "c") THEN "d" ELSE "e" END FROM "t";'
def test_estimate_bql():
# PREDICTIVE PROBABILITY
assert bql2sql('estimate predictive probability of weight from p1;') == \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[3]\', \'[]\')' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of (age, weight) '
'from p1;') == \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[2, 3]\', \'[]\')' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of (age, weight) given '
'(label) from p1;') == \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[2, 3]\', \'[1]\')' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of (*) from p1;') == \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[1, 2, 3]\', \'[]\')' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of (*) given (age, weight) '
'from p1;') == \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[1]\', \'[2, 3]\')' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of age given (*) '
'from p1;') == \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[2]\', \'[1, 3]\')' \
' FROM "t1";'
assert bql2sql('estimate label, predictive probability of weight'
' from p1;') \
== \
'SELECT "label", ' \
'bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[3]\', \'[]\')' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of weight, label'
' from p1;') \
== \
'SELECT bql_row_column_predictive_probability(1, NULL, NULL, _rowid_, '\
'\'[3]\', \'[]\'),' \
' "label"' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of weight + 1'
' from p1;') == \
'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\
'_rowid_, \'[3]\', \'[]\') + 1)' \
' FROM "t1";'
assert bql2sql('estimate predictive probability of weight given (*) + 1'
' from p1;') == \
'SELECT (bql_row_column_predictive_probability(1, NULL, NULL, '\
'_rowid_, \'[3]\', \'[1, 2]\') + 1)' \
' FROM "t1";'
# PREDICTIVE PROBABILITY parse and compilation errors.
with pytest.raises(parse.BQLParseError):
# Need a table.
bql2sql('estimate predictive probability of weight;')
with pytest.raises(parse.BQLParseError):
# Need at most one generator.
bql2sql('estimate predictive probability of weight'
' from p1, p1;')
with pytest.raises(parse.BQLParseError):
# Need a generator name, not a subquery.
bql2sql('estimate predictive probability of weight'
' from (select 0);')
with pytest.raises(parse.BQLParseError):
# Need a column.
bql2sql('estimate predictive probability from p1;')
with pytest.raises(bayeslite.BQLError):
# Using (*) in both targets and constraints.
bql2sql('estimate predictive probability of (*) given (*) from p1;')
with pytest.raises(bayeslite.BQLError):
# Using (weight, *) in targets.
bql2sql('estimate predictive probability of (weight, *) given (age) '
'from p1;')
with pytest.raises(bayeslite.BQLError):
# Using (age, *) in constraints.
bql2sql('estimate predictive probability of weight given (*, age) '
'from p1;')
with pytest.raises(bayeslite.BQLError):
# Using duplicate column age.
bql2sql('estimate predictive probability of age given (weight, age) '
'from p1;')
    # PROBABILITY DENSITY.
assert bql2sql('estimate probability density of weight = 20 from p1;') == \
'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20) FROM "t1";'
assert bql2sql('estimate probability density of weight = 20'
' given (age = 8)'
' from p1;') == \
'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, NULL, 2, 8) FROM "t1";'
assert bql2sql('estimate probability density of (weight = 20, age = 8)'
' from p1;') == \
'SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8) FROM "t1";'
assert bql2sql('estimate probability density of (weight = 20, age = 8)'
" given (label = 'mumble') from p1;") == \
"SELECT bql_pdf_joint(1, NULL, NULL, 3, 20, 2, 8, NULL, 1, 'mumble')" \
' FROM "t1";'
assert bql2sql('estimate probability density of weight = (c + 1)'
' from p1;') == \
'SELECT bql_pdf_joint(1, NULL, NULL, 3, ("c" + 1)) FROM "t1";'
assert bql2sql('estimate probability density of weight = f(c)'
' from p1;') == \
'SELECT bql_pdf_joint(1, NULL, NULL, 3, "f"("c")) FROM "t1";'
assert bql2sql('estimate similarity to (rowid = 5) '
'in the context of weight from p1;') == \
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'
assert bql2sql(
'estimate similarity of (rowid = 12) to (rowid = 5) '
'in the context of weight from p1;') == \
'SELECT bql_row_similarity(1, NULL, NULL,' \
' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 12)),' \
' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 3) FROM "t1";'
assert bql2sql('estimate similarity to (rowid = 5) in the context of age'
' from p1') == \
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'
assert bql2sql(
'estimate similarity of (rowid = 5) to (height = 7 and age < 10)'
' in the context of weight from p1;') == \
'SELECT bql_row_similarity(1, NULL, NULL,' \
' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)),' \
' (SELECT _rowid_ FROM "t1" WHERE (("height" = 7) AND ("age" < 10))),' \
' 3) FROM "t1";'
with pytest.raises(bayeslite.BQLError):
# Cannot use all variables for similarity.
bql2sql(
'estimate similarity to (rowid = 5) in the context of * from p1;')
assert bql2sql('estimate similarity to (rowid = 5)'
' in the context of age from p1;') == \
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,' \
' (SELECT _rowid_ FROM "t1" WHERE ("rowid" = 5)), 2) FROM "t1";'
assert bql2sql('estimate dependence probability of age with weight'
' from p1;') == \
'SELECT bql_column_dependence_probability(1, NULL, NULL, 2, 3) '\
'FROM "t1";'
with pytest.raises(bayeslite.BQLError):
# Need both rows fixed.
bql2sql('estimate similarity to (rowid=2) in the context of r by p1')
with pytest.raises(bayeslite.BQLError):
# Need both rows fixed.
bql2sql('estimate similarity in the context of r within p1')
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate dependence probability with age from p1;')
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate dependence probability from p1;')
assert bql2sql('estimate mutual information of age with weight' +
' from p1;') == \
'SELECT bql_column_mutual_information('\
'1, NULL, NULL, \'[2]\', \'[3]\', NULL)'\
' FROM "t1";'
assert bql2sql('estimate mutual information of age with weight' +
' using 42 samples from p1;') == \
'SELECT bql_column_mutual_information('\
'1, NULL, NULL, \'[2]\', \'[3]\', 42)'\
' FROM "t1";'
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate mutual information with age from p1;')
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate mutual information from p1;')
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate mutual information with age using 42 samples'
' from p1;')
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate mutual information using 42 samples from p1;')
# XXX Should be SELECT, not ESTIMATE, here?
assert bql2sql('estimate correlation of age with weight from p1;') == \
'SELECT bql_column_correlation(1, NULL, NULL, 2, 3) FROM "t1";'
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate correlation with age from p1;')
with pytest.raises(bayeslite.BQLError):
# Need both columns fixed.
bql2sql('estimate correlation from p1;')
with pytest.raises(BQLError):
# Variable must exist.
bql2sql('estimate correlation with agee from variables of p1')
def test_predict_outside_infer():
with pytest.raises(bayeslite.BQLError):
# No PREDICT outside INFER.
bql2sql('estimate predict age with confidence 0.9 from p1;')
def test_infer_explicit_predict_confidence():
assert bql2sql('infer explicit predict age with confidence 0.9'
' from p1;') == \
'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL) FROM "t1";'
def test_infer_explicit_predict_confidence_nsamples():
assert bql2sql('infer explicit'
' predict age with confidence 0.9 using 42 samples'
' from p1;') == \
'SELECT bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42) FROM "t1";'
def test_infer_explicit_verbatim_and_predict_confidence():
assert bql2sql('infer explicit rowid, age,'
' predict age confidence age_conf from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age",' \
' bql_json_get(c2, \'confidence\') AS "age_conf"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_noconfidence():
assert bql2sql('infer explicit rowid, age,'
' predict age from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_confidence_nsamples():
assert bql2sql('infer explicit rowid, age,'
' predict age confidence age_conf using 42 samples from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age",' \
' bql_json_get(c2, \'confidence\') AS "age_conf"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_noconfidence_nsamples():
assert bql2sql('infer explicit rowid, age,'
' predict age using 42 samples from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 42)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_confidence_as():
assert bql2sql('infer explicit rowid, age,'
' predict age as age_inf confidence age_conf from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age_inf",' \
' bql_json_get(c2, \'confidence\') AS "age_conf"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_noconfidence_as():
assert bql2sql('infer explicit rowid, age,'
' predict age as age_inf from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age_inf"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, NULL)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_confidence_as_nsamples():
assert bql2sql('infer explicit rowid, age,'
' predict age as age_inf confidence age_conf using 87 samples'
' from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age_inf",' \
' bql_json_get(c2, \'confidence\') AS "age_conf"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)' \
' AS c2 FROM "t1");'
def test_infer_explicit_verbatim_and_predict_noconfidence_as_nsamples():
assert bql2sql('infer explicit rowid, age,'
' predict age as age_inf using 87 samples'
' from p1') == \
'SELECT c0 AS "rowid", c1 AS "age",' \
' bql_json_get(c2, \'value\') AS "age_inf"' \
' FROM (SELECT "rowid" AS c0, "age" AS c1,' \
' bql_predict_confidence(1, NULL, NULL, _rowid_, 2, 87)' \
' AS c2 FROM "t1");'
def test_infer_auto():
assert bql2sql('infer rowid, age, weight from p1') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))' \
' AS "weight"' \
' FROM "t1";'
def test_infer_auto_nsamples():
assert bql2sql('infer rowid, age, weight using (1+2) samples from p1') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, (1 + 2)))' \
' AS "age",' \
' "IFNULL"("weight",'\
' bql_predict(1, NULL, NULL, _rowid_, 3, 0, (1 + 2)))' \
' AS "weight"' \
' FROM "t1";'
def test_infer_auto_with_confidence():
assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \
' AS "age",' \
' "IFNULL"("weight",'\
' bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, NULL))' \
' AS "weight"' \
' FROM "t1";'
def test_infer_auto_with_confidence_nsamples():
assert bql2sql('infer rowid, age, weight with confidence 0.9'
' using sqrt(2) samples'
' from p1') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9,' \
' "sqrt"(2)))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,' \
' "sqrt"(2)))' \
' AS "weight"' \
' FROM "t1";'
def test_infer_auto_with_confidence_where():
assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1'
' where label = \'foo\'') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,'\
' NULL))' \
' AS "weight"' \
' FROM "t1"' \
' WHERE ("label" = \'foo\');'
def test_infer_auto_with_confidence_nsamples_where():
assert bql2sql('infer rowid, age, weight with confidence 0.9'
' using 42 samples'
' from p1'
' where label = \'foo\'') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))' \
' AS "weight"' \
' FROM "t1"' \
' WHERE ("label" = \'foo\');'
def test_infer_auto_with_confidence_nsamples_where_predict():
assert bql2sql('infer rowid, age, weight with confidence 0.9 from p1'
' where ifnull(label, predict label with confidence 0.7)'
' = \'foo\'') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, NULL))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9,' \
' NULL))' \
' AS "weight"' \
' FROM "t1"' \
' WHERE ("ifnull"("label",' \
' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, NULL))' \
' = \'foo\');'
def test_infer_auto_with_confidence_nsamples_where_predict_nsamples():
assert bql2sql('infer rowid, age, weight with confidence 0.9'
' using 42 samples'
' from p1'
' where ifnull(label, predict label with confidence 0.7'
' using 73 samples)'
' = \'foo\'') \
== \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0.9, 42))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0.9, 42))' \
' AS "weight"' \
' FROM "t1"' \
' WHERE ("ifnull"("label",' \
' bql_predict(1, NULL, NULL, _rowid_, 1, 0.7, 73))' \
' = \'foo\');'
def test_infer_auto_star():
assert bql2sql('infer rowid, * from p1') == \
'SELECT "rowid" AS "rowid", "id" AS "id",' \
' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, NULL))' \
' AS "label",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, NULL))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, NULL))' \
' AS "weight"' \
' FROM "t1";'
def test_infer_auto_star_nsamples():
assert bql2sql('infer rowid, * using 1 samples from p1') == \
'SELECT "rowid" AS "rowid", "id" AS "id",' \
' "IFNULL"("label", bql_predict(1, NULL, NULL, _rowid_, 1, 0, 1))' \
' AS "label",' \
' "IFNULL"("age", bql_predict(1, NULL, NULL, _rowid_, 2, 0, 1))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, NULL, NULL, _rowid_, 3, 0, 1))' \
' AS "weight"' \
' FROM "t1";'
def test_estimate_columns_trivial():
prefix0 = 'SELECT v.name AS name'
prefix1 = ' FROM bayesdb_variable AS v' \
' WHERE v.population_id = 1' \
' AND v.generator_id IS NULL'
prefix = prefix0 + prefix1
assert bql2sql('estimate * from columns of p1;') == \
prefix + ';'
assert bql2sql('estimate * from columns of p1 where' +
' (probability density of value 42) > 0.5') == \
prefix + \
' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 42) > 0.5);'
assert bql2sql('estimate * from columns of p1'
' where (probability density of value 8)'
' > (probability density of age = 16)') == \
prefix + \
' AND (bql_column_value_probability(1, NULL, NULL, v.colno, 8) >' \
' bql_pdf_joint(1, NULL, NULL, 2, 16));'
assert bql2sql('estimate *, probability density of value 8 given (age = 8)'
' from columns of p1;') == \
prefix0 + \
', bql_column_value_probability(1, NULL, NULL, v.colno, 8, 2, 8)' + \
prefix1 + ';'
with pytest.raises(bayeslite.BQLError):
bql2sql('estimate probability density of value 8 given (agee = 8)'
' from columns of p1')
with pytest.raises(bayeslite.BQLError):
# PREDICTIVE PROBABILITY makes no sense without row.
bql2sql('estimate * from columns of p1 where' +
' predictive probability of x > 0;')
with pytest.raises(bayeslite.BQLError):
# SIMILARITY makes no sense without row.
bql2sql('estimate * from columns of p1 where' +
' similarity to (rowid = x) in the context of c > 0;')
assert bql2sql('estimate * from columns of p1 where' +
' dependence probability with age > 0.5;') == \
prefix + \
' AND (bql_column_dependence_probability(1, NULL, NULL, 2, v.colno)' \
' > 0.5);'
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1 where' +
' dependence probability of age with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1'
' where dependence probability > 0.5;')
assert bql2sql('estimate * from columns of p1 order by' +
' mutual information with age;') == \
prefix + \
' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2]\','\
' \'[\' || v.colno || \']\', NULL);'
assert bql2sql('estimate * from columns of p1 order by' +
' mutual information with (age, label) using 42 samples;') == \
prefix + \
' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\
' \'[\' || v.colno || \']\', 42);'
assert bql2sql('estimate * from columns of p1 order by' +
' mutual information with (age, label)'
' given (weight=12) using 42 samples;') == \
prefix + \
' ORDER BY bql_column_mutual_information(1, NULL, NULL, \'[2, 1]\','\
' \'[\' || v.colno || \']\', 42, 3, 12);'
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1 order by' +
' mutual information of age with weight;')
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1'
' where mutual information > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1 order by' +
' mutual information of age with weight using 42 samples;')
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1 where' +
' mutual information using 42 samples > 0.5;')
assert bql2sql('estimate * from columns of p1 order by' +
' correlation with age desc;') == \
prefix + ' ORDER BY bql_column_correlation(1, NULL, NULL, 2, v.colno)' \
' DESC;'
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1 order by' +
' correlation of age with weight;')
with pytest.raises(bayeslite.BQLError):
# Must omit exactly one column.
bql2sql('estimate * from columns of p1 where correlation > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Makes no sense.
bql2sql('estimate * from columns of p1'
' where predict age with confidence 0.9 > 30;')
assert bql2sql('estimate'
' *, dependence probability with weight as depprob,'
' mutual information with weight as mutinf'
' from columns of p1'
' where depprob > 0.5 order by mutinf desc') == \
prefix0 + \
', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \
' AS "depprob"' \
', bql_column_mutual_information(1, NULL, NULL, \'[3]\',' \
' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \
+ prefix1 + \
' AND ("depprob" > 0.5)' \
' ORDER BY "mutinf" DESC;'
assert bql2sql('estimate'
' *, dependence probability with weight as depprob,'
' mutual information with (age, weight) as mutinf'
' from columns of p1'
' where depprob > 0.5 order by mutinf desc') == \
prefix0 + \
', bql_column_dependence_probability(1, NULL, NULL, 3, v.colno)' \
' AS "depprob"' \
', bql_column_mutual_information(1, NULL, NULL, \'[2, 3]\',' \
' \'[\' || v.colno || \']\', NULL) AS "mutinf"' \
+ prefix1 + \
' AND ("depprob" > 0.5)' \
' ORDER BY "mutinf" DESC;'
# XXX This mixes up target and reference variables, which is OK,
# because MI is symmetric, but...oops.
assert bql2sql('estimate * from variables of p1'
' where probability of (mutual information with age < 0.1)'
' > 0.8') == \
prefix + \
' AND ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[' || v.colno || ']'))) > 0.8);"
assert bql2sql('estimate * from variables of p1'
' order by probability of (mutual information with age < 0.1)') ==\
prefix + \
' ORDER BY (SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[' || v.colno || ']')));"
def test_estimate_pairwise_trivial():
prefix = 'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1, '
infix = ' AS value'
infix0 = ' FROM bayesdb_population AS p,'
infix0 += ' bayesdb_variable AS v0,'
infix0 += ' bayesdb_variable AS v1'
infix0 += ' WHERE p.id = 1'
infix0 += ' AND v0.population_id = p.id AND v1.population_id = p.id'
infix0 += ' AND v0.generator_id IS NULL'
infix0 += ' AND v1.generator_id IS NULL'
infix += infix0
assert bql2sql('estimate dependence probability'
' from pairwise columns of p1;') == \
prefix + \
'bql_column_dependence_probability(1, NULL, NULL, v0.colno,'\
' v1.colno)' + \
infix + ';'
assert bql2sql('estimate mutual information'
' from pairwise columns of p1 where'
' (probability density of age = 0) > 0.5;') == \
prefix + \
'bql_column_mutual_information(1, NULL, NULL, '\
'\'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)' + \
infix + \
' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'
assert bql2sql('estimate mutual information given (label=\'go\', weight)'
' from pairwise columns of p1 where'
' (probability density of age = 0) > 0.5;') == \
prefix + \
'bql_column_mutual_information(1, NULL, NULL,'\
' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL,'\
' 1, \'go\', 3, NULL)' + \
infix + \
' AND (bql_pdf_joint(1, NULL, NULL, 2, 0) > 0.5);'
with pytest.raises(bayeslite.BQLError):
# PROBABILITY DENSITY OF VALUE is 1-column.
bql2sql('estimate correlation from pairwise columns of p1 where' +
' (probability density of value 0) > 0.5;')
with pytest.raises(bayeslite.BQLError):
# PREDICTIVE PROBABILITY OF is a row function.
bql2sql('estimate dependence probability'
' from pairwise columns of p1' +
' where predictive probability of x > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate dependence probability'
' from pairwise columns of p1'
' where dependence probability of age with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate mutual information from pairwise columns of p1'
' where dependence probability with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate mutual information using 42 samples'
' from pairwise columns of p1'
' where dependence probability with weight > 0.5;')
assert bql2sql('estimate correlation from pairwise columns of p1'
' where dependence probability > 0.5;') == \
prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
infix + ' AND' \
' (bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \
' v1.colno)' \
' > 0.5);'
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate dependence probability'
' from pairwise columns of p1'
' where mutual information of age with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate dependence probability'
' from pairwise columns of p1'
' where mutual information of age with weight using 42 samples'
' > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate mutual information from pairwise columns of p1'
' where mutual information with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate mutual information using 42 samples'
' from pairwise columns of p1'
' where mutual information with weight using 42 samples > 0.5;')
assert bql2sql('estimate correlation from pairwise columns of p1' +
' where mutual information > 0.5;') == \
prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
infix + ' AND' + \
' (bql_column_mutual_information(1, NULL, NULL,'\
' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL) > 0.5);'
assert bql2sql('estimate correlation from pairwise columns of p1' +
' where mutual information using 42 samples > 0.5;') == \
prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
infix + ' AND' + \
' (bql_column_mutual_information(1, NULL, NULL,'\
' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', 42) > 0.5);'
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate dependence probability'
' from pairwise columns of p1'
' where correlation of age with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate mutual information from pairwise columns of p1'
' where correlation with weight > 0.5;')
with pytest.raises(bayeslite.BQLError):
# Must omit both columns.
bql2sql('estimate mutual information using 42 samples'
' from pairwise columns of p1'
' where correlation with weight > 0.5;')
assert bql2sql('estimate correlation from pairwise columns of p1'
' where correlation > 0.5;') == \
prefix + 'bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno)' + \
infix + ' AND' + \
' (bql_column_correlation(1, NULL, NULL, v0.colno, v1.colno) > 0.5);'
with pytest.raises(bayeslite.BQLError):
# Makes no sense.
bql2sql('estimate dependence probability'
' from pairwise columns of p1'
' where predict age with confidence 0.9 > 30;')
assert bql2sql('estimate dependence probability as depprob,'
' mutual information as mutinf'
' from pairwise columns of p1'
' where depprob > 0.5 order by mutinf desc') == \
prefix + \
'bql_column_dependence_probability(1, NULL, NULL, v0.colno, v1.colno)' \
' AS "depprob",' \
' bql_column_mutual_information(1, NULL, NULL,'\
' \'[\' || v0.colno || \']\', \'[\' || v1.colno || \']\', NULL)'\
' AS "mutinf"' \
+ infix0 + \
' AND ("depprob" > 0.5)' \
' ORDER BY "mutinf" DESC;'
def test_estimate_pairwise_row():
prefix = 'SELECT r0._rowid_ AS rowid0, r1._rowid_ AS rowid1'
infix = ' AS value FROM "t1" AS r0, "t1" AS r1'
assert bql2sql('estimate similarity in the context of age' +
' from pairwise p1;') == \
prefix + ', bql_row_similarity(1, NULL, NULL,'\
' r0._rowid_, r1._rowid_, 2)' + \
infix + ';'
with pytest.raises(bayeslite.BQLError):
# PREDICT is a 1-row function.
bql2sql('estimate predict age with confidence 0.9 from pairwise t1;')
def test_estimate_pairwise_selected_columns():
assert bql2sql('estimate dependence probability'
' from pairwise columns of p1 for label, age') == \
'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
' bql_column_dependence_probability(1, NULL, NULL,' \
' v0.colno, v1.colno)' \
' AS value' \
' FROM bayesdb_population AS p,' \
' bayesdb_variable AS v0,' \
' bayesdb_variable AS v1' \
' WHERE p.id = 1' \
' AND v0.population_id = p.id AND v1.population_id = p.id' \
' AND v0.generator_id IS NULL AND v1.generator_id IS NULL' \
' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'
assert bql2sql('estimate dependence probability'
' from pairwise columns of p1'
' for (ESTIMATE * FROM COLUMNS OF p1'
' ORDER BY name DESC LIMIT 2)') == \
'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
' bql_column_dependence_probability(1, NULL, NULL, v0.colno,' \
' v1.colno)' \
' AS value' \
' FROM bayesdb_population AS p,' \
' bayesdb_variable AS v0,' \
' bayesdb_variable AS v1' \
' WHERE p.id = 1' \
' AND v0.population_id = p.id AND v1.population_id = p.id' \
' AND v0.generator_id IS NULL AND v1.generator_id IS NULL' \
' AND v0.colno IN (3, 1) AND v1.colno IN (3, 1);'
def test_select_columns_subquery():
assert bql2sql('select id, t1.(estimate * from columns of p1'
' order by name asc limit 2) from t1') == \
'SELECT "id", "t1"."age", "t1"."label" FROM "t1";'
@pytest.mark.xfail(strict=True, reason='no simulate vars from models of')
def test_simulate_models_columns_subquery():
assert bql2sql('simulate weight, t1.(estimate * from columns of p1'
' order by name asc limit 2) from models of p1') == \
'SELECT * FROM "bayesdb_temp_0";'
assert bql2sql('simulate 0, weight, t1.(estimate * from columns of p1'
' order by name asc limit 2) from models of p1') == \
'SELECT 0, "v0" AS "weight", "v1" AS "age", "v2" AS "label" FROM' \
' (SELECT * FROM "bayesdb_temp_0");'
assert bql2sql('simulate weight + 1, t1.(estimate * from columns of p1'
' order by name asc limit 2) from models of p1') == \
'SELECT ("v0" + 1), "v1" AS "age", "v2" AS "label" FROM' \
' (SELECT * FROM "bayesdb_temp_0");'
assert bql2sql('simulate weight + 1 AS wp1,'
' t1.(estimate * from columns of p1'
' order by name asc limit 2) from models of p1') == \
'SELECT ("v0" + 1) AS "wp1", "v1" AS "age", "v2" AS "label" FROM' \
' (SELECT * FROM "bayesdb_temp_0");'
def test_simulate_columns_subquery():
# XXX This test is a little unsatisfactory -- we do not get to see
# what the variables in the result are named...
assert bql2sql('simulate weight, t1.(estimate * from columns of p1'
' order by name asc limit 2) from p1 limit 10') == \
'SELECT * FROM "bayesdb_temp_0";'
with pytest.raises(parse.BQLParseError):
# Compound columns not yet implemented for SIMULATE.
bql2sql('simulate weight + 1, t1.(estimate * from columns of p1'
' order by name asc limit 2) from p1 limit 10')
def test_simulate_models():
# Base case.
assert bql2sql('simulate mutual information of age with weight'
' from models of p1') == \
'SELECT mi FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[3]';"
# Multiple target variables.
assert bql2sql('simulate mutual information of (label, age) with weight'
' from models of p1') == \
'SELECT mi FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[1, 2]'" \
" AND reference_vars = '[3]';"
# Multiple reference variables.
assert bql2sql('simulate mutual information of age with (label, weight)'
' from models of p1') == \
'SELECT mi FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[1, 3]';"
# Specified number of samples.
assert bql2sql('simulate mutual information of age with weight'
' using 42 samples from models of p1') == \
'SELECT mi FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[3]'" \
' AND nsamples = 42;'
# Conditional.
assert bql2sql('simulate mutual information of age with weight'
" given (label = 'foo') from models of p1") == \
'SELECT mi FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[3]'" \
" AND conditions = '{\"1\": \"foo\"}';"
# Modeled by a specific generator.
assert bql2sql('simulate mutual information of age with weight'
' from models of p1 modeled by g1',
lambda bdb: bdb.execute('create generator g1 for p1')) == \
'SELECT mi FROM bql_mutinf' \
' WHERE population_id = 1' \
' AND generator_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[3]';"
# Two mutual informations.
assert bql2sql('simulate mutual information of age with weight AS "mi(aw)",'
' mutual information of label with weight AS "mi(lw)"'
' from models of p1') == \
'SELECT t0."mi(aw)" AS "mi(aw)", t1."mi(lw)" AS "mi(lw)"' \
' FROM (SELECT _rowid_, mi AS "mi(aw)" FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[3]') AS t0," \
' (SELECT _rowid_, mi AS "mi(lw)" FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[1]'" \
" AND reference_vars = '[3]') AS t1" \
' WHERE t0._rowid_ = t1._rowid_;'
def test_probability_of_mutinf():
assert bql2sql('estimate probability of'
' (mutual information of age with weight < 0.1) > 0.5'
' within p1') == \
'SELECT ((SELECT "AVG"("x") FROM (SELECT ("v0" < 0.1) AS "x"' \
' FROM (SELECT mi AS "v0" FROM bql_mutinf' \
' WHERE population_id = 1' \
" AND target_vars = '[2]'" \
" AND reference_vars = '[3]'))) > 0.5);"
def test_modeledby_usingmodels_trivial():
def setup(bdb):
bdb.execute('create generator m1 for p1 using cgpm;')
assert bql2sql('estimate predictive probability of weight + 1'
' from p1 modeled by m1 using models 1-3, 5;', setup=setup) == \
'SELECT (bql_row_column_predictive_probability(1, 1, \'[1, 2, 3, 5]\','\
' _rowid_, \'[3]\', \'[]\') + 1)' \
' FROM "t1";'
assert bql2sql(
'infer rowid, age, weight from p1 modeled by m1 using model 7',
setup=setup) == \
'SELECT "rowid" AS "rowid",' \
' "IFNULL"("age", bql_predict(1, 1, \'[7]\', _rowid_, 2, 0, NULL))' \
' AS "age",' \
' "IFNULL"("weight", bql_predict(1, 1, \'[7]\', _rowid_, 3, 0, NULL))' \
' AS "weight"' \
' FROM "t1";'
assert bql2sql('infer explicit predict age with confidence 0.9'
' from p1 using models 0, 3-5;',
setup=setup) == \
'SELECT bql_predict(1, NULL, \'[0, 3, 4, 5]\', _rowid_, 2, 0.9, NULL)'\
' FROM "t1";'
assert bql2sql('''
estimate predictive relevance
of (label = 'Uganda')
to existing rows (rowid < 4)
and hypothetical rows with values (
("age" = 82, "weight" = 14),
("age" = 74, label = 'Europe', "weight" = 7)
)
in the context of "weight"
by p1 modeled by m1 using models 8, 10-12
''', setup=setup) == \
'SELECT bql_row_predictive_relevance(1, 1, \'[8, 10, 11, 12]\', ' \
'(SELECT _rowid_ FROM "t1" WHERE ("label" = \'Uganda\')), '\
'\'[1, 2, 3]\', 3, '\
'2, 82, 3, 14, NULL, 2, 74, 1, \'Europe\', 3, 7, NULL);'
assert bql2sql('''
estimate dependence probability
from pairwise columns of p1
for label, age
modeled by m1
using models 1, 4, 12
''', setup=setup) == \
'SELECT 1 AS population_id, v0.name AS name0, v1.name AS name1,' \
' bql_column_dependence_probability(1, 1, \'[1, 4, 12]\',' \
' v0.colno, v1.colno)' \
' AS value' \
' FROM bayesdb_population AS p,' \
' bayesdb_variable AS v0,' \
' bayesdb_variable AS v1' \
' WHERE p.id = 1' \
' AND v0.population_id = p.id AND v1.population_id = p.id' \
' AND (v0.generator_id IS NULL OR v0.generator_id = 1)' \
' AND (v1.generator_id IS NULL OR v1.generator_id = 1)' \
' AND v0.colno IN (1, 2) AND v1.colno IN (1, 2);'
assert bql2sql('''
estimate mutual information of age with weight
from p1 modeled by m1 using model 1;
''', setup=setup) == \
'SELECT bql_column_mutual_information('\
'1, 1, \'[1]\', \'[2]\', \'[3]\', NULL)'\
' FROM "t1";'
def test_simulate_columns_all():
with pytest.raises(parse.BQLParseError):
bql2sql('simulate * from p1 limit 1')
def test_trivial_commands():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
# XXX Query parameters!
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
with open(fname, 'rU') as f:
with pytest.raises(ValueError):
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
create=True)
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True,
ifnotexists=True)
guess.bayesdb_guess_population(bdb, 'p', 't')
with pytest.raises(ValueError):
guess.bayesdb_guess_population(bdb, 'p', 't')
guess.bayesdb_guess_population(bdb, 'p', 't', ifnotexists=True)
bdb.execute('create generator p_cc for p;')
bdb.execute('initialize 2 models for p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('initialize 2 models for p_cc')
bdb.execute('drop models from p_cc')
bdb.execute('drop models from p_cc')
bdb.execute('initialize 2 models for p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('initialize 2 models for p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop models 0-2 from p_cc')
bdb.execute('drop models 0-1 from p_cc')
with bdb.savepoint():
bdb.execute('initialize 2 models for p_cc')
bdb.execute('drop models 0-1 from p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop models 0-1 from p_cc')
bdb.execute('initialize 2 models for p_cc')
bdb.execute('initialize 1 model if not exists for p_cc')
bdb.execute('initialize 2 models if not exists for p_cc')
population_id = core.bayesdb_get_population(bdb, 'p')
generator_id = core.bayesdb_get_generator(bdb, population_id, 'p_cc')
assert core.bayesdb_generator_table(bdb, generator_id) == 't'
bdb.execute('alter table t rename to t')
assert core.bayesdb_generator_table(bdb, generator_id) == 't'
bdb.execute('alter table t rename to T')
assert core.bayesdb_generator_table(bdb, generator_id) == 'T'
bdb.execute('alter population p rename to p')
assert core.bayesdb_population_name(bdb, population_id) == 'p'
bdb.execute('alter population p rename to p2')
assert core.bayesdb_population_name(bdb, population_id) == 'p2'
bdb.execute('alter population p2 rename to p')
assert core.bayesdb_population_name(bdb, population_id) == 'p'
bdb.execute('estimate count(*) from p').fetchall()
bdb.execute('alter table t rename to t')
assert core.bayesdb_generator_table(bdb, generator_id) == 't'
bdb.execute('alter generator p_cc rename to p0_cc')
assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc'
bdb.execute('alter generator p0_cc rename to zot, rename to P0_CC')
assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_CC'
bdb.execute('alter generator P0_cc rename to P0_cc')
assert core.bayesdb_generator_name(bdb, generator_id) == 'P0_cc'
bdb.execute('alter generator p0_CC rename to p0_cc')
assert core.bayesdb_generator_name(bdb, generator_id) == 'p0_cc'
bdb.execute('estimate count(*) from p').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate count(*) from p_cc')
bdb.execute('alter generator p0_cc rename to P0_cc')
bdb.execute('analyze p0_cc for 1 iteration')
colno = core.bayesdb_variable_number(bdb, population_id, generator_id,
'gender')
with pytest.raises(parse.BQLParseError):
# Rename the table's columns, not the generator's columns.
bdb.execute('alter generator p0_cc rename gender to sex')
with pytest.raises(NotImplementedError): # XXX
bdb.execute('alter table t rename to t0, rename gender to sex')
assert core.bayesdb_variable_number(
bdb, population_id, generator_id, 'sex') \
== colno
bdb.execute('analyze p0_cc model 0 for 1 iteration')
bdb.execute('alter generator p0_cc rename to p_cc')
assert core.bayesdb_variable_number(
bdb, population_id, generator_id, 'sex') \
== colno
bdb.execute('select sex from t0').fetchall()
with pytest.raises(AssertionError): # XXX
bdb.execute('select gender from t0')
assert False, 'Need to fix quoting of unknown columns!'
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate predict sex with confidence 0.9'
' from p').fetchall()
bdb.execute('infer explicit predict sex with confidence 0.9'
' from p').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate predict gender with confidence 0.9'
' from p')
with pytest.raises(bayeslite.BQLError):
bdb.execute('infer explicit predict gender with confidence 0.9'
' from p')
bdb.execute('alter table t0 rename sex to gender')
assert core.bayesdb_variable_number(
bdb, population_id, generator_id, 'gender') \
== colno
bdb.execute('alter generator p0_cc rename to p_cc') # XXX
bdb.execute('alter table t rename to T0') # XXX
bdb.sql_execute('create table t0_temp(x)')
bdb.execute('alter table T0 rename to t0')
assert bdb.execute('select count(*) from t0_temp').fetchvalue() == 0
assert bdb.execute('select count(*) from t0').fetchvalue() > 0
with pytest.raises(bayeslite.BQLError):
# Cannot specify models with rename.
bdb.execute('alter generator p_cc models (1) rename to p_cc_fail')
bdb.execute('drop table T0_TEMP')
bdb.execute('analyze p_cc model 0 for 1 iteration')
bdb.execute('analyze p_cc model 1 for 1 iteration')
bdb.execute('analyze p_cc models 0-1 for 1 iteration')
bdb.execute('analyze p_cc models 0,1 for 1 iteration')
bdb.execute('analyze p_cc for 1 iteration')
bdb.execute('select * from t0').fetchall()
bdb.execute('select * from T0').fetchall()
bdb.execute('estimate * from p').fetchall()
bdb.execute('estimate * from P').fetchall()
        # SIMILARITY IN THE CONTEXT OF requires exactly 1 variable.
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate similarity in the context of * '
'from pairwise p').fetchall()
bdb.execute('estimate similarity in the context of age '
'from pairwise p').fetchall()
bdb.execute('alter population p rename to p2')
assert core.bayesdb_population_name(bdb, population_id) == 'p2'
bdb.execute('estimate similarity to (rowid=1) in the context of rank '
'from p2').fetchall()
bdb.execute('select value from'
' (estimate correlation from pairwise columns of p2)').fetchall()
bdb.execute('infer explicit predict age with confidence 0.9'
' from p2').fetchall()
bdb.execute('infer explicit predict AGE with confidence 0.9'
' from P2').fetchall()
bdb.execute('infer explicit predict aGe with confidence 0.9'
' from P2').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate predict agee with confidence 0.9 from p2')
with pytest.raises(bayeslite.BQLError):
bdb.execute('infer explicit predict agee with confidence 0.9'
' from p2')
guess.bayesdb_guess_population(bdb, 'pe', 't0',
overrides=[
('age', 'numerical'),
('rank', 'numerical'),
])
bdb.execute('create generator pe_cc for pe;')
with pytest.raises(bayeslite.BQLError):
# No models to analyze.
bdb.execute('analyze pe_cc for 1 iteration')
bdb.execute('initialize 1 model if not exists for pe_cc')
bdb.execute('analyze pe_cc for 1 iteration')
bdb.execute('estimate correlation'
' from pairwise columns of pe').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('initialize 4 models if not exists for t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('analyze t0 for 1 iteration')
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate * from t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate * from columns of t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate correlation from pairwise columns of t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate similarity in the context of age '
'from pairwise t')
bdb.execute('initialize 6 models if not exists for p_cc')
bdb.execute('analyze p_cc for 1 iteration')
def test_trivial_deadline():
with test_core.t1() as (bdb, _population_id, _generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 second')
def test_parametrized():
assert bql2sqlparam('select * from t where id = ?') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = :foo') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = $foo') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = @foo') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where id = ?123') == \
'SELECT * FROM "t" WHERE ("id" = ?1);'
assert bql2sqlparam('select * from t where a = $foo and b = ?1;') == \
'SELECT * FROM "t" WHERE (("a" = ?1) AND ("b" = ?1));'
assert bql2sqlparam('select * from t' +
' where a = ?123 and b = :foo and c = ?124') == \
'SELECT * FROM "t" WHERE' + \
' ((("a" = ?1) AND ("b" = ?2)) AND ("c" = ?2));'
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
assert bql_execute(bdb, 'select count(*) from t') == [(7,)]
assert bql_execute(bdb, 'select count(distinct division) from t') == \
[(6,)]
assert bql_execute(bdb, 'select * from t where height > ?', (70,)) == \
[
(41, 'M', 65600, 72, 'marketing', 4),
(30, 'M', 70000, 73, 'sales', 4),
(30, 'F', 81000, 73, 'engineering', 3),
]
assert bql_execute(bdb, 'select * from t where height > ?123',
(0,)*122 + (70,)) == \
[
(41, 'M', 65600, 72, 'marketing', 4),
(30, 'M', 70000, 73, 'sales', 4),
(30, 'F', 81000, 73, 'engineering', 3),
]
assert bql_execute(bdb, 'select age from t where division = :division',
{':division': 'sales'}) == \
[(34,), (30,)]
assert bql_execute(bdb, 'select division from t' +
' where age < @age and rank > ?;',
(40, 4)) == \
[('accounting',)]
assert bql_execute(bdb, 'select division from t' +
' where age < @age and rank > :rank;',
{':RANK': 4, '@aGe': 40}) == \
[('accounting',)]
with pytest.raises(ValueError):
bdb.execute('select * from t where age < ? and rank > :r',
{':r': 4})
def traced_execute(query, *args):
bql = []
def trace(string, _bindings):
bql.append(' '.join(string.split()))
bdb.trace(trace)
with bdb.savepoint():
bdb.execute(query, *args)
bdb.untrace(trace)
return bql
def sqltraced_execute(query, *args):
sql = []
def trace(string, _bindings):
sql.append(' '.join(string.split()))
bdb.sql_trace(trace)
with bdb.savepoint():
bdb.execute(query, *args)
bdb.sql_untrace(trace)
return sql
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute('create generator p_cc for p;')
bdb.execute('initialize 1 model for p_cc;')
assert traced_execute('estimate similarity to (rowid = 1)'
' in the context of (estimate * from columns of p limit 1)'
' from p;') == [
'estimate similarity to (rowid = 1)' \
' in the context of (estimate * from columns of p limit 1)' \
' from p;',
]
assert sqltraced_execute('estimate similarity to (rowid = 1)'
' in the context of (estimate * from columns of p limit 1)'
' from p;') == [
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT v.name AS name FROM bayesdb_variable AS v'
' WHERE v.population_id = 1'
' AND v.generator_id IS NULL'
' LIMIT 1',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population'
' WHERE id = ?',
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'
' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"',
'SELECT id FROM bayesdb_generator WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual '
'WHERE generator_id = ? AND table_rowid = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator '
'WHERE generator_id = ?'
]
assert sqltraced_execute('estimate similarity to (rowid = 1)'
' in the context of (estimate * from columns of p limit ?)'
' from p;',
(1,)) == [
'SELECT COUNT(*) FROM bayesdb_population'
' WHERE name = ?',
'SELECT id FROM bayesdb_population'
' WHERE name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT COUNT(*) FROM bayesdb_population'
' WHERE name = ?',
'SELECT id FROM bayesdb_population'
' WHERE name = ?',
# ESTIMATE * FROM COLUMNS OF:
'SELECT v.name AS name'
' FROM bayesdb_variable AS v'
' WHERE v.population_id = 1'
' AND v.generator_id IS NULL'
' LIMIT ?1',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
# ESTIMATE SIMILARITY TO (rowid=1):
'SELECT bql_row_similarity(1, NULL, NULL, _rowid_,'
' (SELECT _rowid_ FROM "t" WHERE ("rowid" = 1)), 0) FROM "t"',
'SELECT id FROM bayesdb_generator WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?'
]
assert sqltraced_execute(
'create temp table if not exists sim as '
'simulate age, RANK, division '
'from p given gender = \'F\' limit 4') == [
'PRAGMA table_info("sim")',
'PRAGMA table_info("bayesdb_temp_0")',
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT CAST(4 AS INTEGER), \'F\'',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT MAX(_rowid_) FROM "t"',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT id FROM bayesdb_generator'
' WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT 1 FROM "t" WHERE oid = ?',
'SELECT 1 FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ? LIMIT 1',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT code FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND value = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ? AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'CREATE TEMP TABLE "bayesdb_temp_0"'
' ("age","RANK","division")',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'INSERT INTO "bayesdb_temp_0" ("age","RANK","division")'
' VALUES (?,?,?)',
'CREATE TEMP TABLE IF NOT EXISTS "sim" AS'
' SELECT * FROM "bayesdb_temp_0"',
'DROP TABLE "bayesdb_temp_0"'
]
assert sqltraced_execute(
'select * from (simulate age from p '
'given gender = \'F\' limit 4)') == [
'PRAGMA table_info("bayesdb_temp_1")',
'SELECT COUNT(*) FROM bayesdb_population WHERE name = ?',
'SELECT id FROM bayesdb_population WHERE name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT CAST(4 AS INTEGER), \'F\'',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT COUNT(*) FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT colno FROM bayesdb_variable'
' WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?)'
' AND name = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT MAX(_rowid_) FROM "t"',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT token FROM bayesdb_rowid_tokens',
'SELECT id FROM bayesdb_generator WHERE population_id = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT tabname FROM bayesdb_population WHERE id = ?',
'SELECT 1 FROM "t" WHERE oid = ?',
'SELECT 1 FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ? LIMIT 1',
'SELECT cgpm_rowid FROM bayesdb_cgpm_individual'
' WHERE generator_id = ? AND table_rowid = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT code FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND value = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT stattype FROM bayesdb_variable WHERE population_id = ?'
' AND (generator_id IS NULL OR generator_id = ?) AND colno = ?',
'SELECT value FROM bayesdb_cgpm_category'
' WHERE generator_id = ? AND colno = ? AND code = ?',
'CREATE TEMP TABLE "bayesdb_temp_1" ("age")',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'INSERT INTO "bayesdb_temp_1" ("age") VALUES (?)',
'SELECT * FROM (SELECT * FROM "bayesdb_temp_1")',
'DROP TABLE "bayesdb_temp_1"',
]
bdb.execute('''
create population q for t (
age NUMERICAL;
gender NOMINAL; -- Not binary!
salary NUMERICAL;
height NUMERICAL;
division NOMINAL;
rank NOMINAL;
)
''')
bdb.execute('create generator q_cc for q;')
bdb.execute('initialize 1 model for q_cc;')
assert sqltraced_execute('analyze q_cc for 1 iteration;') == [
'SELECT COUNT(*) FROM bayesdb_generator WHERE name = ?',
'SELECT id FROM bayesdb_generator WHERE name = ?',
'SELECT backend FROM bayesdb_generator WHERE id = ?',
'SELECT engine_json, engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'SELECT population_id FROM bayesdb_generator WHERE id = ?',
'SELECT engine_stamp FROM bayesdb_cgpm_generator'
' WHERE generator_id = ?',
'UPDATE bayesdb_cgpm_generator'
' SET engine_json = :engine_json, engine_stamp = :engine_stamp'
' WHERE generator_id = :generator_id']
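# --- Illustrative sketch (not part of the original test suite) ---------------
# The bql2sqlparam assertions at the top of test_parametrized above check that
# BQL's ?NNN, :name, @name and $name parameters are all renumbered to
# sequential SQLite-style ?1, ?2, ... markers, with a repeated name reusing its
# number. For comparison, plain sqlite3 accepts the analogous positional and
# named styles directly:
def _example_sqlite_param_styles():
    import sqlite3
    with sqlite3.connect(':memory:') as conn:
        conn.execute('create table demo(id)')
        conn.execute('insert into demo values (7)')
        positional = conn.execute(
            'select id from demo where id = ?', (7,)).fetchall()
        named = conn.execute(
            'select id from demo where id = :foo', {'foo': 7}).fetchall()
        assert positional == named == [(7,)]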
def test_create_table_ifnotexists_as_simulate():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
# If not exists table tests
guess.bayesdb_guess_population(bdb, 'p', 't',
overrides=[('age', 'numerical')])
bdb.execute('create generator p_cc for p;')
bdb.execute('initialize 1 model for p_cc')
bdb.execute('analyze p_cc for 1 iteration')
bdb.execute('''
create table if not exists u as
simulate age from p limit 10
''')
bdb.execute("drop table u")
bdb.execute('''
create table if not exists w as simulate age from p
given division='sales' limit 10
''')
bdb.execute("drop table w")
bdb.execute("create table u as simulate age from p limit 10")
x = bdb.execute("select count (*) from u").fetchvalue()
bdb.execute('''
create table if not exists u as simulate age from p limit 10
''')
bdb.execute('''
create table if not exists u as simulate age from p
given division='sales' limit 10
''')
assert x == bdb.execute("select count (*) from u").fetchvalue()
def test_createtab():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
with pytest.raises(apsw.SQLError):
bdb.execute('drop table t')
bdb.execute('drop table if exists t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop population p')
bdb.execute('drop population if exists p')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop generator p_cc')
bdb.execute('drop generator if exists p_cc')
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
with bdb.savepoint():
# Savepoint because we don't actually want the new data to
# be inserted.
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
create=True, ifnotexists=True)
guess.bayesdb_guess_population(bdb, 'p', 't',
overrides=[('age', 'numerical')])
bdb.execute('create generator p_cc for p;')
with pytest.raises(bayeslite.BQLError):
# Redefining population.
bdb.execute('create population p for t (age numerical)')
with pytest.raises(bayeslite.BQLError):
# Redefining generator.
bdb.execute('create generator p_cc for p;')
# Make sure ignore columns work.
#
# XXX Also check key columns.
guess.bayesdb_guess_population(bdb, 'p0', 't',
overrides=[('age', 'ignore')])
bdb.execute('drop population p0')
population_id = core.bayesdb_get_population(bdb, 'p')
colno = core.bayesdb_variable_number(bdb, population_id, None, 'age')
assert core.bayesdb_variable_stattype(
bdb, population_id, None, colno) == 'numerical'
bdb.execute('initialize 1 model for p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop table t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop population p')
bdb.execute('drop generator p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop generator p_cc')
with pytest.raises(bayeslite.BQLError):
bdb.execute('drop table t')
bdb.execute('drop generator if exists p_cc')
bdb.execute('drop population p')
bdb.execute('drop population if exists p')
bdb.execute('drop table t')
bdb.execute('drop table if exists t')
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True, create=True)
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute("create table u as select * from t where gender = 'F'")
assert bql_execute(bdb, 'select * from u') == [
(23, 'F', 81000, 67, 'data science', 3),
(36, 'F', 96000, 70, 'management', 2),
(30, 'F', 81000, 73, 'engineering', 3),
]
with pytest.raises(bayeslite.BQLError):
bdb.execute("create table u as select * from t where gender = 'F'")
bdb.execute('drop table u')
with pytest.raises(apsw.SQLError):
bql_execute(bdb, 'select * from u')
bdb.execute("create temp table u as"
" select * from t where gender = 'F'")
assert bql_execute(bdb, 'select * from u') == [
(23, 'F', 81000, 67, 'data science', 3),
(36, 'F', 96000, 70, 'management', 2),
(30, 'F', 81000, 73, 'engineering', 3),
]
# XXX Test to make sure TEMP is passed through, and the table
# doesn't persist on disk.
def test_alterpop_addvar():
with bayeslite.bayesdb_open() as bdb:
bayeslite.bayesdb_read_csv(
bdb, 't', StringIO.StringIO(test_csv.csv_data),
header=True, create=True)
bdb.execute('''
create population p for t with schema(
age numerical;
gender nominal;
salary numerical;
height ignore;
division ignore;
rank ignore;
)
''')
population_id = core.bayesdb_get_population(bdb, 'p')
bdb.execute('create generator m for p;')
# Fail when variable does not exist in base table.
with pytest.raises(bayeslite.BQLError):
bdb.execute('alter population p add variable quux;')
# Fail when variable already in population.
with pytest.raises(bayeslite.BQLError):
bdb.execute('alter population p add variable age numerical;')
# Fail when given invalid statistical type.
with pytest.raises(bayeslite.BQLError):
bdb.execute('alter population p add variable heigh numr;')
# Alter pop with stattype.
assert not core.bayesdb_has_variable(bdb, population_id, None, 'height')
bdb.execute('alter population p add variable height numerical;')
assert core.bayesdb_has_variable(bdb, population_id, None, 'height')
# Alter pop multiple without stattype.
assert not core.bayesdb_has_variable(bdb, population_id, None, 'rank')
assert not core.bayesdb_has_variable(
bdb, population_id, None, 'division')
bdb.execute('''
alter population p
add variable rank,
add variable division;
''')
assert core.bayesdb_has_variable(bdb, population_id, None, 'rank')
assert core.bayesdb_has_variable(bdb, population_id, None, 'division')
# Add a new column weight to the base table.
bdb.sql_execute('alter table t add column weight real;')
# Fail when no values in new column.
with pytest.raises(bayeslite.BQLError):
bdb.execute('alter population p add variable weight numerical;')
assert not core.bayesdb_has_variable(bdb, population_id, None, 'weight')
# Update a single value and update the population.
bdb.sql_execute('update t set weight = 1 where oid = 1;')
bdb.execute('alter population p add variable weight numerical;')
assert core.bayesdb_has_variable(bdb, population_id, None, 'weight')
def test_txn():
with test_csv.bayesdb_csv_file(test_csv.csv_data) as (bdb, fname):
# Make sure rollback and commit fail outside a transaction.
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('ROLLBACK')
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('COMMIT')
# Open a transaction which we'll roll back.
bdb.execute('BEGIN')
try:
# Make sure transactions don't nest. (Use savepoints.)
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('BEGIN')
finally:
bdb.execute('ROLLBACK')
# Make sure rollback and commit still fail outside a transaction.
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('ROLLBACK')
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('COMMIT')
# Open a transaction which we'll commit.
bdb.execute('BEGIN')
try:
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('BEGIN')
finally:
bdb.execute('COMMIT')
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('ROLLBACK')
with pytest.raises(bayeslite.BayesDBTxnError):
bdb.execute('COMMIT')
# Make sure ROLLBACK undoes the effects of the transaction.
bdb.execute('BEGIN')
try:
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
create=True)
bdb.execute('SELECT * FROM t').fetchall()
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute('ESTIMATE * FROM p').fetchall()
finally:
bdb.execute('ROLLBACK')
with pytest.raises(apsw.SQLError):
bdb.execute('SELECT * FROM t')
with pytest.raises(bayeslite.BQLError):
bdb.execute('ESTIMATE * FROM p')
# Make sure CREATE and DROP both work in the transaction.
bdb.execute('BEGIN')
try:
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
create=True)
bdb.execute('SELECT * FROM t').fetchall()
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute('ESTIMATE * FROM p').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('DROP TABLE t')
bdb.execute('DROP POPULATION p')
with pytest.raises(bayeslite.BQLError):
bdb.execute('ESTIMATE * FROM p')
bdb.execute('DROP TABLE t')
with pytest.raises(apsw.SQLError):
bdb.execute('SELECT * FROM t')
finally:
bdb.execute('ROLLBACK')
with pytest.raises(bayeslite.BQLError):
bdb.execute('ESTIMATE * FROM p')
with pytest.raises(apsw.SQLError):
bdb.execute('SELECT * FROM t')
# Make sure CREATE and DROP work even if we commit.
bdb.execute('BEGIN')
try:
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
create=True)
bdb.execute('SELECT * FROM t').fetchall()
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute('ESTIMATE * FROM p').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('DROP TABLE t')
bdb.execute('DROP POPULATION p')
with pytest.raises(bayeslite.BQLError):
bdb.execute('ESTIMATE * FROM p')
bdb.execute('DROP TABLE t')
with pytest.raises(apsw.SQLError):
bdb.execute('SELECT * FROM t')
finally:
bdb.execute('COMMIT')
with pytest.raises(bayeslite.BQLError):
bdb.execute('ESTIMATE * FROM p')
with pytest.raises(apsw.SQLError):
bdb.execute('SELECT * FROM t')
# Make sure CREATE persists if we commit.
bdb.execute('BEGIN')
try:
with open(fname, 'rU') as f:
bayeslite.bayesdb_read_csv(bdb, 't', f, header=True,
create=True)
bdb.execute('SELECT * FROM t').fetchall()
guess.bayesdb_guess_population(bdb, 'p', 't')
bdb.execute('ESTIMATE * FROM p').fetchall()
finally:
bdb.execute('COMMIT')
bdb.execute('SELECT * FROM t').fetchall()
bdb.execute('ESTIMATE * FROM p').fetchall()
# Make sure bdb.transaction works, rolls back on exception,
# and handles nesting correctly in the context of savepoints.
try:
with bdb.transaction():
bdb.sql_execute('create table quagga(x)')
raise StopIteration
except StopIteration:
pass
with pytest.raises(apsw.SQLError):
bdb.execute('select * from quagga')
with bdb.transaction():
with bdb.savepoint():
with bdb.savepoint():
pass
with bdb.savepoint():
with pytest.raises(bayeslite.BayesDBTxnError):
with bdb.transaction():
pass
# XXX To do: Make sure other effects (e.g., analysis) get
# rolled back by ROLLBACK.
def test_predprob_null():
backend = CGPM_Backend({}, multiprocess=False)
with test_core.bayesdb(backend=backend) as bdb:
bdb.sql_execute('''
create table foo (
id integer primary key not null,
x numeric,
y numeric,
z numeric
)
''')
bdb.sql_execute("insert into foo values (1, 1, 'strange', 3)")
bdb.sql_execute("insert into foo values (2, 1.2, 'strange', 1)")
bdb.sql_execute("insert into foo values (3, 0.8, 'strange', 3)")
bdb.sql_execute("insert into foo values (4, NULL, 'strange', 9)")
bdb.sql_execute("insert into foo values (5, 73, 'up', 11)")
bdb.sql_execute("insert into foo values (6, 80, 'up', -1)")
bdb.sql_execute("insert into foo values (7, 60, 'up', NULL)")
bdb.sql_execute("insert into foo values (8, 67, NULL, NULL)")
bdb.sql_execute("insert into foo values (9, 3.1415926, 'down', 1)")
bdb.sql_execute("insert into foo values (10, 1.4142135, 'down', 0)")
bdb.sql_execute("insert into foo values (11, 2.7182818, 'down', -1)")
bdb.sql_execute("insert into foo values (12, NULL, 'down', 10)")
bdb.execute('''
create population pfoo for foo (
id ignore;
x numerical;
y nominal;
z numerical;
)
''')
bdb.execute('create generator pfoo_cc for pfoo using cgpm;')
bdb.execute('initialize 1 model for pfoo_cc')
bdb.execute('analyze pfoo_cc for 1 iteration')
# Null value => null predictive probability.
assert bdb.execute('estimate predictive probability of x'
' from pfoo where id = 4;').fetchall() == \
[(None,)]
# Nonnull value => nonnull predictive probability.
x = bdb.execute('estimate predictive probability of x'
' from pfoo where id = 5').fetchall()
assert len(x) == 1
assert len(x[0]) == 1
assert isinstance(x[0][0], (int, float))
# All null values => null predictive probability.
assert bdb.execute('estimate predictive probability of (y, z)'
' from pfoo where id = 8;').fetchall() == \
[(None,)]
# Some nonnull values => nonnull predictive probability.
x = bdb.execute('estimate predictive probability of (x, z)'
' from pfoo where id = 8;').fetchall()
assert len(x) == 1
assert len(x[0]) == 1
assert isinstance(x[0][0], (int, float))
# All NULL constraints => same result regardless of given clause.
c0 = bdb.execute('estimate predictive probability of x'
' from pfoo where id = 8;')
v0 = cursor_value(c0)
assert v0 is not None
c1 = bdb.execute('estimate predictive probability of x given (y, z)'
' from pfoo where id = 8;')
v1 = cursor_value(c1)
assert relerr(v0, v1) < 0.0001
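# --- Illustrative sketch (not part of the original test suite) ---------------
# The relerr() helper used just above is defined elsewhere in this test module;
# the stand-in below is only an assumption about what such a relative-error
# check typically computes, and is named differently to avoid clashing with the
# real helper.
def _example_relative_error(expected, actual):
    # |actual - expected| scaled by |expected|, so the 0.0001 threshold above
    # would be a relative, not absolute, tolerance.
    return abs(actual - expected) / abs(expected)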
def test_guess_all():
with test_core.bayesdb() as bdb:
bdb.sql_execute('create table foo (x numeric, y numeric, z numeric)')
bdb.sql_execute('insert into foo values (1, 2, 3)')
bdb.sql_execute('insert into foo values (4, 5, 6)')
# XXX GUESS(*)
guess.bayesdb_guess_population(bdb, 'pfoo', 'foo')
def test_misc_errors():
with test_core.t1() as (bdb, _population_id, _generator_id):
with pytest.raises(bayeslite.BQLError):
bdb.execute('create table t1 as SELECT 1 FROM t1'
# t1 already exists as a table.
' limit 1')
with pytest.raises(bayeslite.BQLError):
# t1 already exists as a table.
bdb.execute('create table t1 as simulate weight from p1'
' limit 1')
with pytest.raises(bayeslite.BQLError):
# t1x does not exist as a population.
bdb.execute('create table t1_sim as simulate weight from t1x'
' limit 1')
with pytest.raises(bayeslite.BQLError):
# p1 does not have a variable waught.
bdb.execute('create table t1_sim as simulate waught from p1'
' limit 1')
with pytest.raises(bayeslite.BQLError):
# p1 does not have a variable agee.
bdb.execute('create table t1_sim as simulate weight from p1'
' given agee = 42 limit 1')
with bdb.savepoint():
bdb.sql_execute('create table t2(x)')
with pytest.raises(bayeslite.BQLError):
# t1 already exists as a table.
bdb.execute('alter table t2 rename to t1')
with pytest.raises(NotImplementedError):
# Renaming columns is not yet implemented.
bdb.execute('alter table t1 rename weight to mass')
with pytest.raises(bayeslite.BQLError):
# xcat does not exist as a backend.
bdb.execute('create generator p1_xc for p1 using xcat()')
with pytest.raises(bayeslite.BQLError):
# p1 already exists as a population.
bdb.execute('create generator p1_cc for p1;')
with pytest.raises(bayeslite.BQLError):
# multinomial is not a known statistical type.
bdb.execute('''
create population q1 for t1(
ignore id, label, weight;
weight multinomial
)
''')
with pytest.raises(bayeslite.BQLError):
# p1_xc does not exist as a generator.
bdb.execute('alter generator p1_xc rename to p1_xcat')
with bdb.savepoint():
bdb.execute('create generator p1_xc for p1;')
with pytest.raises(bayeslite.BQLError):
# p1_xc already exists as a generator.
bdb.execute('alter generator p1_cc rename to p1_xc')
with pytest.raises(bayeslite.BQLParseError):
# WAIT is not allowed.
bdb.execute('analyze p1_cc for 1 iteration wait')
with bdb.savepoint():
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 iteration')
bdb.execute('initialize 1 model for p1_xc')
bdb.execute('analyze p1_xc for 1 iteration')
with pytest.raises(apsw.SQLError):
bdb.execute('select'
' nonexistent((simulate age from p1 limit 1));')
with pytest.raises(ValueError):
bdb.execute('select :x', {'y': 42})
with pytest.raises(ValueError):
bdb.execute('select :x', {'x': 53, 'y': 42})
with pytest.raises(ValueError):
bdb.execute('select ?, ?', (1,))
with pytest.raises(ValueError):
bdb.execute('select ?', (1, 2))
with pytest.raises(TypeError):
bdb.execute('select ?', 42)
with pytest.raises(NotImplementedError):
bdb.execute('infer explicit predict age confidence ac, *'
' from p1')
with pytest.raises(NotImplementedError):
bdb.execute('infer explicit predict age confidence ac,'
' t1.(select age from t1 limit 1) from p1')
with pytest.raises(bayeslite.BQLError):
try:
bdb.execute('estimate similarity to (rowid=1)'
' in the context of agee from p1')
except bayeslite.BQLError as e:
assert 'No such columns in population:' in str(e)
raise
def test_nested_simulate():
with test_core.t1() as (bdb, _population_id, _generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 iteration')
bdb.execute('select (simulate age from p1 limit 1),'
' (simulate weight from p1 limit 1)').fetchall()
assert bdb.temp_table_name() == 'bayesdb_temp_2'
assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_0')
assert not core.bayesdb_has_table(bdb, 'bayesdb_temp_1')
bdb.execute('simulate weight from p1'
' given age = (simulate age from p1 limit 1)'
' limit 1').fetchall()
# Make sure unwinding doesn't raise an exception. Calling
# __del__ directly, rather than via del(), has two effects:
#
# (a) It actually raises any exceptions in the method, unlike
# del(), which suppresses them.
#
# (b) It may cause a subsequent __del__ to fail and raise an
# exception, so that a subsequent del(), including an implicit
# one at the end of a scope, may print a message to stderr.
#
# Effect (a) is what we are actually trying to test. Effect
# (b) is a harmless consequence as far as pytest is concerned,
# as long as the test otherwise passes.
bdb.execute('simulate weight from p1'
' given age = (simulate age from p1 limit 1)'
' limit 1').__del__()
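# --- Illustrative sketch (not part of the original test suite) ---------------
# Generic CPython illustration of the point made in the comment above: an
# exception raised inside __del__ propagates when the method is called
# explicitly, but is suppressed (only reported to stderr) when the object is
# finalized implicitly, e.g. after del drops the last reference.
class _NoisyFinalizer(object):
    def __del__(self):
        raise RuntimeError('boom from __del__')

def _example_explicit_dunder_del():
    obj = _NoisyFinalizer()
    try:
        obj.__del__()   # explicit call: the exception surfaces normally
    except RuntimeError:
        pass
    del obj             # implicit finalization: the exception is swallowed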
def test_checkpoint__ci_slow():
with test_core.t1() as (bdb, population_id, generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 10 iterations checkpoint 1 iteration')
# No checkpoint by seconds.
with pytest.raises(NotImplementedError):
bdb.execute('analyze p1_cc for 5 seconds checkpoint 1 second')
bdb.execute('drop models from p1_cc')
bdb.execute('initialize 1 model for p1_cc')
# No checkpoint by seconds.
with pytest.raises(NotImplementedError):
bdb.execute('analyze p1_cc for 5 iterations checkpoint 1 second')
bdb.execute('drop models from p1_cc')
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 iteration checkpoint 2 iterations')
def test_infer_confidence__ci_slow():
with test_core.t1() as (bdb, _population_id, _generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 iteration')
bdb.execute('infer explicit rowid, rowid as another_rowid, 4,'
' age, predict age as age_inf confidence age_conf'
' from p1').fetchall()
def test_infer_as_estimate():
with test_core.t1() as (bdb, _population_id, _generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 iteration')
bdb.execute('infer explicit predictive probability of age'
' from p1').fetchall()
def test_infer_error():
with test_core.t1() as (bdb, _population_id, _generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('infer explicit predict age confidence age_conf'
' from p1').fetchall()
with pytest.raises(bayeslite.BQLError):
bdb.execute('infer explicit predict agee confidence age_conf'
' from p1').fetchall()
def test_estimate_by():
with test_core.t1() as (bdb, _population_id, _generator_id):
bdb.execute('initialize 1 model for p1_cc')
bdb.execute('analyze p1_cc for 1 iteration')
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate predictive probability of age'
' by p1')
with pytest.raises(bayeslite.BQLError):
bdb.execute('estimate similarity to (rowid=1) '
'in the context of age by p1')
def check(x, bindings=None):
assert len(bdb.execute(x, bindings=bindings).fetchall()) == 1
check('estimate probability density of age = 42 by p1')
check('estimate dependence probability of age with weight by p1')
check('estimate mutual information of age with weight by p1')
check('estimate correlation of age with weight by p1')
check('estimate correlation pvalue of age with weight by p1')
rowid = bdb.execute('select min(rowid) from t1').fetchall()[0][0]
check('''
estimate similarity of (rowid=?) to (rowid=?)
in the context of weight by p1
''', (rowid, rowid,))
def test_empty_cursor():
with bayeslite.bayesdb_open() as bdb:
assert bdb.execute('SELECT 0').connection == bdb
empty(bdb.execute('BEGIN'))
empty(bdb.execute('COMMIT'))
empty(bdb.sql_execute('CREATE TABLE t(x, y, z)'))
empty(bdb.sql_execute('INSERT INTO t VALUES(1,2,3)'))
empty(bdb.sql_execute('INSERT INTO t VALUES(4,5,6)'))
empty(bdb.sql_execute('INSERT INTO t VALUES(7,8,9)'))
empty(bdb.execute('CREATE POPULATION p FOR t '
'(IGNORE z,y; x NOMINAL)'))
empty(bdb.execute('CREATE GENERATOR p_cc FOR p;'))
empty(bdb.execute('INITIALIZE 1 MODEL FOR p_cc'))
empty(bdb.execute('DROP GENERATOR p_cc'))
empty(bdb.execute('DROP POPULATION p'))
empty(bdb.execute('DROP TABLE t'))
def test_create_generator_ifnotexists():
# XXX Test other backends too, because they have a role in ensuring that
# this works. Their create_generator will still be called.
#
# [TRC 20160627: The above comment appears to be no longer true --
# if it was ever true.]
for using_clause in ('cgpm()',):
with bayeslite.bayesdb_open() as bdb:
bdb.sql_execute('CREATE TABLE t(x, y, z)')
bdb.sql_execute('INSERT INTO t VALUES(1,2,3)')
bdb.execute('''
CREATE POPULATION p FOR t (
x NUMERICAL;
y NUMERICAL;
z NOMINAL;
)
''')
for _i in (0, 1):
bdb.execute('CREATE GENERATOR IF NOT EXISTS p_cc FOR p USING '
+ using_clause)
try:
bdb.execute('CREATE GENERATOR p_cc FOR p USING ' + using_clause)
assert False # Should have said it exists.
except bayeslite.BQLError:
pass
def test_bql_rand():
with bayeslite.bayesdb_open() as bdb:
bdb.sql_execute('CREATE TABLE frobotz(x)')
for _ in range(10):
bdb.sql_execute('INSERT INTO frobotz VALUES(2)')
cursor = bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;')
rands = cursor.fetchall()
# These are "the" random numbers (internal PRNG is seeded to 0)
ans = [(0.28348770982811367,), (0.4789774612650598,), (0.07824908989551316,),
(0.6091223239372148,), (0.03906608409906187,), (0.3690599096081546,),
(0.8223420512129717,), (0.7777771914916722,), (0.061856771629497986,),
(0.6492586781908201,)]
assert rands == ans
def test_bql_rand2():
seed = struct.pack('<QQQQ', 0, 0, 0, 3)
with bayeslite.bayesdb_open(seed=seed) as bdb:
bdb.sql_execute('CREATE TABLE frobotz(x)')
for _ in range(10):
bdb.sql_execute('INSERT INTO frobotz VALUES(2)')
cursor = bdb.execute('SELECT bql_rand() FROM frobotz LIMIT 10;')
rands = cursor.fetchall()
ans = [(0.8351877951287725,), (0.9735099617243271,), (0.026142315910925418,),
(0.09380653289687524,), (0.1097050387582088,), (0.33154896906379605,),
(0.4579314980719317,), (0.09072802203491703,), (0.5276180968829105,),
(0.9993280772797679,)]
assert rands == ans
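# --- Illustrative sketch (not part of the original test suite) ---------------
# The seed handed to bayesdb_open in test_bql_rand2 above is four unsigned
# 64-bit words packed little-endian, i.e. a 32-byte string. This helper only
# documents that layout; it is not called by the tests.
def _example_seed_layout():
    import struct as _struct
    demo_seed = _struct.pack('<QQQQ', 0, 0, 0, 3)
    assert len(demo_seed) == 32
    assert _struct.unpack('<QQQQ', demo_seed) == (0, 0, 0, 3)
    return demo_seed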
class MockTracerOneQuery(bayeslite.IBayesDBTracer):
def __init__(self, q, qid):
self.q = q
self.qid = qid
self.start_calls = 0
self.ready_calls = 0
self.error_calls = 0
self.finished_calls = 0
self.abandoned_calls = 0
def start(self, qid, query, bindings):
assert qid == self.qid
assert query == self.q
assert bindings == ()
self.start_calls += 1
def ready(self, qid, _cursor):
assert qid == self.qid
self.ready_calls += 1
def error(self, qid, _e):
assert qid == self.qid
self.error_calls += 1
def finished(self, qid):
assert qid == self.qid
self.finished_calls += 1
def abandoned(self, qid):
assert qid == self.qid
self.abandoned_calls += 1
def test_tracing_smoke():
with test_core.t1() as (bdb, _population_id, _generator_id):
q = 'SELECT * FROM t1'
tracer = MockTracerOneQuery(q, 1)
bdb.trace(tracer)
cursor = bdb.execute(q)
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 0
assert tracer.abandoned_calls == 0
cursor.fetchall()
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 1
assert tracer.abandoned_calls == 0
del cursor
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 1
assert tracer.abandoned_calls == 1
bdb.untrace(tracer)
# XXX Make sure the whole cursor API works.
q = 'SELECT 42'
tracer = MockTracerOneQuery(q, 2)
bdb.trace(tracer)
cursor = bdb.execute(q)
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 0
assert tracer.abandoned_calls == 0
assert cursor.fetchvalue() == 42
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 1
assert tracer.abandoned_calls == 0
del cursor
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 1
assert tracer.abandoned_calls == 1
def test_tracing_error_smoke():
with test_core.t1() as (bdb, _population_id, _generator_id):
q = 'SELECT * FROM wrong'
tracer = MockTracerOneQuery(q, 1)
bdb.trace(tracer)
with pytest.raises(apsw.SQLError):
bdb.execute(q)
assert tracer.start_calls == 1
assert tracer.ready_calls == 0
assert tracer.error_calls == 1
assert tracer.finished_calls == 0
assert tracer.abandoned_calls == 0
class Boom(Exception):
pass
class ErroneousBackend(troll.TrollBackend):
def __init__(self):
self.call_ct = 0
def name(self):
return 'erroneous'
def logpdf_joint(self, *_args, **_kwargs):
if self.call_ct > 10: # Wait to avoid raising during sqlite's prefetch
raise Boom()
self.call_ct += 1
return 0
def test_tracing_execution_error_smoke():
with test_core.t1() as (bdb, _population_id, _generator_id):
bayeslite.bayesdb_register_backend(bdb, ErroneousBackend())
bdb.execute('DROP GENERATOR p1_cc')
bdb.execute('CREATE GENERATOR p1_err FOR p1 USING erroneous()')
q = 'ESTIMATE PREDICTIVE PROBABILITY OF age FROM p1'
tracer = MockTracerOneQuery(q, 1)
bdb.trace(tracer)
cursor = bdb.execute(q)
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 0
assert tracer.finished_calls == 0
assert tracer.abandoned_calls == 0
with pytest.raises(Boom):
cursor.fetchall()
assert tracer.start_calls == 1
assert tracer.ready_calls == 1
assert tracer.error_calls == 1
assert tracer.finished_calls == 0
assert tracer.abandoned_calls == 0
def test_pdf_var():
with test_core.t1() as (bdb, population_id, _generator_id):
bdb.execute('initialize 6 models for p1_cc;')
c = bdb.execute(
'estimate probability density of label = label from p1')
c.fetchall()
assert bql2sql(
'estimate probability density of label = label from p1') == \
'SELECT bql_pdf_joint(1, NULL, NULL, 1, "label") FROM "t1";'
|
jaeger_client/throttler.py | jaegertracing/jaeger-client-python | 372 | 12762102
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
from threading import Lock
from typing import Any, Optional
from tornado.ioloop import PeriodicCallback
from .constants import DEFAULT_THROTTLER_REFRESH_INTERVAL
from .metrics import Metrics, MetricsFactory
from .utils import ErrorReporter
MINIMUM_CREDITS = 1.0
default_logger = logging.getLogger('jaeger_tracing')
class Throttler(object):
def set_client_id(self, client_id: int) -> None:
"""
Called by tracer to set client ID of throttler.
"""
pass
def is_allowed(self, operation: str) -> bool:
raise NotImplementedError()
def close(self) -> None:
pass
class RemoteThrottler(Throttler):
"""
RemoteThrottler controls the flow of spans emitted from the client to
prevent flooding. RemoteThrottler periodically requests credits from the
throttling service. These credits determine the number of debug spans a
client may emit for a particular operation without receiving more credits.
:param channel: channel for communicating with jaeger-agent
:param service_name: name of this application
:param kwargs: optional parameters
- refresh_interval: interval in seconds for requesting more credits
- logger: Logger instance
- metrics_factory: factory to create throttler-specific metrics
- error_reporter: ErrorReporter instance
"""
def __init__(self, channel: Any, service_name: str, **kwargs: Any) -> None:
self.channel = channel
self.service_name = service_name
self.client_id: Optional[int] = None
self.refresh_interval = \
kwargs.get('refresh_interval', DEFAULT_THROTTLER_REFRESH_INTERVAL)
self.logger = kwargs.get('logger', default_logger)
metrics_factory = kwargs.get('metrics_factory', MetricsFactory())
self.metrics = ThrottlerMetrics(metrics_factory)
self.error_reporter = kwargs.get('error_reporter', ErrorReporter(Metrics()))
self.credits: dict = {}
self.lock = Lock()
self.running = True
self.periodic = None
if not self.channel.io_loop:
self.logger.error(
'Cannot acquire IOLoop, throttler will not be updated')
else:
self.channel.io_loop.add_callback(self._init_polling)
def is_allowed(self, operation: str) -> bool:
with self.lock:
if operation not in self.credits:
self.credits[operation] = 0.0
self.metrics.throttled_debug_spans(1)
return False
value = self.credits[operation]
if value < MINIMUM_CREDITS:
self.metrics.throttled_debug_spans(1)
return False
self.credits[operation] = value - MINIMUM_CREDITS
return True
def set_client_id(self, client_id: int) -> None:
with self.lock:
if self.client_id is None:
self.client_id = client_id
def _init_polling(self):
"""
Bootstrap polling for throttler.
To avoid spiky traffic from throttler clients, we use a random delay
before the first poll.
"""
with self.lock:
if not self.running:
return
r = random.Random()
delay = r.random() * self.refresh_interval
self.channel.io_loop.call_later(
delay=delay, callback=self._delayed_polling)
self.logger.info(
'Delaying throttling credit polling by %d sec', delay)
def _operations(self):
with self.lock:
return self.credits.keys()
def _delayed_polling(self):
def callback():
self._fetch_credits(self._operations())
periodic = PeriodicCallback(
callback=callback,
# convert interval to milliseconds
callback_time=self.refresh_interval * 1000)
self._fetch_credits(self._operations())
with self.lock:
if not self.running:
return
self.periodic = periodic
self.periodic.start()
self.logger.info(
'Throttling client started with refresh interval %d sec',
self.refresh_interval)
def _fetch_credits(self, operations):
if not operations:
return
self.logger.debug('Requesting throttling credits')
fut = self.channel.request_throttling_credits(
self.service_name, self.client_id, operations)
fut.add_done_callback(self._request_callback)
def _request_callback(self, future):
exception = future.exception()
if exception:
self.metrics.throttler_update_failure(1)
self.error_reporter.error(
'Failed to get throttling credits from jaeger-agent: %s',
exception)
return
response = future.result()
# In Python 3.5 response.body is of type bytes and json.loads() does only support str
# See: https://github.com/jaegertracing/jaeger-client-python/issues/180
if hasattr(response.body, 'decode') and callable(response.body.decode):
response_body = response.body.decode('utf-8')
else:
response_body = response.body
try:
throttling_response = json.loads(response_body)
self.logger.debug('Received throttling response: %s',
throttling_response)
self._update_credits(throttling_response)
self.metrics.throttler_update_success(1)
except Exception as e:
self.metrics.throttler_update_failure(1)
self.error_reporter.error(
'Failed to parse throttling credits response '
'from jaeger-agent: %s [%s]', e, response_body)
return
def _update_credits(self, response):
with self.lock:
for op_balance in response['balances']:
op = op_balance['operation']
balance = op_balance['balance']
if op not in self.credits:
self.credits[op] = 0
self.credits[op] += balance
self.logger.debug('credits = %s', self.credits)
def close(self) -> None:
with self.lock:
self.running = False
if self.periodic:
self.periodic.stop()
class ThrottlerMetrics(object):
"""
Metrics specific to throttler.
"""
def __init__(self, metrics_factory: MetricsFactory) -> None:
self.throttled_debug_spans = \
metrics_factory.create_counter(name='jaeger:throttled_debug_spans')
self.throttler_update_success = \
metrics_factory.create_counter(name='jaeger:throttler_update',
tags={'result': 'ok'})
self.throttler_update_failure = \
metrics_factory.create_counter(name='jaeger:throttler_update',
tags={'result': 'err'})
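# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, purely local Throttler that follows the same credit discipline as
# RemoteThrottler.is_allowed above: every allowed debug span spends
# MINIMUM_CREDITS, and an operation with no remaining credits is throttled.
# The fixed per-operation starting budget is an assumption made only for
# illustration; the real RemoteThrottler fetches its credits from jaeger-agent.
class _LocalCreditThrottler(Throttler):
    def __init__(self, credits_per_operation: float = 3.0) -> None:
        self._budget = credits_per_operation
        self._credits: dict = {}
        self._lock = Lock()

    def is_allowed(self, operation: str) -> bool:
        with self._lock:
            balance = self._credits.setdefault(operation, self._budget)
            if balance < MINIMUM_CREDITS:
                return False
            self._credits[operation] = balance - MINIMUM_CREDITS
            return True

# For example, _LocalCreditThrottler(2.0).is_allowed('op') returns True twice
# for the same operation and False afterwards, until credits are replenished.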
|
esmvaltool/cmorizers/obs/cmorize_obs_ghcn_cams.py | cffbots/ESMValTool | 148 | 12762104
"""ESMValTool CMORizer for GHCN-CAMS data.
Tier
Tier 2: other freely-available dataset.
Source
https://www.esrl.noaa.gov/psd/data/gridded/data.ghcncams.html
ftp://ftp.cdc.noaa.gov/Datasets/ghcncams/air.mon.mean.nc
Last access
20200304
"""
import logging
import os
import iris
from . import utilities as utils
logger = logging.getLogger(__name__)
def _extract_variable(short_name, var, cfg, filepath, out_dir):
"""Extract variable."""
raw_var = var.get('raw', short_name)
cube = iris.load_cube(filepath, utils.var_name_constraint(raw_var))
# Fix units
if 'raw_units' in var:
cube.units = var['raw_units']
cmor_info = cfg['cmor_table'].get_variable(var['mip'], short_name)
cube.convert_units(cmor_info.units)
utils.convert_timeunits(cube, 1950)
# Fix coordinates
utils.fix_coords(cube)
if 'height2m' in cmor_info.dimensions:
utils.add_height2m(cube)
# Fix metadata
attrs = cfg['attributes']
attrs['mip'] = var['mip']
utils.fix_var_metadata(cube, cmor_info)
utils.set_global_atts(cube, attrs)
# Save variable
utils.save_variable(cube,
short_name,
out_dir,
attrs,
unlimited_dimensions=['time'])
def cmorization(in_dir, out_dir, cfg, _):
"""Cmorization func call."""
filepath = os.path.join(in_dir, cfg['filename'])
# Run the cmorization
for (short_name, var) in cfg['variables'].items():
logger.info("CMORizing variable '%s'", short_name)
_extract_variable(short_name, var, cfg, filepath, out_dir)
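# --- Illustrative sketch (not part of the original CMORizer) -----------------
# The shape below is inferred from how cmorization() and _extract_variable()
# read ``cfg`` above; the concrete key names and values are assumptions for
# illustration only -- the real settings live in the CMORizer's YAML
# configuration and are supplied by the ESMValTool framework (including the
# ``cmor_table`` object).
def _example_cfg():
    return {
        'filename': 'air.mon.mean.nc',
        'attributes': {},      # global attributes; 'mip' is filled in per variable
        'cmor_table': None,    # CMOR table object injected by the framework
        'variables': {
            'tas': {
                'mip': 'Amon',
                'raw': 'air',        # assumed variable name in the source NetCDF
                'raw_units': 'K',    # assumed source units
            },
        },
    }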
|
examples/ServiceSchema.py | msitt/blpapi-python | 228 | 12762107 | # ServiceSchema.py
from __future__ import print_function
from __future__ import absolute_import
from optparse import OptionParser, OptionValueError
import os
import platform as plat
import sys
if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
# pylint: disable=no-member
with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
import blpapi
else:
import blpapi
REFERENCE_DATA_RESPONSE = blpapi.Name("ReferenceDataResponse")
ELEMENT_DATATYPE_NAMES = {
blpapi.DataType.BOOL: "BOOL",
blpapi.DataType.CHAR: "CHAR",
blpapi.DataType.BYTE: "BYTE",
blpapi.DataType.INT32: "INT32",
blpapi.DataType.INT64: "INT64",
blpapi.DataType.FLOAT32: "FLOAT32",
blpapi.DataType.FLOAT64: "FLOAT64",
blpapi.DataType.STRING: "STRING",
blpapi.DataType.BYTEARRAY: "BYTEARRAY",
blpapi.DataType.DATE: "DATE",
blpapi.DataType.TIME: "TIME",
blpapi.DataType.DECIMAL: "DECIMAL",
blpapi.DataType.DATETIME: "DATETIME",
blpapi.DataType.ENUMERATION: "ENUMERATION",
blpapi.DataType.SEQUENCE: "SEQUENCE",
blpapi.DataType.CHOICE: "CHOICE",
blpapi.DataType.CORRELATION_ID: "CORRELATION_ID"
}
SCHEMA_STATUS_NAMES = {
blpapi.SchemaStatus.ACTIVE: "ACTIVE",
blpapi.SchemaStatus.DEPRECATED: "DEPRECATED",
blpapi.SchemaStatus.INACTIVE: "INACTIVE",
blpapi.SchemaStatus.PENDING_DEPRECATION: "PENDING"
}
def authOptionCallback(_option, _opt, value, parser):
"""Parse authorization options from user input"""
vals = value.split('=', 1)
if value == "user":
authUser = blpapi.AuthUser.createWithLogonName()
authOptions = blpapi.AuthOptions.createWithUser(authUser)
elif value == "none":
authOptions = None
elif vals[0] == "app" and len(vals) == 2:
appName = vals[1]
authOptions = blpapi.AuthOptions.createWithApp(appName)
elif vals[0] == "userapp" and len(vals) == 2:
appName = vals[1]
authUser = blpapi.AuthUser.createWithLogonName()
authOptions = blpapi.AuthOptions\
.createWithUserAndApp(authUser, appName)
elif vals[0] == "dir" and len(vals) == 2:
activeDirectoryProperty = vals[1]
authUser = blpapi.AuthUser\
.createWithActiveDirectoryProperty(activeDirectoryProperty)
authOptions = blpapi.AuthOptions.createWithUser(authUser)
elif vals[0] == "manual":
parts = []
if len(vals) == 2:
parts = vals[1].split(',')
if len(parts) != 3:
raise OptionValueError("Invalid auth option {}".format(value))
appName, ip, userId = parts
authUser = blpapi.AuthUser.createWithManualOptions(userId, ip)
authOptions = blpapi.AuthOptions.createWithUserAndApp(authUser, appName)
else:
raise OptionValueError("Invalid auth option '{}'".format(value))
parser.values.auth = {'option' : authOptions}
def parseCmdLine():
parser = OptionParser()
parser.add_option("-a",
"--host",
dest="host",
help="HOST address to connect to",
metavar="HOST",
default="localhost")
parser.add_option("-p",
"--port",
dest="port",
type="int",
help="PORT to connect to (%default)",
metavar="PORT",
default=8194)
parser.add_option("-s",
"--service",
default="//blp/apiflds",
help="SERVICE to print the schema of "
"('//blp/apiflds' by default)")
parser.add_option("--auth",
dest="auth",
help="authentication option: "
"user|none|app=<app>|userapp=<app>|dir=<property>"
"|manual=<app,ip,user>"
" (default: user)\n"
"'none' is applicable to Desktop API product "
"that requires Bloomberg Professional service "
"to be installed locally.",
metavar="option",
action="callback",
callback=authOptionCallback,
type="string",
default={"option" :
blpapi.AuthOptions.createWithUser(
blpapi.AuthUser.createWithLogonName())})
(options, _) = parser.parse_args()
return options
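# For instance (assuming a locally running Desktop API endpoint), this example
# might be invoked as:
#
#   python ServiceSchema.py --host localhost --port 8194 \
#       --service //blp/apiflds --auth user
#
# The host, port and service values shown are simply the defaults declared
# above, not requirements.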
def printMessage(msg):
print("[{0}]: {1}".format(", ".join(map(str, msg.correlationIds())), msg))
def getIndent(level):
return "" if level == 0 else " ".ljust(level * 2)
# Print enumeration (constant list)
def printEnumeration(cl, level):
indent = getIndent(level + 1)
print(indent + " {0} {1} {2} \"{3}\" possible values:".format(
cl.name(),
SCHEMA_STATUS_NAMES[cl.status()],
ELEMENT_DATATYPE_NAMES[cl.datatype()],
cl.description()))
# Enumerate and print all constant list's values (constants)
for i in cl:
print(indent + " {0} {1} {2} \"{3}\" = {4!s}".format(
i.name(),
SCHEMA_STATUS_NAMES[i.status()],
ELEMENT_DATATYPE_NAMES[i.datatype()],
i.description(),
i.getValue()))
# Recursively print element definition
def printElementDefinition(ed, level=0):
indent = getIndent(level)
maxValues = ed.maxValues()
if maxValues == blpapi.SchemaElementDefinition.UNBOUNDED:
valuesRange = "[{0}, INF)".format(ed.minValues())
else:
valuesRange = "[{0}, {1}]".format(ed.minValues(), maxValues)
# Get and print alternate element names
alternateNames = ed.alternateNames()
if alternateNames:
alternateNames = "[{0}]".format(",".join(map(str, alternateNames)))
else:
alternateNames = ""
print(indent + "* {0} {1} {2} {3} \"{4}\"".format(
ed.name(),
SCHEMA_STATUS_NAMES[ed.status()],
valuesRange,
alternateNames,
ed.description()))
# Get and print related type definition
td = ed.typeDefinition()
print(indent + " {0} {1} {2} {3}{4}{5}\"{6}\"".format(
td.name(),
SCHEMA_STATUS_NAMES[td.status()],
ELEMENT_DATATYPE_NAMES[td.datatype()],
"complex " if td.isComplexType() else "",
"simple " if td.isSimpleType() else "",
"enum " if td.isEnumerationType() else "",
td.description()))
# Get and print all possible values for enumeration type
enumeration = td.enumeration()
    if enumeration is not None:
printEnumeration(enumeration, level)
if td.numElementDefinitions():
print(indent + " Elements[{0}]:".format(
td.numElementDefinitions()))
# Enumerate and print all sub-element definitions
for i in td.elementDefinitions():
printElementDefinition(i, level + 1)
def printOperation(operation, _service):
print("{0} \"{1}\" Request:".format(
operation.name(),
operation.description()))
# Print operation's request definition
printElementDefinition(operation.requestDefinition(), 1)
print("Responses[{0}]:".format(operation.numResponseDefinitions()))
    # Enumerate and print all of the operation's response definitions
for r in operation.responseDefinitions():
printElementDefinition(r, 1)
print()
def main():
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(options.host)
sessionOptions.setServerPort(options.port)
sessionOptions.setSessionIdentityOptions(options.auth['option'])
# Create a Session
session = blpapi.Session(sessionOptions)
# Start a Session
if not session.start():
raise Exception("Can't start session.")
try:
print("Session started.")
# Open service to get reference data from
if not session.openService(options.service):
raise Exception("Can't open '{0}' service.".format(
options.service))
# Obtain previously opened service
service = session.getService(options.service)
print("Service {0}:".format(options.service))
print("Service event definitions[{0}]:".format(
service.numEventDefinitions()))
        # Enumerate and print all of the service's event definitions
for ed in service.eventDefinitions():
printElementDefinition(ed)
print()
print("Operations[{0}]:".format(service.numOperations()))
        # Enumerate and print all of the service's operations
for operation in service.operations():
printOperation(operation, service)
finally:
# Stop the session
session.stop()
if __name__ == "__main__":
print("ServiceSchema")
try:
main()
except KeyboardInterrupt:
print("Ctrl+C pressed. Stopping...")
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
|
save_raw_fea.py | insad/pytorch-kaldi | 2,248 | 12762126 | ##########################################################
# pytorch-kaldi v.0.1
# <NAME>, <NAME>
# Mila, University of Montreal
# October 2018
#
# Description: This script generates kaldi ark files containing raw features.
# The file list must be a file containing "snt_id file.wav".
# Note that only wav files are supported here (sphere or other formats are not supported)
##########################################################
import scipy.io.wavfile
import math
import numpy as np
import os
from data_io import read_vec_int_ark, write_mat
# Run it for all the data chunks (e.g., train, dev, test) => uncomment
lab_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/exp/dnn4_pretrain-dbn_dnn_ali_test"
lab_opts = "ali-to-pdf"
out_folder = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test"
wav_lst = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/test/wav.lst"
scp_file_out = "/users/parcollet/KALDI/kaldi-trunk/egs/timit/s5/data/raw_TIMIT_200ms/test/feats_raw.scp"
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_dev'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/dev'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/dev/wav_lst.scp'
# scp_file_out='quick_test/data/dev/feats_raw.scp'
# lab_folder='quick_test/dnn4_pretrain-dbn_dnn_ali_test'
# lab_opts='ali-to-pdf'
# out_folder='raw_TIMIT_200ms/test'
# wav_lst='/home/mirco/pytorch-kaldi-new/quick_test/data/test/wav_lst.scp'
# scp_file_out='quick_test/data/test/feats_raw.scp'
sig_fs = 16000 # Hz
sig_wlen = 200 # ms
lab_fs = 16000 # Hz
lab_wlen = 25 # ms
lab_wshift = 10 # ms
sig_wlen_samp = int((sig_fs * sig_wlen) / 1000)
lab_wlen_samp = int((lab_fs * lab_wlen) / 1000)
lab_wshift_samp = int((lab_fs * lab_wshift) / 1000)
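# Worked example with the settings above (the values follow directly from the
# formulas): sig_wlen_samp = 16000 * 200 / 1000 = 3200 samples,
# lab_wlen_samp = 16000 * 25 / 1000 = 400 samples and
# lab_wshift_samp = 16000 * 10 / 1000 = 160 samples, which is why the frame
# count check near the end of the script uses (len(signal) - 400) / 160.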
# Create the output folder
try:
os.stat(out_folder)
except:
os.makedirs(out_folder)
# Create the scp file
scp_file = open(scp_file_out, "w")
# reading the labels
lab = {
k: v
for k, v in read_vec_int_ark(
"gunzip -c " + lab_folder + "/ali*.gz | " + lab_opts + " " + lab_folder + "/final.mdl ark:- ark:-|", out_folder
)
}
# reading the list file
with open(wav_lst) as f:
sig_lst = f.readlines()
sig_lst = [x.strip() for x in sig_lst]
for sig_file in sig_lst:
sig_id = sig_file.split(" ")[0]
sig_path = sig_file.split(" ")[1]
[fs, signal] = scipy.io.wavfile.read(sig_path)
signal = signal.astype(float) / 32768
signal = signal / np.max(np.abs(signal))
cnt_fr = 0
beg_samp = 0
frame_all = []
while beg_samp + lab_wlen_samp < signal.shape[0]:
sample_fr = np.zeros(sig_wlen_samp)
central_sample_lab = int(((beg_samp + lab_wlen_samp / 2) - 1))
central_fr_index = int(((sig_wlen_samp / 2) - 1))
beg_signal_fr = int(central_sample_lab - (sig_wlen_samp / 2))
end_signal_fr = int(central_sample_lab + (sig_wlen_samp / 2))
if beg_signal_fr >= 0 and end_signal_fr <= signal.shape[0]:
sample_fr = signal[beg_signal_fr:end_signal_fr]
else:
if beg_signal_fr < 0:
n_left_samples = central_sample_lab
sample_fr[central_fr_index - n_left_samples + 1 :] = signal[0:end_signal_fr]
if end_signal_fr > signal.shape[0]:
n_right_samples = signal.shape[0] - central_sample_lab
sample_fr[0 : central_fr_index + n_right_samples + 1] = signal[beg_signal_fr:]
frame_all.append(sample_fr)
cnt_fr = cnt_fr + 1
beg_samp = beg_samp + lab_wshift_samp
frame_all = np.asarray(frame_all)
# Save the matrix into a kaldi ark
out_file = out_folder + "/" + sig_id + ".ark"
write_mat(out_folder, out_file, frame_all, key=sig_id)
print(sig_id)
scp_file.write(sig_id + " " + out_folder + "/" + sig_id + ".ark:" + str(len(sig_id) + 1) + "\n")
N_fr_comp = 1 + math.floor((signal.shape[0] - 400) / 160)
# print("%s %i %i "%(lab[sig_id].shape[0],N_fr_comp,cnt_fr))
scp_file.close()
|
tensorflow_privacy/privacy/estimators/v1/dnn_test.py | amad-person/privacy | 2,327 | 12762166 | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for DP-enabled DNNClassifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_privacy.privacy.estimators import test_utils
from tensorflow_privacy.privacy.estimators.v1 import dnn
from tensorflow_privacy.privacy.optimizers.dp_optimizer import DPGradientDescentGaussianOptimizer
class DPDNNClassifierTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for DP-enabled DNNClassifier."""
@parameterized.named_parameters(
('BinaryClassDNN', 2),
('MultiClassDNN 3', 3),
('MultiClassDNN 4', 4),
)
def testDNN(self, n_classes):
train_features, train_labels = test_utils.make_input_data(256, n_classes)
feature_columns = []
for key in train_features:
feature_columns.append(tf.feature_column.numeric_column(key=key))
optimizer = functools.partial(
DPGradientDescentGaussianOptimizer,
learning_rate=0.5,
l2_norm_clip=1.0,
noise_multiplier=0.0,
num_microbatches=1)
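    # With noise_multiplier=0.0 the DP optimizer only clips per-example
    # gradients and adds no Gaussian noise, so this exercises the training
    # plumbing rather than providing an actual privacy guarantee.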
classifier = dnn.DNNClassifier(
hidden_units=[10],
activation_fn='relu',
feature_columns=feature_columns,
n_classes=n_classes,
optimizer=optimizer,
loss_reduction=tf.losses.Reduction.NONE)
classifier.train(
input_fn=test_utils.make_input_fn(train_features, train_labels, True,
16))
test_features, test_labels = test_utils.make_input_data(64, n_classes)
classifier.evaluate(
input_fn=test_utils.make_input_fn(test_features, test_labels, False,
16))
predict_features, predict_labels = test_utils.make_input_data(64, n_classes)
classifier.predict(
input_fn=test_utils.make_input_fn(predict_features, predict_labels,
False))
if __name__ == '__main__':
tf.test.main()
|
hs_core/management/commands/add_owner.py | hydroshare/hydroshare | 178 | 12762176 | """ Add an owner to a resource or resources
Usage: add_owner {username} {resource list}
"""
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from hs_core.models import BaseResource
from hs_core.hydroshare.utils import get_resource_by_shortkey
from hs_access_control.models.privilege import UserResourcePrivilege, PrivilegeCodes
from django_irods.icommands import SessionException
from django.db import transaction
def set_quota_holder(resource, user):
try:
resource.set_quota_holder(user, user)
except SessionException as ex:
# some resources copied from www for testing do not exist in the iRODS backend,
        # hence we need to skip these test artifacts
print(resource.short_id + ' raised SessionException when setting quota holder: ' +
ex.stderr)
except AttributeError as ex:
# when federation is not set up correctly, istorage does not have a session
# attribute, hence raise AttributeError - ignore for testing
print((resource.short_id + ' raised AttributeError when setting quota holder: ' +
str(ex)))
except ValueError as ex:
# when federation is not set up correctly, istorage does not have a session
        # attribute, hence ValueError may be raised - ignore for testing
print((resource.short_id + ' raised ValueError when setting quota holder: ' +
str(ex)))
class Command(BaseCommand):
help = "add owner to resource"
def add_arguments(self, parser):
parser.add_argument('new_owner', type=str)
parser.add_argument(
'--owned_by',
dest='owned_by',
help='prior owner of the resources'
)
parser.add_argument(
'--set_quota_holder',
action='store_true', # True for presence, False for absence
dest='set_quota_holder', # value is options['set_quota_holder']
help='set quota holder as new owner')
# a list of resource id's: none does nothing.
parser.add_argument('resource_ids', nargs='*', type=str)
def handle(self, *args, **options):
user = User.objects.get(username=options['new_owner'])
admin = User.objects.get(username='admin')
if options['owned_by'] is not None:
prior = User.objects.get(username=options['owned_by'])
for res in BaseResource.objects.filter(r2urp__user=prior,
r2urp__privilege=PrivilegeCodes.OWNER):
with transaction.atomic():
resource = res.get_content_model()
UserResourcePrivilege.share(user=user,
resource=resource,
privilege=PrivilegeCodes.OWNER,
grantor=admin)
print("added owner {} to {}".format(options['new_owner'], resource.short_id))
if options['set_quota_holder']:
set_quota_holder(resource, user)
print("set quota holder to {} for {}".format(options['new_owner'],
resource.short_id))
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
resource = get_resource_by_shortkey(rid, or_404=False)
with transaction.atomic():
UserResourcePrivilege.share(user=user,
resource=resource,
privilege=PrivilegeCodes.OWNER,
grantor=admin)
print("added owner {} to {}".format(options['new_owner'], rid))
if options['set_quota_holder']:
set_quota_holder(resource, user)
print("set quota holder to {} for {}".format(options['new_owner'],
resource.short_id))
|
tests/test_primitive_data/test_real.py | amih90/bacpypes | 240 | 12762181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Primitive Data Real
------------------------
"""
import unittest
import struct
import math
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, xtob
from bacpypes.errors import InvalidTag
from bacpypes.primitivedata import Real, Tag
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
def real_tag(x):
"""Convert a hex string to an real application tag."""
if _debug: real_tag._debug("real_tag %r", x)
b = xtob(x)
tag = Tag(Tag.applicationTagClass, Tag.realAppTag, len(b), b)
if _debug: real_tag._debug(" - tag: %r", tag)
return tag
@bacpypes_debugging
def real_encode(obj):
"""Encode an Real object into a tag."""
if _debug: real_encode._debug("real_encode %r", obj)
tag = Tag()
obj.encode(tag)
if _debug: real_encode._debug(" - tag: %r, %r", tag, tag.tagData)
return tag
@bacpypes_debugging
def real_decode(tag):
"""Decode an real application tag into an real."""
if _debug: real_decode._debug("real_decode %r", tag)
obj = Real(tag)
if _debug: real_decode._debug(" - obj: %r, %r", obj, obj.value)
return obj
@bacpypes_debugging
def real_endec(v, x):
"""Pass the value to Real, construct a tag from the hex string,
and compare results of encode and decoding each other."""
if _debug: real_endec._debug("real_endec %r %r", v, x)
tag = real_tag(x)
if _debug: real_endec._debug(" - tag: %r, %r", tag, tag.tagData)
obj = Real(v)
if _debug: real_endec._debug(" - obj: %r, %r", obj, obj.value)
assert real_encode(obj) == tag
if _debug: real_endec._debug(" - tags match")
if math.isnan(v):
assert math.isnan(real_decode(tag).value)
if _debug: real_endec._debug(" - both NaN")
else:
assert real_decode(tag) == obj
if _debug: real_endec._debug(" - objects match")
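# Quick sanity check of the payloads used in the tests below; these follow from
# standard IEEE-754 single precision encoding and do not depend on bacpypes:
#   struct.pack('>f', 1.0).hex()  -> '3f800000'
#   struct.pack('>f', 73.5).hex() -> '42930000'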
@bacpypes_debugging
class TestReal(unittest.TestCase):
def test_real(self):
if _debug: TestReal._debug("test_real")
obj = Real()
assert obj.value == 0.0
with self.assertRaises(TypeError):
Real("some string")
def test_real_real(self):
if _debug: TestReal._debug("test_real_real")
obj = Real(1.0)
assert obj.value == 1.0
assert str(obj) == "Real(1)"
obj = Real(73.5)
assert obj.value == 73.5
assert str(obj) == "Real(73.5)"
def test_real_tag(self):
if _debug: TestReal._debug("test_real_tag")
tag = Tag(Tag.applicationTagClass, Tag.realAppTag, 1, xtob('3f800000'))
obj = Real(tag)
assert obj.value == 1.0
tag = Tag(Tag.applicationTagClass, Tag.booleanAppTag, 0, xtob(''))
with self.assertRaises(InvalidTag):
Real(tag)
tag = Tag(Tag.contextTagClass, 0, 1, xtob('ff'))
with self.assertRaises(InvalidTag):
Real(tag)
tag = Tag(Tag.openingTagClass, 0)
with self.assertRaises(InvalidTag):
Real(tag)
def test_real_copy(self):
if _debug: TestReal._debug("test_real_copy")
obj1 = Real(12)
obj2 = Real(obj1)
assert obj2.value == 12
def test_real_endec(self):
if _debug: TestReal._debug("test_real_endec")
with self.assertRaises(InvalidTag):
obj = Real(real_tag(''))
real_endec(0, '00000000')
real_endec(1, '3f800000')
real_endec(-1, 'bf800000')
real_endec(73.5, '42930000')
inf = float('inf')
real_endec(inf, '7f800000')
real_endec(-inf, 'ff800000')
nan = float('nan')
real_endec(nan, '7fc00000') |
insights/parsers/named_conf.py | lhuett/insights-core | 121 | 12762200 | """
NamedConf parser - file ``/etc/named.conf``
===========================================
NamedConf parses the named configuration file.
Named is a name server used by BIND.
"""
from insights.specs import Specs
from insights.core.plugins import parser
from insights.parsers import SkipException
from insights.parsers.named_checkconf import NamedCheckconf
@parser(Specs.named_conf)
class NamedConf(NamedCheckconf):
"""
    Class for parsing the file ``/etc/named.conf``. We use the class ``NamedCheckconf`` to parse most
    of the named.conf configuration and the class ``NamedConf`` to parse the `include` directives.
.. note::
        Please refer to the super-class :py:class:`insights.parsers.named_checkconf.NamedCheckconf`
for more usage information.
Attributes:
includes (list): List of files in 'include' section.
Raises:
SkipException: When content is empty or cannot be parsed.
Examples:
>>> named_conf.includes
['/etc/crypto-policies/back-ends/bind.config']
"""
def parse_content(self, content):
includes = []
super(NamedConf, self).parse_content(content)
try:
for line in [l for l in content if l.strip().startswith('include ') and ';' in l]:
includes.append(line.split(';')[0].replace('"', '').split()[1])
except IndexError:
raise SkipException("Syntax error of include directive")
self.includes = includes
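        # For reference, a directive such as
        #   include "/etc/crypto-policies/back-ends/bind.config";
        # is reduced above to '/etc/crypto-policies/back-ends/bind.config' by
        # splitting off the trailing ';', stripping the quotes and dropping the
        # leading 'include' keyword.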
|
eval/src/tests/tensor/onnx_wrapper/dynamic.py | Anlon-Burke/vespa | 4,054 | 12762204 | # Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import onnx
from onnx import helper, TensorProto
QUERY_TENSOR = helper.make_tensor_value_info('query_tensor', TensorProto.FLOAT, ['batch', 4])
ATTRIBUTE_TENSOR = helper.make_tensor_value_info('attribute_tensor', TensorProto.FLOAT, [4, 1])
BIAS_TENSOR = helper.make_tensor_value_info('bias_tensor', TensorProto.FLOAT, ['batch', -1])
OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, ['batch', 1])
nodes = [
helper.make_node(
'MatMul',
['query_tensor', 'attribute_tensor'],
['matmul'],
),
helper.make_node(
'ReduceSum',
['bias_tensor'],
['reduce'],
axes=[1]
),
helper.make_node(
'Add',
['matmul', 'reduce'],
['output'],
),
]
graph_def = helper.make_graph(
nodes,
'dynamic_scoring',
[
QUERY_TENSOR,
ATTRIBUTE_TENSOR,
BIAS_TENSOR,
],
[OUTPUT],
)
model_def = helper.make_model(graph_def, producer_name='dynamic.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
onnx.save(model_def, 'dynamic.onnx')
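# The graph built above computes output = (query_tensor @ attribute_tensor)
# + ReduceSum(bias_tensor, axes=[1]). A minimal local check could look like the
# commented sketch below; onnxruntime is an assumption here and is not required
# by the surrounding test setup:
#   import numpy as np, onnxruntime
#   sess = onnxruntime.InferenceSession('dynamic.onnx')
#   out = sess.run(['output'], {'query_tensor': np.ones((2, 4), np.float32),
#                               'attribute_tensor': np.ones((4, 1), np.float32),
#                               'bias_tensor': np.ones((2, 3), np.float32)})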
|
src/utils/embeddingvis.py | fatterbetter/CodeSearchNet | 1,681 | 12762227 | <reponame>fatterbetter/CodeSearchNet
#!/usr/bin/env python
"""
Usage:
embeddingvis.py [options] plot-tsne (--code | --query) MODEL_PATH
embeddingvis.py [options] print-nns (--code | --query) MODEL_PATH DISTANCE_THRESHOLD
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--distance-metric METRIC The distance metric to use [default: cosine]
--num-nns NUM The number of nearest neighbors to show when print-nns. [default: 2]
--lim-items NUM Maximum number of items to use. Useful when memory is limited. [default: -1]
-h --help Show this screen.
--hypers-override HYPERS JSON dictionary overriding hyperparameter values.
--language LANG The code language to use. Only when --code option is given. [default: python]
--debug Enable debug routines. [default: False]
"""
from docopt import docopt
from dpu_utils.utils import RichPath, run_and_debug
from sklearn.manifold import TSNE
import numpy as np
from scipy.spatial.distance import pdist
import matplotlib.pyplot as plt
import model_restore_helper
from utils.visutils import square_to_condensed
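# Note: pdist returns distances in condensed (1-D) form; square_to_condensed
# maps a pair of square-matrix indices (i, j) onto the matching condensed index
# so the full N x N distance matrix never has to be materialized.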
def run(arguments) -> None:
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_PATH'], azure_info_path=azure_info_path)
model = model_restore_helper.restore(
path=model_path,
is_train=False)
if arguments['--query']:
embeddings, elements = model.get_query_token_embeddings()
else:
embeddings, elements = model.get_code_token_embeddings(arguments['--language'])
max_num_elements = int(arguments['--lim-items'])
if max_num_elements > 0:
embeddings, elements = embeddings[:max_num_elements], elements[:max_num_elements]
print(f'Collected {len(elements)} elements to visualize.')
embeddings = model.sess.run(fetches=embeddings)
if arguments['plot-tsne']:
emb_2d = TSNE(n_components=2, verbose=1, metric=arguments['--distance-metric']).fit_transform(embeddings)
plt.scatter(emb_2d[:, 0], emb_2d[:, 1])
for i in range(len(elements)):
plt.annotate(elements[i], xy=(emb_2d[i,0], emb_2d[i,1]))
plt.show()
elif arguments['print-nns']:
flat_distances = pdist(embeddings, arguments['--distance-metric'])
num_nns = int(arguments['--num-nns'])
for i, element in enumerate(elements):
distance_from_i = np.fromiter(
(flat_distances[square_to_condensed(i, j, len(elements))] if i != j else float('inf') for j in
range(len(elements))), dtype=np.float)
            nns = [int(k) for k in np.argsort(distance_from_i)[:num_nns]]  # The first num_nns nearest neighbors
if distance_from_i[nns[0]] > float(arguments['DISTANCE_THRESHOLD']):
continue
try:
print(f'{element} --> ' + ', '.join(f'{elements[n]} ({distance_from_i[n]:.2f})' for n in nns))
            except Exception:
print('Error printing token for nearest neighbors pair.')
if __name__ == '__main__':
args = docopt(__doc__)
run_and_debug(lambda: run(args), args.get('--debug', False)) |
dirigible/fts/tests/test_2734_ClearCells.py | EnoX1/dirigible-spreadsheet | 168 | 12762233 | # Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from functionaltest import FunctionalTest
import key_codes
from textwrap import dedent
class Test_2734_ClearCells(FunctionalTest):
def test_delete_key_clears_selected_cells(self):
self.assert_key_deletes_cells(key_codes.DELETE)
def test_backspace_key_clears_selected_cells(self):
self.assert_key_deletes_cells(key_codes.BACKSPACE)
def assert_key_deletes_cells(self, key_code):
# * Harold logs in and creates a new sheet
self.login_and_create_new_sheet()
# * He enters some data in A1:A3
self.enter_cell_text(1, 1, 'a1')
self.enter_cell_text(1, 2, 'a2')
self.enter_cell_text(1, 3, 'a3')
self.wait_for_cell_value(1, 3, 'a3')
# * He clicks on A1 and hits delete
self.click_on_cell(1, 1)
self.human_key_press(key_code)
# * He sees the value in A1 disappear while the others remain
self.wait_for_cell_value(1, 1, '')
self.wait_for_cell_value(1, 2, 'a2')
self.wait_for_cell_value(1, 3, 'a3')
# * He selects the range a2:a3
self.select_range_with_shift_click((1, 2), (1, 3))
# He hits delete
self.human_key_press(key_code)
# * He sees that all the cells are now cleared
self.wait_for_cell_value(1, 1, '')
self.wait_for_cell_value(1, 2, '')
self.wait_for_cell_value(1, 3, '')
def test_delete_key_while_editing_still_does_what_it_should(self):
# * Harold logs in and creates a new sheet
self.login_and_create_new_sheet()
# * He enters three characters in A1
self.open_cell_for_editing(1, 1)
self.human_key_press(key_codes.NUMBER_1)
self.human_key_press(key_codes.NUMBER_2)
self.human_key_press(key_codes.NUMBER_3)
# * He moves left twice
self.human_key_press(key_codes.LEFT)
self.human_key_press(key_codes.LEFT)
# He hits delete
self.human_key_press(key_codes.DELETE)
# the middle character is now missing
self.wait_for_cell_editor_content('13')
def test_backspace_key_while_editing_still_does_what_it_should(self):
# * Harold logs in and creates a new sheet
self.login_and_create_new_sheet()
# * He enters three characters in A1
self.open_cell_for_editing(1, 1)
self.human_key_press(key_codes.NUMBER_1)
self.human_key_press(key_codes.NUMBER_2)
self.human_key_press(key_codes.NUMBER_3)
# * He moves left once
self.human_key_press(key_codes.LEFT)
# He hits backspace
self.human_key_press(key_codes.BACKSPACE)
# the middle character is now missing
self.wait_for_cell_editor_content('13')
def test_can_clear_cell_from_usercode(self):
# * Harold logs in and creates a new sheet
self.login_and_create_new_sheet()
# * He enters some data in A1:A3
self.enter_cell_text(1, 1, 'a1')
self.enter_cell_text(1, 2, 'a2')
self.enter_cell_text(1, 3, 'a3')
self.wait_for_cell_value(1, 3, 'a3')
# * He tries to use the clear() function from usercode on a cell
# and then tries to access some of the supposedly cleared attributes of the cell
self.prepend_usercode(dedent('''
worksheet.a1.error = 'harold puts a deliberate pointless error in'
worksheet.a1.clear()
worksheet.b1.formula = str(worksheet.a1.value)
worksheet.b2.formula = str(worksheet.a1.formula)
worksheet.b3.formula = str(worksheet.a1.formatted_value)
worksheet.b4.formula = str(worksheet.a1.error)
'''))
# * He sees the value in a1 disappear
self.wait_for_cell_value(1, 1, '')
self.wait_for_cell_value(1, 2, 'a2')
self.wait_for_cell_value(1, 3, 'a3')
# * He sees his little investigations also produce the expected results
self.wait_for_cell_value(2, 1, '<undefined>')
self.wait_for_cell_value(2, 2, 'None')
self.wait_for_cell_value(2, 3, '')
self.wait_for_cell_value(2, 4, 'None')
def test_can_clear_cell_range_from_usercode(self):
# * Harold logs in and creates a new sheet
self.login_and_create_new_sheet()
# * He enters some data in A1:A3
self.enter_cell_text(1, 1, 'a1')
self.enter_cell_text(1, 2, 'a2')
self.enter_cell_text(1, 3, 'a3')
self.wait_for_cell_value(1, 3, 'a3')
# * He tries to use the clear() function from usercode on a cell range
self.prepend_usercode(dedent('''
worksheet.a1.error = 'harold puts a deliberate pointless error in'
worksheet.a2.error = 'harold puts another deliberate pointless error in'
worksheet.cell_range("a1:a2").clear()
worksheet.b1.formula = str(worksheet.a1.value)
worksheet.b2.formula = str(worksheet.a1.formula)
worksheet.b3.formula = str(worksheet.a1.formatted_value)
worksheet.b4.formula = str(worksheet.a1.error)
worksheet.c1.formula = str(worksheet.a2.value)
worksheet.c2.formula = str(worksheet.a2.formula)
worksheet.c3.formula = str(worksheet.a2.formatted_value)
worksheet.c4.formula = str(worksheet.a2.error)
'''))
# * He sees the value in a1 and a2 disappear
self.wait_for_cell_value(1, 1, '')
self.wait_for_cell_value(1, 2, '')
self.wait_for_cell_value(1, 3, 'a3')
# * He sees his little investigations also produce the expected results
self.wait_for_cell_value(2, 1, '<undefined>')
self.wait_for_cell_value(2, 2, 'None')
self.wait_for_cell_value(2, 3, '')
self.wait_for_cell_value(2, 4, 'None')
self.wait_for_cell_value(3, 1, '<undefined>')
self.wait_for_cell_value(3, 2, 'None')
self.wait_for_cell_value(3, 3, '')
self.wait_for_cell_value(3, 4, 'None')
|
libcity/data/dataset/cstn_dataset.py | moghadas76/test_bigcity | 221 | 12762247 | <reponame>moghadas76/test_bigcity
import os
import numpy as np
from libcity.data.dataset import TrafficStateGridOdDataset
from libcity.data.utils import generate_dataloader
from libcity.utils import ensure_dir
class CSTNDataset(TrafficStateGridOdDataset):
def __init__(self, config):
super().__init__(config)
self.feature_name = {'X': 'float', 'W': 'float', 'y': 'float'}
def _generate_ext_data(self, ext_data):
num_samples = ext_data.shape[0]
offsets = np.sort(np.concatenate((np.arange(-self.input_window - self.output_window + 1, 1, 1),)))
min_t = abs(min(offsets))
max_t = abs(num_samples - abs(max(offsets)))
W = []
for t in range(min_t, max_t):
W_t = ext_data[t + offsets, ...]
W.append(W_t)
W = np.stack(W, axis=0)
return W
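    # _generate_ext_data slides a window of length (input_window + output_window)
    # over the external features, so W[t] is intended to stack the external-feature
    # rows that line up with the corresponding (X, y) sample from _generate_input_data.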
def _generate_data(self):
"""
加载数据文件(.gridod)和外部数据(.ext),以X, W, y的形式返回
Returns:
tuple: tuple contains:
X(np.ndarray): 模型输入数据,(num_samples, input_length, ..., feature_dim) \n
W(np.ndarray): 模型外部数据,(num_samples, input_length, ext_dim)
y(np.ndarray): 模型输出数据,(num_samples, output_length, ..., feature_dim)
"""
        # Handle the case of multiple data files
if isinstance(self.data_files, list):
data_files = self.data_files.copy()
else:
data_files = [self.data_files].copy()
        # Load the external data
ext_data = self._load_ext() # (len_time, ext_dim)
W = self._generate_ext_data(ext_data)
        # Load the basic feature data
X_list, y_list = [], []
for filename in data_files:
df = self._load_dyna(filename) # (len_time, ..., feature_dim)
X, y = self._generate_input_data(df)
# x: (num_samples, input_length, input_dim)
# y: (num_samples, output_length, ..., output_dim)
X_list.append(X)
y_list.append(y)
X = np.concatenate(X_list)
y = np.concatenate(y_list)
df = self._load_dyna(data_files[0]).squeeze()
self._logger.info("Dataset created")
self._logger.info("X shape: {}, W shape: {}, y shape: ".format(str(X.shape), str(W.shape), y.shape))
return X, W, y
def _split_train_val_test(self, X, W, y):
test_rate = 1 - self.train_rate - self.eval_rate
num_samples = X.shape[0]
num_test = round(num_samples * test_rate)
num_train = round(num_samples * self.train_rate)
num_eval = num_samples - num_test - num_train
# train
x_train, w_train, y_train = X[:num_train], W[:num_train], y[:num_train]
# eval
x_eval, w_eval, y_eval = X[num_train: num_train + num_eval], \
W[num_train: num_train + num_eval], y[num_train: num_train + num_eval]
# test
x_test, w_test, y_test = X[-num_test:], W[-num_test:], y[-num_test:]
# log
self._logger.info(
"train\tX: {}, W: {}, y: {}".format(str(x_train.shape), str(w_train.shape), str(y_train.shape)))
self._logger.info("eval\tX: {}, W: {}, y: {}".format(str(x_eval.shape), str(w_eval.shape), str(y_eval.shape)))
self._logger.info("test\tX: {}, W: {}, y: {}".format(str(x_test.shape), str(w_test.shape), str(y_test.shape)))
return x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test
def _load_cache_train_val_test(self):
self._logger.info('Loading ' + self.cache_file_name)
cat_data = np.load(self.cache_file_name)
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = \
cat_data['x_train'], cat_data['w_train'], cat_data['y_train'], cat_data['x_eval'], cat_data['w_eval'], \
cat_data['y_eval'], cat_data['x_test'], cat_data['w_test'], cat_data['y_test']
self._logger.info(
"train\tX: {}, W: {}, y: {}".format(str(x_train.shape), str(w_train.shape), str(y_train.shape)))
self._logger.info("eval\tX: {}, W: {}, y: {}".format(str(x_eval.shape), str(w_eval.shape), str(y_eval.shape)))
self._logger.info("test\tX: {}, W: {}, y: {}".format(str(x_test.shape), str(w_test.shape), str(y_test.shape)))
return x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test
def _generate_train_val_test(self):
X, W, y = self._generate_data()
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = self._split_train_val_test(X, W, y)
if self.cache_dataset:
ensure_dir(self.cache_file_folder)
np.savez_compressed(
self.cache_file_name,
x_train=x_train,
w_train=w_train,
y_train=y_train,
x_test=x_test,
w_test=w_test,
y_test=y_test,
x_eval=x_eval,
w_eval=w_eval,
y_eval=y_eval,
)
self._logger.info('Saved at ' + self.cache_file_name)
return x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test
def get_data(self):
        # Load the dataset
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = [], [], [], [], [], [], [], [], []
if self.data is None:
if self.cache_dataset and os.path.exists(self.cache_file_name):
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = self._load_cache_train_val_test()
else:
x_train, w_train, y_train, x_eval, w_eval, y_eval, x_test, w_test, y_test = self._generate_train_val_test()
        # Normalize the data
self.feature_dim = x_train.shape[-1]
self.ext_dim = w_train.shape[-1]
self.scaler = self._get_scalar(self.scaler_type, x_train, y_train)
x_train[..., :self.output_dim] = self.scaler.transform(x_train[..., :self.output_dim])
w_train[..., :self.output_dim] = self.scaler.transform(w_train[..., :self.output_dim])
y_train[..., :self.output_dim] = self.scaler.transform(y_train[..., :self.output_dim])
x_eval[..., :self.output_dim] = self.scaler.transform(x_eval[..., :self.output_dim])
w_eval[..., :self.output_dim] = self.scaler.transform(w_eval[..., :self.output_dim])
y_eval[..., :self.output_dim] = self.scaler.transform(y_eval[..., :self.output_dim])
x_test[..., :self.output_dim] = self.scaler.transform(x_test[..., :self.output_dim])
w_test[..., :self.output_dim] = self.scaler.transform(w_test[..., :self.output_dim])
y_test[..., :self.output_dim] = self.scaler.transform(y_test[..., :self.output_dim])
train_data = list(zip(x_train, w_train, y_train))
eval_data = list(zip(x_eval, w_eval, y_eval))
test_data = list(zip(x_test, w_test, y_test))
        # Convert to DataLoader objects
self.train_dataloader, self.eval_dataloader, self.test_dataloader = \
generate_dataloader(train_data, eval_data, test_data, self.feature_name,
self.batch_size, self.num_workers, pad_with_last_sample=self.pad_with_last_sample)
self.num_batches = len(self.train_dataloader)
return self.train_dataloader, self.eval_dataloader, self.test_dataloader
def get_data_feature(self):
"""
返回数据集特征,scaler是归一化方法,adj_mx是邻接矩阵,num_nodes是网格的个数,
len_row是网格的行数,len_column是网格的列数,
feature_dim是输入数据的维度,output_dim是模型输出的维度
Returns:
dict: 包含数据集的相关特征的字典
"""
return {"scaler": self.scaler,
"num_nodes": self.num_nodes, "feature_dim": self.feature_dim, "ext_dim": self.ext_dim,
"output_dim": self.output_dim, "len_row": self.len_row, "len_column": self.len_column,
"num_batches": self.num_batches}
|
tests/test_main.py | greggles/cutadapt | 375 | 12762251 | import pytest
from cutadapt.__main__ import main, parse_cutoffs, parse_lengths, CommandLineError, setup_logging
def test_help():
with pytest.raises(SystemExit) as e:
main(["--help"])
assert e.value.args[0] == 0
def test_parse_cutoffs():
assert parse_cutoffs("5") == (0, 5)
assert parse_cutoffs("6,7") == (6, 7)
with pytest.raises(CommandLineError):
parse_cutoffs("a,7")
with pytest.raises(CommandLineError):
parse_cutoffs("a")
with pytest.raises(CommandLineError):
parse_cutoffs("a,7")
with pytest.raises(CommandLineError):
parse_cutoffs("1,2,3")
def test_parse_lengths():
assert parse_lengths("25") == (25, )
assert parse_lengths("17:25") == (17, 25)
assert parse_lengths("25:") == (25, None)
assert parse_lengths(":25") == (None, 25)
with pytest.raises(CommandLineError):
parse_lengths("1:2:3")
with pytest.raises(CommandLineError):
parse_lengths("a:2")
with pytest.raises(CommandLineError):
parse_lengths("a")
with pytest.raises(CommandLineError):
parse_lengths("2:a")
with pytest.raises(CommandLineError):
parse_lengths(":")
def test_setup_logging():
import logging
logger = logging.getLogger(__name__)
setup_logging(logger, log_to_stderr=False, quiet=False, minimal=False, debug=False)
logger.info("Log message")
setup_logging(logger, log_to_stderr=False, debug=1)
setup_logging(logger, log_to_stderr=False, quiet=True)
setup_logging(logger, log_to_stderr=False, minimal=True)
|
plugin.video.mrknowtv/resources/lib/sources/pierwsza.py | mrknow/filmkodi | 105 | 12762266 | # -*- coding: utf-8 -*-
'''
Mrknow TV Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,base64,urllib
import re, time, datetime
import json
from resources.lib.lib import control
from resources.lib.lib import client
from resources.lib.lib import stale
def get(url, params={}):
try:
params['api_id'] = stale.pierwszatv_apiid
params['checksum'] = stale.pierwszatv_checksum
url = urlparse.urljoin('http://pierwsza.tv', url)
url = url + '?' + urllib.urlencode(params)
headers = {'Content-Type': 'application/json'}
result = client.request(url, headers=headers, output='response', error=True)
if not (result[0] == '401' or result[0] == '405'): return result[1]
result = client.request(url, headers=headers)
#control.log('ZZZZZZZZ PIerwsza result: %s' % result)
return result
except:
pass
def getstream(id):
try:
control.set_setting('pierwszatv.tokenExpireIn', '')
control.set_setting('pierwszatv.serverId', '')
control.set_setting('pierwszatv.streamId', '')
control.set_setting('pierwszatv.token', '')
if getPierwszaCredentialsInfo() == False:
if control.yesnoDialog(control.lang(40003).encode('utf-8'), control.lang(30481).encode('utf-8'), '', 'Trakt', control.lang(30483).encode('utf-8'), control.lang(30482).encode('utf-8')):
control.set_setting('pierwszatv.user', '')
control.set_setting('pierwszatv.password', '')
control.openSettings('1.4')
raise Exception()
url = '/api/stream/create'
params = {}
params['id'] =id
params['user'] =control.setting('pierwszatv.user').strip()
params['password'] = urllib.quote_plus(control.setting('pierwszatv.password'))
result = get(url, params)
control.log('x1x1x1: %s' % result)
result = json.loads(result)
if result['status'] == 'ok':
#time.sleep(1)
expirein = int(int(result['tokenExpireIn'])*0.75)
expirewhen = datetime.datetime.now() + datetime.timedelta(seconds=expirein)
control.set_setting('pierwszatv.tokenExpireIn', str(int(time.mktime(expirewhen.timetuple()))))
control.set_setting('pierwszatv.serverId', result['serverId'])
control.set_setting('pierwszatv.streamId', result['streamId'])
control.set_setting('pierwszatv.token', result['token'])
for i in range(0, 5):
try:
r = get('/api/stream/status', {'serverId': result['serverId'] , 'streamId': result['streamId'], 'token': result['token']})
r = json.loads(r)
if r['status'] == 'ok':
#control.infoDialog(control.lang(30489).encode('utf-8'), time=6000)
for j in range(0, 20):
time.sleep(1)
control.infoDialog(control.lang(30489).encode('utf-8'), time=500)
try:
result2 = client.request(r['source']+'?token='+result['token'],safe=True, timeout='2')
control.log('Pierwsza link check nr: %s: result:%s' % (j,result2))
if result2 == None: raise Exception()
else: return r['source']+'?token='+result['token']
except:
pass
return r['source']+'?token='+result['token']
time.sleep(3)
except:
pass
if result['status'] == 'error':
control.infoDialog('%s' % result['message'].encode('utf-8'))
control.dialog.ok(control.addonInfo('name'), result['message'].encode('utf-8'), '')
return None
except Exception as e:
control.log('Error pierwsza.getstream %s' % e )
def getPierwszaCredentialsInfo():
user = control.setting('pierwszatv.user').strip()
password = control.setting('pierwszatv.password')
if (user == '' or password == ''): return False
return True
def streamrefresh():
try:
#mynow = int(datetime.datetime.now().strftime('%s'))
mynow = int(str(int(time.mktime(datetime.datetime.now().timetuple()))))
expired = int(control.get_setting('pierwszatv.tokenExpireIn'))
#control.log('XXXX Exp:%s Now:%s' % (expired, mynow))
if mynow>expired:
control.log('Pierwsza refresh')
url = '/api/stream/refresh'
params = {}
params['serverId'] =control.get_setting('pierwszatv.serverId')
params['streamId'] =control.get_setting('pierwszatv.streamId')
params['token'] = control.get_setting('pierwszatv.token')
result = get(url, params)
result = json.loads(result)
expirein = int(int(result['tokenExpireIn'])*0.75)
expirewhen = datetime.datetime.now() + datetime.timedelta(seconds=expirein)
control.set_setting('pierwszatv.tokenExpireIn', str(int(time.mktime(expirewhen.timetuple()))))
except Exception as e:
control.log('Error pierwsza.refresh %s' % e )
raise Exception()
def chanels():
items = []
try:
result = get('/api/channels')
result = json.loads(result)
for i in result['channels']:
try:
items.append(i)
except:
pass
if len(items) == 0:
items = result
except:
control.log('Error pierwsza.chanels' )
pass
return items
|
study/vowel_summary.py | Kshitiz-Bansal/wavetorch | 470 | 12762275 | """Generate a summary of a previously trained vowel recognition model.
"""
import torch
import wavetorch
import argparse
import yaml
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
try:
from helpers.plot import mpl_set_latex
mpl_set_latex()
except ImportError:
import warnings
warnings.warn('The helpers package is unavailable', ImportWarning)
COL_TRAIN = "#1f77b4"
COL_TEST = "#2ca02c"
parser = argparse.ArgumentParser()
parser.add_argument('filename', type=str)
parser.add_argument('--vmin', type=float, default=1e-3)
parser.add_argument('--vmax', type=float, default=1.0)
parser.add_argument('--fig', type=str, default=None)
parser.add_argument('--title_off', action='store_true')
parser.add_argument('--labels', action='store_true')
parser.add_argument('--vowel_samples', nargs='+', type=int, default=None)
if __name__ == '__main__':
args = parser.parse_args()
model, history, history_state, cfg = wavetorch.io.load_model(args.filename)
try:
if cfg['seed'] is not None:
torch.manual_seed(cfg['seed'])
except:
pass
print("Configuration for model in %s is:" % args.filename)
print(yaml.dump(cfg, default_flow_style=False))
sr = cfg['data']['sr']
gender = cfg['data']['gender']
vowels = cfg['data']['vowels']
N_classes = len(vowels)
fig = plt.figure( figsize=(7, 4.75), constrained_layout=True)
gs = fig.add_gridspec(1, 2, width_ratios=[1, 0.4])
gs_left = gs[0].subgridspec(3, 2)
gs_right = gs[1].subgridspec(N_classes+1, 1, height_ratios=[1 for i in range(0,N_classes)] + [0.05])
gs_bot = gs_left[2,:].subgridspec(1, 2)
ax_cm_train0 = fig.add_subplot(gs_left[0,0])
ax_cm_test0 = fig.add_subplot(gs_left[0,1])
ax_cm_train1 = fig.add_subplot(gs_left[1,0])
ax_cm_test1 = fig.add_subplot(gs_left[1,1])
ax_loss = fig.add_subplot(gs_bot[0])
ax_acc = fig.add_subplot(gs_bot[1])
ax_fields = [fig.add_subplot(gs_right[i]) for i in range(0, N_classes+1)]
history_mean = history.groupby('epoch').mean()
history_std = history.groupby('epoch').std()
epochs = history_mean.index.values
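    # history appears to hold one row per (epoch, cross-validation fold), so
    # grouping by 'epoch' gives the across-fold mean and standard deviation
    # that are plotted as lines and shaded bands below.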
ax_loss.fill_between(epochs,
history_mean['loss_train'].values-history_std['loss_train'].values,
history_mean['loss_train'].values+history_std['loss_train'].values, color=COL_TRAIN, alpha=0.15)
ax_loss.plot(epochs, history_mean['loss_train'].values, "-", label="Training dataset", ms=4, color=COL_TRAIN)
ax_loss.fill_between(epochs,
history_mean['loss_test'].values-history_std['loss_test'].values,
history_mean['loss_test'].values+history_std['loss_test'].values, color=COL_TEST, alpha=0.15)
ax_loss.plot(epochs, history_mean['loss_test'].values, "-", label="Testing dataset", ms=4, color=COL_TEST)
ax_loss.set_ylabel('Loss')
ax_loss.set_xlabel('Training epoch \#')
ax_acc.plot(epochs, history_mean['acc_train'].values*100, "-", label="Training dataset", ms=4, color=COL_TRAIN)
ax_acc.fill_between(epochs,
history_mean['acc_train'].values*100-history_std['acc_train'].values*100,
history_mean['acc_train'].values*100+history_std['acc_train'].values*100, color=COL_TRAIN, alpha=0.15)
ax_acc.plot(epochs, history_mean['acc_test'].values*100, "-", label="Testing dataset", ms=4, color=COL_TEST)
ax_acc.fill_between(epochs,
history_mean['acc_test'].values*100-history_std['acc_test'].values*100,
history_mean['acc_test'].values*100+history_std['acc_test'].values*100, color=COL_TEST, alpha=0.15)
ax_acc.set_xlabel('Training epoch \#')
ax_acc.set_ylabel('Accuracy')
ax_acc.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=10))
# ax_acc.set_ylim([20,100])
ax_loss.yaxis.set_major_locator(mpl.ticker.MultipleLocator(base=0.1))
# ax_loss.set_ylim([0.7,1.2])
ax_acc.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%.0f\%%'))
ax_loss.legend(fontsize='small')
# ax_acc.annotate("%.1f%% training set accuracy" % (history_mean['acc_train'].tail(1).iloc[0]*100), xy=(0.1,0.1), xytext=(0,10), textcoords="offset points", xycoords="axes fraction", ha="left", va="bottom", color=COL_TRAIN)
# ax_acc.annotate("%.1f%% testing set accuracy" % (history_mean['acc_test'].tail(1).iloc[0]*100), xy=(0.1,0.1), xycoords="axes fraction", ha="left", va="bottom", color=COL_TEST)
ax_acc.annotate('%.1f\%%' % (history_mean['acc_train'].tail(1).iloc[0]*100),
xy=(epochs[-1], history_mean['acc_train'].tail(1).iloc[0]*100), xycoords='data',
xytext=(-1, 5), textcoords='offset points', ha='left', va='center', fontsize='small',
color=COL_TRAIN, bbox=wavetorch.plot.bbox_white)
ax_acc.annotate('%.1f\%%' % (history_mean['acc_test'].tail(1).iloc[0]*100),
xy=(epochs[-1], history_mean['acc_test'].tail(1).iloc[0]*100), xycoords='data',
xytext=(-1, -5), textcoords='offset points', ha='left', va='center', fontsize='small',
color=COL_TEST, bbox=wavetorch.plot.bbox_white)
print('Accuracy (train): %.1f%% +/- %.1f%%' % (history_mean['acc_train'].tail(1).iloc[0]*100, history_std['acc_train'].tail(1).iloc[0]*100))
print('Accuracy (test): %.1f%% +/- %.1f%%' % (history_mean['acc_test'].tail(1).iloc[0]*100, history_std['acc_test'].tail(1).iloc[0]*100))
cm_train = history.groupby('epoch')['cm_train'].apply(np.mean).head(1).iloc[0]
cm_test = history.groupby('epoch')['cm_test'].apply(np.mean).head(1).iloc[0]
wavetorch.plot.confusion_matrix(cm_train, title="Training dataset", normalize=True, ax=ax_cm_train0, labels=vowels)
wavetorch.plot.confusion_matrix(cm_test, title="Testing dataset", normalize=True, ax=ax_cm_test0, labels=vowels)
cm_train = history.groupby('epoch')['cm_train'].apply(np.mean).tail(1).iloc[0]
cm_test = history.groupby('epoch')['cm_test'].apply(np.mean).tail(1).iloc[0]
wavetorch.plot.confusion_matrix(cm_train, title="Training dataset", normalize=True, ax=ax_cm_train1, labels=vowels)
wavetorch.plot.confusion_matrix(cm_test, title="Testing dataset", normalize=True, ax=ax_cm_test1, labels=vowels)
X, Y, F = wavetorch.data.load_all_vowels(vowels, gender='both', sr=sr, random_state=0)
# model.load_state_dict(history_state[cfg['training']['N_epochs']])
for i in range(N_classes):
xb, yb = wavetorch.data.select_vowel_sample(X, Y, F, i, ind=args.vowel_samples[i] if args.vowel_samples is not None else None)
with torch.no_grad():
field_dist = model(xb, output_fields=True)
wavetorch.plot.total_field(model, field_dist, yb, ax=ax_fields[yb.argmax().item()], cbar=True, cax=ax_fields[-1], vmin=args.vmin, vmax=args.vmax)
if args.labels:
try:
from helpers.plot import apply_panel_labels
apply_panel_labels([ax_cm_train0, ax_cm_test0, ax_cm_train1, ax_cm_test1, ax_loss, ax_acc] + ax_fields[0:-1],
xy=[(-35,0), (-35,0), (-35,0), (-35,0), (-25,0), (-40,0), (8,-6), (8,-6), (8,-6)],
color=['k', 'k', 'k', 'k', 'k', 'k', 'w', 'w', 'w'],
case='upper')
except ImportError:
import warnings
warnings.warn('The helpers package is unavailable', ImportWarning)
plt.show()
if args.fig is not None:
fig.savefig(args.fig, dpi=300)
else:
fig.savefig(os.path.splitext(args.filename)[0]+"_summary.png", dpi=300)
|
nni/common/nas_utils.py | dutxubo/nni | 9,680 | 12762292 | <gh_stars>1000+
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import functools
import logging
from .. import trial
_logger = logging.getLogger(__name__)
_MUTABLE_LAYER_SPACE_PREFIX = "_mutable_layer"
_namespace = {}
_tf_variables = {}
_arch_logits_list = []
_optimizer = None
_train_op = None
def classic_mode(
mutable_id,
mutable_layer_id,
funcs,
funcs_args,
fixed_inputs,
optional_inputs,
optional_input_size):
'''Execute the chosen function and inputs directly.
In this mode, the trial code is only running the chosen subgraph (i.e., the chosen ops and inputs),
without touching the full model graph.'''
if trial.get_current_parameter() is None:
trial.get_next_parameter()
chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id,
list(optional_inputs.keys()))
real_chosen_inputs = [optional_inputs[input_name] for input_name in chosen_inputs]
layer_out = funcs[chosen_layer]([fixed_inputs, real_chosen_inputs], **funcs_args[chosen_layer])
return layer_out
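# In classic mode the tuner's choice arrives through nni.get_next_parameter()
# as a structure roughly like the following (all names are illustrative):
#   {"mutable_block_1": {"mutable_layer_0": {"chosen_layer": "conv3x3",
#                                            "chosen_inputs": ["out1"]}}}
# and only that function and those optional inputs are actually executed.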
def enas_mode(
mutable_id,
mutable_layer_id,
funcs,
funcs_args,
fixed_inputs,
optional_inputs,
optional_input_size,
tf):
    '''For enas mode, we build the full model graph in the trial but only run a subgraph.
This is implemented by masking inputs and branching ops.
Specifically, based on the received subgraph (through nni.get_next_parameter),
it can be known which inputs should be masked and which op should be executed.'''
name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
# store namespace
_namespace[mutable_id] = True
_namespace[name_prefix] = dict()
_namespace[name_prefix]['funcs'] = list(funcs)
_namespace[name_prefix]['optional_inputs'] = list(optional_inputs)
# create tensorflow variables as 1/0 signals used to form subgraph
name_for_optional_inputs = name_prefix + '_optional_inputs'
name_for_funcs = name_prefix + '_funcs'
_tf_variables[name_prefix] = dict()
_tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(
name_for_optional_inputs,
[len(optional_inputs)],
dtype=tf.bool,
trainable=False
)
_tf_variables[name_prefix]['funcs'] = tf.get_variable(
name_for_funcs, [], dtype=tf.int64, trainable=False)
# get real values using their variable names
real_optional_inputs_value = [optional_inputs[name]
for name in _namespace[name_prefix]['optional_inputs']]
real_func_value = [funcs[name]
for name in _namespace[name_prefix]['funcs']]
real_funcs_args = [funcs_args[name]
for name in _namespace[name_prefix]['funcs']]
    # build the tensorflow graph for getting the chosen inputs by masking
real_chosen_inputs = tf.boolean_mask(
real_optional_inputs_value, _tf_variables[name_prefix]['optional_inputs'])
# build tensorflow graph of different branches by using tf.case
branches = dict()
func_output = None
for func_id in range(len(funcs)):
func_output = real_func_value[func_id]([fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
branches[tf.equal(_tf_variables[name_prefix]['funcs'], func_id)] = lambda: func_output
layer_out = tf.case(branches, exclusive=True, default=lambda: func_output)
return layer_out
def oneshot_mode(
mutable_id,
mutable_layer_id,
funcs,
funcs_args,
fixed_inputs,
optional_inputs,
optional_input_size,
tf):
'''Similar to enas mode, oneshot mode also builds the full model graph.
The difference is that oneshot mode does not receive subgraph.
Instead, it uses dropout to randomly dropout inputs and ops.'''
    # NNI requires calling get_next_parameter before reporting a result, but the parameter is not used in this mode
if trial.get_current_parameter() is None:
trial.get_next_parameter()
optional_inputs = list(optional_inputs.values())
inputs_num = len(optional_inputs)
    # Calculate the dropout rate according to the formula r^(1/k), where r is a hyper-parameter and k is the number of inputs
if inputs_num > 0:
rate = 0.01 ** (1 / inputs_num)
noise_shape = [inputs_num] + [1] * len(optional_inputs[0].get_shape())
optional_inputs = tf.nn.dropout(
optional_inputs, rate=rate, noise_shape=noise_shape)
optional_inputs = [optional_inputs[idx] for idx in range(inputs_num)]
layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
for func_name, func in funcs.items()]
output_num = len(layer_outs)
rate = 0.01 ** (1 / output_num)
noise_shape = [output_num] + [1] * len(layer_outs[0].get_shape())
layer_outs = tf.nn.dropout(layer_outs, rate=rate, noise_shape=noise_shape)
layer_out = tf.reduce_sum(layer_outs, axis=0)
return layer_out
def darts_mode(
mutable_id,
mutable_layer_id,
funcs,
funcs_args,
fixed_inputs,
optional_inputs,
optional_input_size,
tf):
optional_inputs = list(optional_inputs.values())
layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
for func_name, func in funcs.items()]
# Create architecture weights for every func(op)
var_name = "{}_{}_arch_weights".format(mutable_id, mutable_layer_id)
arch_logits = tf.get_variable(var_name, shape=[len(funcs)], trainable=False)
_arch_logits_list.append(arch_logits)
arch_weights = tf.nn.softmax(arch_logits)
layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)])
return layer_out
def reload_tensorflow_variables(tf, session):
    '''In enas mode, this function reloads every signal variable created in the `enas_mode` function so
    that the whole tensorflow graph is changed into the subgraph received from the tuner.
---------------
session: the tensorflow session created by users
tf: tensorflow module
'''
subgraph_from_tuner = trial.get_next_parameter()
mutable_layers = set()
for subgraph_key in subgraph_from_tuner:
if "/" in subgraph_key:
# has to remove the last, could be layer_choice or whatever
mutable_id, mutable_layer_id = _decompose_general_key(subgraph_key[:subgraph_key.rfind("/")])
if mutable_id is not None:
mutable_layers.add((mutable_id, mutable_layer_id))
mutable_layers = sorted(list(mutable_layers))
for mutable_id, mutable_layer_id in mutable_layers:
if mutable_id not in _namespace:
_logger.warning("%s not found in name space", mutable_id)
continue
name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
# get optional inputs names
optional_inputs = _namespace[name_prefix]['optional_inputs']
# extract layer information from the subgraph sampled by tuner
chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs)
chosen_layer = _namespace[name_prefix]['funcs'].index(chosen_layer)
chosen_inputs = [1 if inp in chosen_inputs else 0 for inp in optional_inputs]
# load these information into pre-defined tensorflow variables
_tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
_tf_variables[name_prefix]['optional_inputs'].load(
chosen_inputs, session)
def _construct_general_key(mutable_id, mutable_layer_id):
# Mutable layer key in a general (search space) format
# that is, prefix/mutable_id/mutable_layer_id
return _MUTABLE_LAYER_SPACE_PREFIX + "/" + mutable_id + "/" + mutable_layer_id
def _decompose_general_key(key):
# inverse operation of above
if not key.startswith(_MUTABLE_LAYER_SPACE_PREFIX):
return None, None
else:
_, mutable_id, mutable_layer_id = key.split("/", maxsplit=2)
return mutable_id, mutable_layer_id
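# Example with illustrative names: _construct_general_key("block_1", "layer_2")
# returns "_mutable_layer/block_1/layer_2", and _decompose_general_key of that
# string yields ("block_1", "layer_2") again.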
def darts_training(tf, session, loss, feed_dict):
global _optimizer, _train_op
if _optimizer is None:
        _optimizer = tf.train.MomentumOptimizer(learning_rate=0.025, momentum=0.9)  # NOTE: the momentum value is an assumed default
# TODO: Calculate loss
grads_and_vars = _optimizer.compute_gradients(loss, _arch_logits_list)
_train_op = _optimizer.apply_gradients(grads_and_vars)
session.run(_train_op)
def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None):
if nas_mode == 'darts_mode':
darts_training(tf, session, loss, feed_dict)
elif nas_mode == 'enas_mode':
reload_tensorflow_variables(tf, session)
def _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs):
# optional_inputs should be name(key)s of the optional inputs
try:
mutable_block = trial.get_current_parameter(mutable_id)
# There is a NAS tuner
chosen_layer = mutable_block[mutable_layer_id]["chosen_layer"]
chosen_inputs = mutable_block[mutable_layer_id]["chosen_inputs"]
except KeyError:
# Try to find converted NAS parameters
params = trial.get_current_parameter()
expected_prefix = _construct_general_key(mutable_id, mutable_layer_id)
chosen_layer = params[expected_prefix + "/layer_choice"]
# find how many to choose
optional_input_size = int(params[expected_prefix + "/optional_input_size"]) # convert uniform to randint
        # find which inputs to choose; duplicates are allowed
optional_input_state = params[expected_prefix + "/optional_input_chosen_state"]
chosen_inputs = []
        # make sure the dict -> list conversion produces a stable result by sorting
optional_inputs_keys = sorted(optional_inputs)
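        # optional_input_chosen_state encodes the chosen inputs as a base-N
        # number, where N = len(optional_inputs); e.g. with 3 optional inputs
        # and state 5 (= 2 + 1*3), the first pick is index 2 and the second
        # pick is index 1 of the sorted key list.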
for _ in range(optional_input_size):
chosen_inputs.append(optional_inputs_keys[optional_input_state % len(optional_inputs)])
optional_input_state //= len(optional_inputs)
_logger.info("%s_%s: layer: %s, optional inputs: %s", mutable_id, mutable_layer_id, chosen_layer, chosen_inputs)
return chosen_layer, chosen_inputs
def convert_nas_search_space(search_space):
"""
Args:
param search_space: raw search space
return: the new search space, mutable_layers will be converted into choice
"""
if not isinstance(search_space, dict):
return search_space
ret = dict()
for k, v in search_space.items():
if "_type" not in v:
# this should not happen
_logger.warning("There is no _type in one of your search space values with key '%s'"
". Please check your search space", k)
ret[k] = v
elif v["_type"] != "mutable_layer":
ret[k] = v
else:
_logger.info("Converting mutable_layer search space with key '%s'", k)
# v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...}
values = v["_value"]
for layer_name, layer_data in values.items():
# there should be at most layer_choice, optional_inputs, optional_input_size in layer_data
# add "_mutable_layer" as prefix so that they can be recovered later
layer_key = _construct_general_key(k, layer_name)
if layer_data.get("layer_choice"): # filter out empty choice and no choice
layer_choice = layer_data["layer_choice"]
else:
raise ValueError("No layer choice found in %s" % layer_key)
if layer_data.get("optional_input_size"):
input_size = layer_data["optional_input_size"]
if isinstance(input_size, int):
input_size = [input_size, input_size]
if input_size[0] > input_size[1] or input_size[0] < 0:
_logger.error("Might not be able to handle optional_input_size < 0, please double check")
input_size[1] += 1
else:
_logger.info("Optional input choices are set to empty by default in %s", layer_key)
input_size = [0, 1]
if layer_data.get("optional_inputs"):
total_state_size = len(layer_data["optional_inputs"]) ** (input_size[1] - 1)
else:
_logger.info("Optional inputs not found in %s", layer_key)
total_state_size = 1
converted = {
layer_key + "/layer_choice": {
"_type": "choice", "_value": layer_choice
},
layer_key + "/optional_input_size": {
"_type": "randint", "_value": input_size
},
layer_key + "/optional_input_chosen_state": {
"_type": "randint", "_value": [0, total_state_size]
}
}
_logger.info(converted)
ret.update(converted)
return ret
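# Illustrative sketch of the conversion above (hypothetical search space entry):
#
#     {"block_1": {"_type": "mutable_layer",
#                  "_value": {"layer_0": {"layer_choice": ["conv", "pool"],
#                                         "optional_inputs": ["x", "y"],
#                                         "optional_input_size": 1}}}}
#
# becomes three plain entries keyed by <prefix>/block_1/layer_0:
#     .../layer_choice                -> {"_type": "choice",  "_value": ["conv", "pool"]}
#     .../optional_input_size         -> {"_type": "randint", "_value": [1, 2]}
#     .../optional_input_chosen_state -> {"_type": "randint", "_value": [0, 2]}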
def rewrite_nas_space(func):
@functools.wraps(func)
def wrap(self, search_space):
search_space = convert_nas_search_space(search_space)
return func(self, search_space)
return wrap
|
competitive_programming/programming_contests/interfatecs/1_2018/f.py | LeandroTk/Algorithms | 205 | 12762301 | codigo_set = set()
codido_set_saiu = set()
s = input()
codigos = input().split(' ')
for codigo in codigos:
codigo_set.add(codigo)
i = input()
saidas = input().split(' ')
A = 0
I = 0
R = 0
for saida in saidas:
if saida in codigo_set:
if saida in codido_set_saiu:
R += 1
else:
A += 1
codido_set_saiu.add(saida)
else:
if saida in codido_set_saiu:
R += 1
else:
I += 1
codido_set_saiu.add(saida)
print('%d A' % A)
print('%d I' % I)
print('%d R' % R)
|
fastseq/logging/logging_utils.py | nttcs-ds/fastseq | 346 | 12762305 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Logging related module."""
import os
import logging
from logging import _checkLevel
from fastseq.config import FASTSEQ_DEFAULT_LOG_LEVEL, FASTSEQ_LOG_LEVEL, FASTSEQ_LOG_FORMAT
def set_default_log_level():
"""Set the default log level from the environment variable"""
try:
fastseq_log_level = _checkLevel(FASTSEQ_LOG_LEVEL)
except (ValueError, TypeError) as e:
logging.error(
"Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. "
"'DEBUG', 'INFO'): {}".format(e))
raise
logging.basicConfig(level=fastseq_log_level, format=FASTSEQ_LOG_FORMAT)
def get_logger(name=None, level=logging.INFO):
"""
Return a logger with the specific name, creating it if necessary.
If no name is specified, return the root logger.
Args:
        name (str, optional): logger name. Defaults to None.
        level (int/str, optional): the log level. Defaults to logging.INFO.
Returns:
Logger : the specified logger.
"""
level = _checkLevel(level)
if FASTSEQ_LOG_LEVEL != FASTSEQ_DEFAULT_LOG_LEVEL:
try:
level = _checkLevel(FASTSEQ_LOG_LEVEL)
except (ValueError, TypeError) as e:
logging.error(
"Please input a valid value for FASTSEQ_LOG_LEVEL (e.g. "
"'DEBUG', 'INFO'): {}".format(e))
raise
logger = logging.getLogger(name)
logger.setLevel(level)
return logger
def update_all_log_level(level=logging.INFO):
"""
Update all the loggers to use the specified level.
Args:
level (int/str, optional): the log level. Defaults to logging.INFO.
"""
loggers = [
logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
logger.setLevel(level)
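# Minimal usage sketch for the helpers above (import path follows this file's
# location, fastseq/logging/logging_utils.py):
#
#     from fastseq.logging.logging_utils import get_logger, update_all_log_level
#     logger = get_logger(__name__, logging.DEBUG)
#     logger.debug("fastseq logger ready")
#     update_all_log_level(logging.WARNING)  # quiet every registered logger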
|
chap6/bbox_labeling/detection_anno_bbox2voc.py | wang420349864/dlcv_for_beginners | 1,424 | 12762327 | <reponame>wang420349864/dlcv_for_beginners<filename>chap6/bbox_labeling/detection_anno_bbox2voc.py
import os
import sys
import xml.etree.ElementTree as ET
#import xml.dom.minidom as minidom
import cv2
from bbox_labeling import SimpleBBoxLabeling
input_dir = sys.argv[1].rstrip(os.sep)
bbox_filenames = [x for x in os.listdir(input_dir) if x.endswith('.bbox')]
for bbox_filename in bbox_filenames:
bbox_filepath = os.sep.join([input_dir, bbox_filename])
jpg_filepath = bbox_filepath[:-5]
if not os.path.exists(jpg_filepath):
print('Something is wrong with {}!'.format(bbox_filepath))
break
root = ET.Element('annotation')
filename = ET.SubElement(root, 'filename')
jpg_filename = jpg_filepath.split(os.sep)[-1]
filename.text = jpg_filename
img = cv2.imread(jpg_filepath)
h, w, c = img.shape
size = ET.SubElement(root, 'size')
width = ET.SubElement(size, 'width')
width.text = str(w)
height = ET.SubElement(size, 'height')
height.text = str(h)
depth = ET.SubElement(size, 'depth')
depth.text = str(c)
bboxes = SimpleBBoxLabeling.load_bbox(bbox_filepath)
for obj_name, coord in bboxes:
obj = ET.SubElement(root, 'object')
name = ET.SubElement(obj, 'name')
name.text = obj_name
bndbox = ET.SubElement(obj, 'bndbox')
xmin = ET.SubElement(bndbox, 'xmin')
xmax = ET.SubElement(bndbox, 'xmax')
ymin = ET.SubElement(bndbox, 'ymin')
ymax = ET.SubElement(bndbox, 'ymax')
(left, top), (right, bottom) = coord
xmin.text = str(left)
xmax.text = str(right)
ymin.text = str(top)
ymax.text = str(bottom)
xml_filepath = jpg_filepath[:jpg_filepath.rfind('.')] + '.xml'
with open(xml_filepath, 'w') as f:
anno_xmlstr = ET.tostring(root)
# In case a nicely formatted xml is needed
# uncomment the following 2 lines and minidom import
#anno_xml = minidom.parseString(anno_xmlstr)
#anno_xmlstr = anno_xml.toprettyxml()
f.write(anno_xmlstr)
|
pwncat/modules/linux/enumerate/user/__init__.py | Mitul16/pwncat | 1,454 | 12762338 | <reponame>Mitul16/pwncat<filename>pwncat/modules/linux/enumerate/user/__init__.py<gh_stars>1000+
#!/usr/bin/env python3
import pwncat
from pwncat.modules import Status, ModuleFailed
from pwncat.facts.linux import LinuxUser
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class Module(EnumerateModule):
"""Enumerate users from a linux target"""
PROVIDES = ["user"]
PLATFORM = [Linux]
SCHEDULE = Schedule.ONCE
def enumerate(self, session: "pwncat.manager.Session"):
passwd = session.platform.Path("/etc/passwd")
shadow = session.platform.Path("/etc/shadow")
users = {}
try:
with passwd.open("r") as filp:
for user_info in filp:
try:
# Extract the user fields
(
name,
hash,
uid,
gid,
comment,
home,
shell,
) = user_info.split(":")
# Build a user object
user = LinuxUser(
self.name,
name,
hash,
int(uid),
int(gid),
comment,
home,
shell,
)
users[name] = user
yield Status(user)
except Exception:
# Bad passwd line
continue
except (FileNotFoundError, PermissionError) as exc:
raise ModuleFailed(str(exc)) from exc
try:
with shadow.open("r") as filp:
for user_info in filp:
try:
(
name,
hash,
last_change,
min_age,
max_age,
warn_period,
inactive_period,
expir_date,
reserved,
) = user_info.split(":")
if users[name].hash is None:
users[name].hash = hash if hash != "" else None
if users[name].password is None and hash == "":
users[name].password = ""
users[name].last_change = int(last_change)
users[name].min_age = int(min_age)
users[name].max_age = int(max_age)
users[name].warn_period = int(warn_period)
users[name].inactive_period = int(inactive_period)
users[name].expiration = int(expir_date)
users[name].reserved = reserved
except (ValueError, IndexError):
continue
except (FileNotFoundError, PermissionError):
pass
except Exception as exc:
raise ModuleFailed(str(exc)) from exc
# Yield all the known users after attempting to parse /etc/shadow
yield from users.values()
|
examples/providers/factory_init_injections_underlying.py | whysage/python-dependency-injector | 1,997 | 12762345 | <reponame>whysage/python-dependency-injector<gh_stars>1000+
"""`Factory` provider - passing injections to the underlying providers example."""
from dependency_injector import containers, providers
class Regularizer:
def __init__(self, alpha: float) -> None:
self.alpha = alpha
class Loss:
def __init__(self, regularizer: Regularizer) -> None:
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss: Loss) -> None:
self.loss = loss
class Algorithm:
def __init__(self, task: ClassificationTask) -> None:
self.task = task
class Container(containers.DeclarativeContainer):
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
if __name__ == '__main__':
container = Container()
algorithm_1 = container.algorithm_factory(
task__loss__regularizer__alpha=0.5,
)
assert algorithm_1.task.loss.regularizer.alpha == 0.5
algorithm_2 = container.algorithm_factory(
task__loss__regularizer__alpha=0.7,
)
assert algorithm_2.task.loss.regularizer.alpha == 0.7
|
ykdl/extractors/yizhibo.py | 592767809/ykdl | 136 | 12762349 | # -*- coding: utf-8 -*-
from ._common import *
class Yizhibo(Extractor):
name = 'Yizhibo (一直播)'
def prepare(self):
info = MediaInfo(self.name)
info.live = True
self.vid = self.url[self.url.rfind('/')+1:].split('.')[0]
data = get_response(
'http://www.yizhibo.com/live/h5api/get_basic_live_info',
params={'scid': self.vid}).json()
        assert data['result'] == 1, 'Error: ' + str(data['result'])
data = data['data']
info.title = data['live_title']
info.artist = data['nickname']
info.streams['current'] = {
'container': 'm3u8',
'video_profile': 'current',
'src' : [data['play_url']],
'size': float('inf')
}
return info
site = Yizhibo()
|
nemo/collections/asr/parts/numba/rnnt_loss/utils/global_constants.py | madhukarkm/NeMo | 4,145 | 12762354 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import numpy as np
from numba import float32
# Internal globals
_THREADS_PER_BLOCK = 32
_WARP_SIZE = 32
_DTYPE = float32
# Constants
FP32_INF = np.inf
FP32_NEG_INF = -np.inf
THRESHOLD = 1e-1
"""
Getters
"""
def threads_per_block():
global _THREADS_PER_BLOCK
return _THREADS_PER_BLOCK
def warp_size():
global _WARP_SIZE
return _WARP_SIZE
def dtype():
global _DTYPE
return _DTYPE
# RNNT STATUS
class RNNTStatus(enum.Enum):
RNNT_STATUS_SUCCESS = 0
RNNT_STATUS_INVALID_VALUE = 1
|
tests/roots/test-ext-autodoc/target/wrappedfunction.py | samdoran/sphinx | 4,973 | 12762372 | from contextlib import contextmanager
from functools import lru_cache
from typing import Generator
@lru_cache(maxsize=None)
def slow_function(message, timeout):
"""This function is slow."""
print(message)
@contextmanager
def feeling_good(x: int, y: int) -> Generator:
"""You'll feel better in this context!"""
yield
|
muddery/worldeditor/dao/image_resources_mapper.py | dongwudanci/muddery | 127 | 12762375 | """
Query and handle common tables.
"""
from evennia.utils import logger
from django.apps import apps
from django.conf import settings
class ImageResourcesMapper(object):
"""
Object's image.
"""
def __init__(self):
self.model_name = "image_resources"
self.model = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
self.objects = self.model.objects
def get(self, resource):
"""
Get object's image.
Args:
resource: (string) resource's path.
"""
return self.objects.get(resource=resource)
def add(self, path, type, width, height):
"""
Add a new image record.
Args:
path: image's path
type: image's type
width: image's width
height: image's height
Return:
none
"""
record = {
"resource": path,
"type": type,
"image_width": width,
"image_height": height,
}
data = self.model(**record)
data.full_clean()
data.save()
IMAGE_RESOURCES = ImageResourcesMapper()
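# Usage sketch for the singleton above (paths and sizes are illustrative only):
#
#     IMAGE_RESOURCES.add("image/icons/sword.png", "ICON", 32, 32)
#     record = IMAGE_RESOURCES.get("image/icons/sword.png")
#     # record.image_width == 32 and record.image_height == 32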
|
pywizlight/bulblibrary.py | UH-60/pywizlight | 221 | 12762379 | <reponame>UH-60/pywizlight<filename>pywizlight/bulblibrary.py<gh_stars>100-1000
"""Library with compatible bulb types.
Bulb Type detection:
ESP01_SHDW1C_31
ESP01 -- defines the module family (WiFi only bulb in this case)
SH -- Single Head light (most bulbs are single heads) / LED Strip
TW -- Tunable White - can only control CCT and dimming; no color
DW -- Dimmable White (most filament bulbs)
RGB -- Fullstack bulb
1C -- Specific to the hardware - defines PWM frequency + way of controlling CCT temperature
31 -- Related to the hardware revision
"""
import dataclasses
from enum import Enum
from typing import Optional, List
from pywizlight.exceptions import WizLightNotKnownBulb
@dataclasses.dataclass(frozen=True)
class Features:
"""Defines the supported features."""
color: bool
color_tmp: bool
effect: bool
brightness: bool
# RGB supports effects and tuneable white
RGB_FEATURES = Features(brightness=True, color=True, effect=True, color_tmp=True)
# TODO: TW supports effects but only "some"; improve the mapping to supported effects
TW_FEATURES = Features(brightness=True, color=False, effect=True, color_tmp=True)
# Dimmable white only supports brightness
DW_FEATURES = Features(brightness=True, color=False, effect=False, color_tmp=False)
@dataclasses.dataclass(frozen=True)
class KelvinRange:
"""Defines the kelvin range."""
max: int
min: int
class BulbClass(Enum):
"""Bulb Types."""
"""Have Cool White and Warm White LEDs."""
TW = "Tunable White"
"""Have only Dimmable white LEDs."""
DW = "Dimmable White"
"""Have RGB LEDs."""
RGB = "RGB Bulb"
@dataclasses.dataclass(frozen=True)
class BulbType:
"""BulbType object to define functions and features of the bulb."""
features: Features
name: str
kelvin_range: Optional[KelvinRange]
bulb_type: BulbClass
@staticmethod
def from_data(module_name: str, kelvin_list: Optional[List[float]]) -> "BulbType":
if kelvin_list:
kelvin_range: Optional[KelvinRange] = KelvinRange(
min=int(min(kelvin_list)), max=int(max(kelvin_list))
)
else:
kelvin_range = None
try:
# parse the features from name
_identifier = module_name.split("_")[1]
# Throw exception if index can not be found
except IndexError:
raise WizLightNotKnownBulb("The bulb type can not be determined!")
if "RGB" in _identifier: # full RGB bulb
features = RGB_FEATURES
bulb_type = BulbClass.RGB
elif "TW" in _identifier: # Non RGB but tunable white bulb
features = TW_FEATURES
bulb_type = BulbClass.TW
else: # Plain brightness-only bulb
features = DW_FEATURES
bulb_type = BulbClass.DW
return BulbType(
bulb_type=bulb_type,
name=module_name,
features=features,
kelvin_range=kelvin_range,
)
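# Sketch of how a module name maps to a BulbType (module name and kelvin list
# are example values, not taken from a real device):
#
#     bulb = BulbType.from_data("ESP01_SHRGB1C_31", [2700.0, 6500.0])
#     # bulb.bulb_type == BulbClass.RGB
#     # bulb.features.color is True
#     # bulb.kelvin_range == KelvinRange(max=6500, min=2700)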
|
packyou/py2.py | llazzaro/packyou | 217 | 12762413 | <reponame>llazzaro/packyou<filename>packyou/py2.py
# -*- coding: utf-8 -*-
import imp
import ipdb
import logging
from sys import modules, meta_path
from os import mkdir
from os.path import (
isdir,
abspath,
dirname,
exists,
join,
)
import encodings.idna
import requests
from git import Repo
from packyou import find_module_path_in_cloned_repos
from packyou.utils import walklevel, memoize
MODULES_PATH = dirname(abspath(__file__))
LOGGER = logging.getLogger(__name__)
class GithubLoader(object):
"""
Import hook that will allow to import from a github repo.
"""
def __init__(self, repo_url=None, path=None, username=None, repository_name=None):
self.path = path
self.repo_url = repo_url
self.username = username
self.repository_name = repository_name
def check_root(self, fullname):
"""
#Sometimes the code is a python package or similar and there is a directory
#which contains all the code.
This method is used to search first on the root of the cloned repository for the
imported module.
"""
parent, _, module_name = fullname.rpartition('.')
if self.username and self.repository_name:
# REVISAR QUE PASE TODOS LOS PATHS
cloned_root = join(self.path[0], 'github', self.username, self.repository_name)
candidate_path = join(cloned_root, module_name)
if exists(candidate_path):
return candidate_path
for root, dirs, files in walklevel(cloned_root, level=1):
pass
def get_source(self, fullname):
filename = self.get_filename(fullname)
with open(filename, 'r') as source_file:
return source_file.read()
def get_code(self, fullname):
source = self.get_source(fullname)
return compile(source, self.get_filename(fullname), 'exec', dont_inherit=True)
def get_filename(self, fullname):
parent, _, current_module = fullname.rpartition('.')
filename = None
LOGGER.debug('Fullname {0} self.path {1}'.format(fullname, self.path))
for path in self.path:
package_path = join(path, '__init__.py')
if exists(package_path):
filename = package_path
module_path = '{0}.py'.format(join(path, current_module))
if exists(module_path):
filename = module_path
LOGGER.debug('get_filename({0}) is {1}'.format(fullname, filename))
return filename
def is_package(self, fullname):
filename = self.get_filename(fullname)
return not exists(filename) or isdir(filename)
def get_or_create_module(self, fullname):
"""
Given a name and a path it will return a module instance
if found.
When the module could not be found it will raise ImportError
"""
LOGGER.info('Loading module {0}'.format(fullname))
parent, _, module_name = fullname.rpartition('.')
if fullname in modules:
LOGGER.info('Found cache entry for {0}'.format(fullname))
return modules[fullname]
module = modules.setdefault(fullname, imp.new_module(fullname))
if len(fullname.strip('.')) > 3:
absolute_from_root = fullname.split('.', 3)[-1]
modules.setdefault(absolute_from_root, module)
if len(fullname.split('.')) == 4:
# add the root of the project
modules[fullname.split('.')[-1]] = module
# required by PEP 302
module.__file__ = self.get_filename(fullname)
LOGGER.info('Created module {0} with fullname {1}'.format(self.get_filename(fullname), fullname))
module.__name__ = fullname
module.__loader__ = self
module.__path__ = self.path
if self.is_package(fullname):
module.__path__ = self.path
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
LOGGER.debug('loading file {0}'.format(self.get_filename(fullname)))
source = self.get_source(fullname)
try:
exec(source, module.__dict__)
except Exception as ex:
ipdb.set_trace()
return module
def clone_github_repo(self):
"""
Clones a github repo with a username and repository_name
"""
if not (self.username and self.repository_name):
return
repository_local_destination = join(MODULES_PATH, 'github', self.username, self.repository_name)
if not exists(repository_local_destination):
Repo.clone_from(self.repo_url, repository_local_destination, branch='master')
init_filename = join(repository_local_destination, '__init__.py')
open(init_filename, 'a').close()
@property
def project_fullname(self):
return 'packyou.github.{0}.{1}'.format(self.username, self.repository_name)
def load_module(self, fullname):
"""
Given a name it will load the module from github.
When the project is not locally stored it will clone the
repo from github.
"""
module = None
splitted_names = fullname.split('.')
_, _, module_name = fullname.rpartition('.')
_, remaining = find_module_path_in_cloned_repos(fullname)
if 'github' in splitted_names and not remaining:
self.clone_github_repo()
if len(splitted_names) == 2:
module = self.get_or_create_module(fullname)
if len(splitted_names) == 3:
username_directory = join(MODULES_PATH, 'github', self.username)
if not exists(username_directory):
mkdir(username_directory)
username_init_filename = join(MODULES_PATH, 'github', self.username, '__init__.py')
open(username_init_filename, 'a').close()
module = self.get_or_create_module(fullname)
if len(splitted_names) >= 4:
module = self.get_or_create_module(fullname)
elif self.username and self.repository_name:
# relative import from project root.
fullname = 'packyou.github.{0}.{1}.{2}'.format(self.username, self.repository_name, remaining)
module = self.get_or_create_module(fullname)
if module:
modules[fullname] = module
if remaining is not None:
modules[remaining] = module
return module
class GithubFinder(object):
def __init__(self):
self.username = None
self.repository_name = None
@memoize
def check_repository_available(self, username, repository_name):
"""
Sometimes github has a - in the username or repository name.
The - can't be used in the import statement.
"""
repo_url = 'https://github.com/{0}/{1}.git'.format(username, repository_name)
response = requests.get(repo_url)
if response.status_code == 404:
if '_' in username:
repo_url = 'https://github.com/{0}/{1}.git'.format(username.replace('_', '-'), repository_name)
response = requests.get(repo_url)
if response.status_code == 200:
return repo_url
if '_' in repository_name:
repo_url = 'https://github.com/{0}/{1}.git'.format(username, repository_name.replace('_', '-'))
response = requests.get(repo_url)
if response.status_code == 200:
return repo_url
repo_url = 'https://github.com/{0}/{1}.git'.format(username.replace('_', '-'), repository_name.replace('_', '-'))
response = requests.get(repo_url)
if response.status_code == 200:
return repo_url
raise ImportError('Github repository not found.')
return repo_url
def find_module_in_cloned_repos(self, fullname):
return find_module_in_cloned_repos(fullname, GithubLoader)
def find_module(self, fullname, path=None):
"""
Finds a module and returns a module loader when
the import uses packyou
"""
LOGGER.info('Finding {0}'.format(fullname))
partent, _, module_name = fullname.rpartition('.')
path, _ = find_module_path_in_cloned_repos(fullname)
LOGGER.debug('FOUND PATH {0}'.format(path))
try:
# sometimes the project imported from github does an
# "import x" (absolute import), this translates to import github...x
# we try first to do an import x and cache the module in the sys.path.
# and return None if the imp.find_module was successful.
# This will allow python finders in the meta_path to do the import, and not packyou
# loaders.
if not path:
imp.find_module(module_name)
LOGGER.info('Absolute import: {0}. Original fullname {1}'.format(module_name, fullname))
return None
except ImportError:
            LOGGER.debug('imp.find_module could not find {0}. This is usually fine.'.format(module_name))
if 'packyou.github' in fullname:
fullname_parts = fullname.split('.')
repo_url = None
if len(fullname_parts) >= 3:
self.username = fullname.split('.')[2]
if len(fullname_parts) >= 4:
if not self.repository_name:
LOGGER.debug('FULLNAME -> {0} '.format(fullname))
self.repository_name = fullname.split('.')[3]
repo_url = self.check_repository_available(self.username, self.repository_name)
current_path = dirname(abspath(__file__))
repo_path = join(current_path, 'github', self.username, self.repository_name)
if repo_path not in path:
path.insert(0, repo_path)
LOGGER.info('Found {0} with path {1}'.format(fullname, path))
return GithubLoader(repo_url, path, self.username, self.repository_name)
elif self.username and self.repository_name and path:
LOGGER.info('Fullname {0} does not start with packyou, searching in cloned repos. Result was {1}'.format(fullname, path))
repo_url = self.check_repository_available(self.username, self.repository_name)
return GithubLoader(repo_url, path, self.username, self.repository_name)
LOGGER.info('Not found -> {0}'.format(fullname))
meta_path.append(GithubFinder())
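# Usage sketch: with the finder registered on sys.meta_path above, an import
# such as the hypothetical one below clones the GitHub repository on first use
# and then resolves the module tree from the local checkout:
#
#     import packyou.github.some_user.some_repo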
|
pysd/py_backend/vensim/table2py.py | rogersamso/pysd_dev | 240 | 12762415 | import pandas as pd
import warnings
from ...pysd import read_vensim
from io import open
def read_tabular(table_file, sheetname='Sheet1'):
"""
Reads a vensim syntax model which has been formatted as a table.
This is useful in contexts where model building is performed
without the aid of Vensim.
Parameters
----------
table_file: .csv, .tab or .xls(x) file
Table should have columns titled as in the table below
| Variable | Equation | Units | Min | Max | Comment |
| :------- | :------- | :---- | :-- | :-- | :--------------- |
| Age | 5 | Yrs | 0 | inf | How old are you? |
| ... | ... | ... | ... | ... | ... |
sheetname: basestring
if the model is specified in an excel file, what sheet?
Returns
-------
PySD Model Object
Notes
-----
Creates an intermediate file in vensim `.mdl` syntax, just so that
the existing vensim parsing machinery can be used.
"""
if isinstance(table_file, str):
extension = table_file.split('.')[-1]
if extension in ['xls', 'xlsx']:
table = pd.read_excel(table_file, sheetname=sheetname)
elif extension == 'csv':
table = pd.read_csv(table_file, encoding='UTF-8')
elif extension == 'tab':
table = pd.read_csv(table_file, sep='\t', encoding='UTF-8')
else:
raise ValueError('Unknown file or table type')
else:
raise ValueError('Unknown file or table type')
if not set(table.columns).issuperset({'Variable', 'Equation'}):
raise ValueError('Table must contain at least columns "Variable" and "Equation"')
if "Units" not in set(table.columns):
warnings.warn('Column for "Units" not found', RuntimeWarning, stacklevel=2)
table['Units'] = ''
if "Min" not in set(table.columns):
warnings.warn('Column for "Min" not found', RuntimeWarning, stacklevel=2)
table['Min'] = ''
if "Max" not in set(table.columns):
warnings.warn('Column for "Max" not found', RuntimeWarning, stacklevel=2)
table['Max'] = ''
mdl_file = table_file.replace(extension, 'mdl')
with open(mdl_file, 'w', encoding='UTF-8') as outfile:
for element in table.to_dict(orient='records'):
outfile.write(
"%(Variable)s = \n"
"\t %(Equation)s \n"
"\t~\t %(Units)s [%(Min)s, %(Max)s] \n"
"\t~\t %(Comment)s \n\t|\n\n" % element
)
outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.')
return read_vensim(mdl_file) |
brambox/boxes/formats.py | thesuperorange/task-conditioned | 331 | 12762416 | #
# Copyright EAVISE
#
from .annotations import annotation_formats
from .detections import detection_formats
__all__ = ['formats', 'annotation_formats', 'detection_formats']
formats = {}
for key in annotation_formats:
formats['anno_'+key] = annotation_formats[key]
for key in detection_formats:
formats['det_'+key] = detection_formats[key]
|
Code Templates/Google.py | cnm06/Competitive-Programming | 994 | 12762418 | f = open('sample-input.txt')
o = open('sample-output.txt', 'w')
t = int(f.readline().strip())
for i in xrange(1, t + 1):
o.write("Case #{}: ".format(i))
n = int(f.readline().strip())
x = [int(j) for j in f.readline().strip().split()]
y = [int(j) for j in f.readline().strip().split()]
o.write("\n")
|
examples/ahrs/python/ukf/__init__.py | rafaelrietmann/ukf | 320 | 12762431 | <reponame>rafaelrietmann/ukf
#Copyright (C) 2013 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
from ctypes import *
# Taken from c/cukf.h
UKF_PRECISION_FLOAT = 0
UKF_PRECISION_DOUBLE = 1
state = None
state_error = None
innovation = None
covariance = None
parameters = None
parameters_error = None
# Internal globals, set during init
_cukf = None
_REAL_T = None
# Internal classes, wrapping cukf structs directly
class _SensorParams(Structure):
pass
class _State(Structure):
def __repr__(self):
fields = {
"attitude": tuple(self.attitude),
"angular_velocity": tuple(self.angular_velocity),
"acceleration": tuple(self.angular_velocity)
}
return str(fields)
class _StateError(Structure):
def __repr__(self):
fields = {
"attitude": tuple(self.attitude),
"angular_velocity": tuple(self.angular_velocity)
}
return str(fields)
class _Innovation(Structure):
def __repr__(self):
fields = {
"accel": tuple(self.accel),
"gyro": tuple(self.gyro),
"mag": tuple(self.mag)
}
return str(fields)
class _Parameters(Structure):
def __repr__(self):
        fields = {
"accel_bias": tuple(self.accel_bias),
"gyro_bias": tuple(self.gyro_bias),
"mag_bias": tuple(self.mag_bias),
"mag_scale": tuple(self.mag_scale),
"mag_field_norm": tuple(self.mag_field_norm),
"mag_field_inclination": tuple(self.mag_field_inclination)
}
        return str(fields)
# Public interface
def iterate(dt):
global _cukf, state, state_error, innovation, parameters, parameters_error
if not _cukf:
raise RuntimeError("Please call ukf.init()")
_cukf.ukf_set_state(state)
_cukf.ukf_iterate(dt)
_cukf.ukf_sensor_clear()
_cukf.ukf_get_state(state)
_cukf.ukf_get_state_error(state_error)
_cukf.ukf_get_innovation(innovation)
_cukf.ukf_get_parameters(parameters)
_cukf.ukf_get_parameters_error(parameters_error)
def set_sensors(accelerometer=None, gyroscope=None, magnetometer=None):
if accelerometer is not None:
_cukf.ukf_sensor_set_accelerometer(*accelerometer)
if gyroscope is not None:
_cukf.ukf_sensor_set_gyroscope(*gyroscope)
if magnetometer is not None:
_cukf.ukf_sensor_set_magnetometer(*magnetometer)
def configure_sensors(accelerometer_covariance=None,
gyroscope_covariance=None, magnetometer_covariance=None):
params = _SensorParams()
if getattr(accelerometer_covariance, '__iter__', False):
params.accel_covariance = accelerometer_covariance
elif accelerometer_covariance is not None:
params.accel_covariance = (accelerometer_covariance, ) * 3
else:
params.accel_covariance = (1.0, 1.0, 1.0)
if getattr(gyroscope_covariance, '__iter__', False):
params.gyro_covariance = gyroscope_covariance
elif gyroscope_covariance is not None:
params.gyro_covariance = (gyroscope_covariance, ) * 3
else:
params.gyro_covariance = (1.0, 1.0, 1.0)
if getattr(magnetometer_covariance, '__iter__', False):
params.mag_covariance = magnetometer_covariance
elif magnetometer_covariance is not None:
params.mag_covariance = (magnetometer_covariance, ) * 3
else:
params.mag_covariance = (1.0, 1.0, 1.0)
_cukf.ukf_set_params(params)
def configure_process_noise(process_noise_covariance):
_cukf.ukf_set_process_noise((_REAL_T * 6)(*process_noise_covariance))
def init():
global _cukf, _REAL_T, state, state_error, innovation, parameters, parameters_error
lib = os.path.join(os.path.dirname(__file__), "libahrs.dylib")
_cukf = cdll.LoadLibrary(lib)
_cukf.ukf_init.argtypes = []
_cukf.ukf_init.restype = None
_cukf.ukf_config_get_precision.argtypes = []
_cukf.ukf_config_get_precision.restype = c_long
_cukf.ukf_config_get_state_dim.argtypes = []
_cukf.ukf_config_get_state_dim.restype = c_long
_cukf.ukf_config_get_measurement_dim.argtypes = []
_cukf.ukf_config_get_measurement_dim.restype = c_long
_PRECISION = _cukf.ukf_config_get_precision()
_REAL_T = c_double if _PRECISION == UKF_PRECISION_DOUBLE else c_float
_STATE_DIM = _cukf.ukf_config_get_state_dim()
_MEASUREMENT_DIM = _cukf.ukf_config_get_measurement_dim()
_SensorParams._fields_ = [
("accel_covariance", _REAL_T * 3),
("gyro_covariance", _REAL_T * 3),
("mag_covariance", _REAL_T * 3)
]
_State._fields_ = [
("attitude", _REAL_T * 4),
("angular_velocity", _REAL_T * 3),
("acceleration", _REAL_T * 3)
]
_StateError._fields_ = [
("attitude", _REAL_T * 3),
("angular_velocity", _REAL_T * 3)
]
_Innovation._fields_ = [
("accel", _REAL_T * 3),
("gyro", _REAL_T * 3),
("mag", _REAL_T * 3)
]
_Parameters._fields_ = [
("accel_bias", _REAL_T * 3),
("gyro_bias", _REAL_T * 3),
("mag_bias", _REAL_T * 3),
("mag_scale", _REAL_T * 3),
("mag_field_norm", _REAL_T),
("mag_field_inclination", _REAL_T),
]
# Set up the function prototypes
_cukf.ukf_set_attitude.argtypes = [_REAL_T, _REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_attitude.restype = None
_cukf.ukf_set_angular_velocity.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_set_angular_velocity.restype = None
_cukf.ukf_get_state.argtypes = [POINTER(_State)]
_cukf.ukf_get_state.restype = None
_cukf.ukf_set_state.argtypes = [POINTER(_State)]
_cukf.ukf_set_state.restype = None
_cukf.ukf_get_state_error.argtypes = [POINTER(_StateError)]
_cukf.ukf_get_state_error.restype = None
_cukf.ukf_get_innovation.argtypes = [POINTER(_Innovation)]
_cukf.ukf_get_innovation.restype = None
_cukf.ukf_get_state_covariance.argtypes = [
POINTER(_REAL_T * (_STATE_DIM**2))]
_cukf.ukf_get_state_covariance.restype = None
_cukf.ukf_sensor_clear.argtypes = []
_cukf.ukf_sensor_clear.restype = None
_cukf.ukf_sensor_set_accelerometer.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_accelerometer.restype = None
_cukf.ukf_sensor_set_gyroscope.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_gyroscope.restype = None
_cukf.ukf_sensor_set_magnetometer.argtypes = [_REAL_T, _REAL_T, _REAL_T]
_cukf.ukf_sensor_set_magnetometer.restype = None
_cukf.ukf_set_params.argtypes = [POINTER(_SensorParams)]
_cukf.ukf_set_params.restype = None
_cukf.ukf_iterate.argtypes = [c_float]
_cukf.ukf_iterate.restype = None
_cukf.ukf_set_process_noise.argtypes = [POINTER(_REAL_T * _STATE_DIM)]
_cukf.ukf_set_process_noise.restype = None
_cukf.ukf_get_parameters.argtypes = [POINTER(_Parameters)]
_cukf.ukf_get_parameters.restype = None
_cukf.ukf_get_parameters_error.argtypes = [POINTER(_Parameters)]
_cukf.ukf_get_parameters_error.restype = None
# Initialize the library
_cukf.ukf_init()
# Set up the state
state = _State()
_cukf.ukf_get_state(state)
# Set up the state errors
state_error = _StateError()
_cukf.ukf_get_state_error(state_error)
# Set up the innovation
innovation = _Innovation()
# Set up the parameters
parameters = _Parameters()
_cukf.ukf_get_parameters(parameters)
# Set up the parameter errors
parameters_error = _Parameters()
_cukf.ukf_get_parameters_error(parameters_error)
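# Minimal usage sketch for this wrapper (covariances and sensor readings are
# made-up values):
#
#     import ukf
#     ukf.init()
#     ukf.configure_sensors(accelerometer_covariance=1e-2,
#                           gyroscope_covariance=1e-3,
#                           magnetometer_covariance=1e-1)
#     ukf.set_sensors(accelerometer=(0.0, 0.0, -9.8),
#                     gyroscope=(0.0, 0.0, 0.0),
#                     magnetometer=(0.2, 0.0, 0.4))
#     ukf.iterate(0.01)
#     print(ukf.state)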
|
kino/skills/card.py | DongjunLee/kino-bot | 109 | 12762448 | <filename>kino/skills/card.py<gh_stars>100-1000
import arrow
import re
from ..slack.resource import MsgResource
from ..utils.data_handler import DataHandler
from ..utils.member import Member
class BusinessCard(object):
def __init__(self, slackbot=None):
self.fname = "card.json"
self.data_handler = DataHandler()
if slackbot is None:
self.slackbot = SlackerAdapter()
else:
self.slackbot = slackbot
def read_holder(self):
card_data = self.data_handler.read_file(self.fname)
holder_names = ", ".join(card_data.get("holder", []))
holder_names = re.sub("([A-Z])+", r"\1-", holder_names)
self.slackbot.send_message(text=MsgResource.CARD_HOLDER(names=holder_names))
def read_history(self):
card_data = self.data_handler.read_file(self.fname)
historys = "\n - ".join(card_data.get("history", [])[-5:])
self.slackbot.send_message(text=MsgResource.CARD_HISTORY(historys=historys))
def forward(self, member):
if member is None:
self.slackbot.send_message(text=MsgResource.CARD_FORWARD_NONE)
return
elif len(member) > 2:
self.slackbot.send_message(text=MsgResource.CARD_FORWARD_NONE)
return
if len(member) == 2:
from_name = member[0]
to_name = member[1]
else: # len(member) == 1
member_util = Member()
from_name = member_util.get_name(self.slackbot.user)
to_name = member[0]
if from_name != to_name:
card_data = self.data_handler.read_file(self.fname)
holder_data = card_data.get("holder", [])
if from_name not in holder_data:
self.slackbot.send_message(
text=MsgResource.NOT_CARD_HOLDER(from_name=from_name)
)
return
holder_data.remove(from_name)
holder_data.append(to_name)
history_data = card_data.get("history", [])
history_data.append(
arrow.now().format("YYYY-MM-DD HH:mm") + f": {from_name} -> {to_name}"
)
card_data["holder"] = holder_data
card_data["history"] = history_data
self.data_handler.write_file(self.fname, card_data)
self.slackbot.send_message(
text=MsgResource.CARD_FORWARD(from_name=from_name, to_name=to_name)
)
|
extras/test_octasphere.py | BruegelN/svg3d | 286 | 12762449 | <reponame>BruegelN/svg3d<filename>extras/test_octasphere.py<gh_stars>100-1000
#!/usr/bin/env python3
import numpy as np
import svgwrite.utils
from octasphere import octasphere
import pyrr
from parent_folder import svg3d
from math import *
create_ortho = pyrr.matrix44.create_orthogonal_projection
create_perspective = pyrr.matrix44.create_perspective_projection
create_lookat = pyrr.matrix44.create_look_at
np.set_printoptions(formatter={'float': lambda x: "{0:+0.3f}".format(x)})
quaternion = pyrr.quaternion
SHININESS = 100
DIFFUSE = np.float32([1.0, 0.8, 0.2])
SPECULAR = np.float32([0.5, 0.5, 0.5])
SIZE = (512, 256)
def rgb(r, g, b):
r = max(0.0, min(r, 1.0))
g = max(0.0, min(g, 1.0))
b = max(0.0, min(b, 1.0))
return svgwrite.utils.rgb(r * 255, g * 255, b * 255)
def rotate_faces(faces):
q = quaternion.create_from_eulers([pi * -0.4, pi * 0.9, 0])
new_faces = []
for f in faces:
verts = [quaternion.apply_to_vector(q, v) for v in f]
new_faces.append(verts)
return np.float32(new_faces)
def translate_faces(faces, offset):
return faces + np.float32(offset)
def merge_faces(faces0, faces1):
return np.vstack([faces0, faces1])
projection = create_perspective(fovy=25, aspect=2, near=10, far=200)
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
camera = svg3d.Camera(view_matrix, projection)
def make_octaspheres(ndivisions: int, radius: float, width=0, height=0, depth=0):
verts, indices = octasphere(ndivisions, radius, width, height, depth)
faces = verts[indices]
left = translate_faces(faces, [ -12, 0, 0])
right = translate_faces(rotate_faces(faces), [ 12, 0, 0])
faces = merge_faces(left, right)
ones = np.ones(faces.shape[:2] + (1,))
eyespace_faces = np.dstack([faces, ones])
eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
L = pyrr.vector.normalize(np.float32([20, 20, 50]))
E = np.float32([0, 0, 1])
H = pyrr.vector.normalize(L + E)
def frontface_shader(face_index, winding):
if winding < 0:
return None
face = eyespace_faces[face_index]
p0, p1, p2 = face[0], face[1], face[2]
N = pyrr.vector3.cross(p1 - p0, p2 - p0)
l2 = pyrr.vector3.squared_length(N)
if l2 > 0:
N = N / np.sqrt(l2)
df = max(0, np.dot(N, L))
sf = pow(max(0, np.dot(N, H)), SHININESS)
color = df * DIFFUSE + sf * SPECULAR
color = np.power(color, 1.0 / 2.2)
return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
print(f"Generated octasphere: {ndivisions}, {radius}, {width}, {height}, {depth}")
return [svg3d.Mesh(faces, frontface_shader)]
vp = svg3d.Viewport(-1, -.5, 2, 1)
engine = svg3d.Engine([])
if False:
mesh = make_octaspheres(ndivisions=2, radius=8)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere3.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=7, width=16, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere1.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=0, radius=7, width=16, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere2.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=12, height=12, depth=12)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere4.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=1, width=12, height=12, depth=12)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere5.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=16, height=16, depth=0)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere6.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=16, height=0, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere7.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=3, width=0, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere8.svg", size=SIZE)
mesh = make_octaspheres(ndivisions=3, radius=0, width=16, height=16, depth=16)
engine.views = [svg3d.View(camera, svg3d.Scene(mesh), vp)]
engine.render("octasphere9.svg", size=SIZE)
def tile():
verts, indices = octasphere(ndivisions=3, radius=3, width=18, height=18, depth=0)
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
faces = verts[indices]
ones = np.ones(faces.shape[:2] + (1,))
eyespace_faces = np.dstack([faces, ones])
eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
L = pyrr.vector.normalize(np.float32([20, 20, 50]))
E = np.float32([0, 0, 1])
H = pyrr.vector.normalize(L + E)
def frontface_shader(face_index, winding):
if winding < 0:
return None
face = eyespace_faces[face_index]
p0, p1, p2 = face[0], face[1], face[2]
N = pyrr.vector3.cross(p1 - p0, p2 - p0)
l2 = pyrr.vector3.squared_length(N)
if l2 > 0:
N = N / np.sqrt(l2)
df = max(0, np.dot(N, L))
sf = pow(max(0, np.dot(N, H)), SHININESS)
color = df * DIFFUSE + sf * SPECULAR
color = np.power(color, 1.0 / 2.2)
return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
return svg3d.Mesh(faces, frontface_shader)
def rounded_cube():
verts, indices = octasphere(ndivisions=3, radius=1, width=15, height=15, depth=13)
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
faces = verts[indices]
ones = np.ones(faces.shape[:2] + (1,))
eyespace_faces = np.dstack([faces, ones])
eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
L = pyrr.vector.normalize(np.float32([20, 20, 50]))
E = np.float32([0, 0, 1])
H = pyrr.vector.normalize(L + E)
def frontface_shader(face_index, winding):
if winding < 0:
return None
face = eyespace_faces[face_index]
p0, p1, p2 = face[0], face[1], face[2]
N = pyrr.vector3.cross(p1 - p0, p2 - p0)
l2 = pyrr.vector3.squared_length(N)
if l2 > 0:
N = N / np.sqrt(l2)
df = max(0, np.dot(N, L))
sf = pow(max(0, np.dot(N, H)), SHININESS)
color = df * DIFFUSE + sf * SPECULAR
color = np.power(color, 1.0 / 2.2)
return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
return svg3d.Mesh(faces, frontface_shader)
def capsule():
verts, indices = octasphere(ndivisions=3, radius=4, width=18, height=0, depth=0)
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
faces = verts[indices]
ones = np.ones(faces.shape[:2] + (1,))
eyespace_faces = np.dstack([faces, ones])
eyespace_faces = np.dot(eyespace_faces, view_matrix)[:, :, :3]
L = pyrr.vector.normalize(np.float32([20, 20, 50]))
E = np.float32([0, 0, 1])
H = pyrr.vector.normalize(L + E)
def frontface_shader(face_index, winding):
if winding < 0:
return None
face = eyespace_faces[face_index]
p0, p1, p2 = face[0], face[1], face[2]
N = pyrr.vector3.cross(p1 - p0, p2 - p0)
l2 = pyrr.vector3.squared_length(N)
if l2 > 0:
N = N / np.sqrt(l2)
df = max(0, np.dot(N, L))
sf = pow(max(0, np.dot(N, H)), SHININESS)
color = df * DIFFUSE + sf * SPECULAR
color = np.power(color, 1.0 / 2.2)
return dict(fill=rgb(*color), stroke="black", stroke_width="0.001")
return svg3d.Mesh(faces, frontface_shader)
view_matrix = create_lookat(eye=[25, 20, 60], target=[0, 0, 0], up=[0, 1, 0])
projection = create_perspective(fovy=25, aspect=1, near=10, far=200)
camera = svg3d.Camera(view_matrix, projection)
dx = .9
x = -.5
y = -.15
w, h = 1.3, 1.3
engine.views = [
svg3d.View(camera, svg3d.Scene([tile()]), svg3d.Viewport(x-1, y-.5, w, h)),
svg3d.View(camera, svg3d.Scene([rounded_cube()]), svg3d.Viewport(x-1+dx, y-.5, w, h)),
svg3d.View(camera, svg3d.Scene([capsule()]), svg3d.Viewport(x-1+dx*2, y-.5, w, h)),
]
engine.render("ThreeCuboids.svg", size=(600, 200))
|
users/urls.py | yileye/OpenSA | 280 | 12762462 | #!/usr/bin/env python
# ~*~ coding: utf-8 ~*~
from __future__ import absolute_import
from django.urls import path
from users.views import login, users, groups, project, permission,role,keys
app_name = 'users'
urlpatterns = [
# Login View
path('login/', login.UserLoginView.as_view(), name='login'),
path('logout/', login.UserLogoutView.as_view(), name='logout'),
# User View
path('users-list/', users.UsersListAll.as_view(), name='users_list'),
path('users-add/', users.UsersAdd.as_view(), name='users_add'),
path('users-update/<int:pk>/', users.UsersUpdate.as_view(), name='users_update'),
path('users-all-del/', users.UsersAllDel.as_view(), name='users_all_del'),
path('users-change-password/', users.UsersChangePassword.as_view(), name='users_change_password'),
path('users-detail/<int:pk>/', users.UsersDetail.as_view(), name='users_detail'),
# DepartMent View
path('groups-list/', groups.GroupsListAll.as_view(), name='groups_list'),
path('groups-add/', groups.GroupsAdd.as_view(), name='groups_add'),
path('groups-update/<int:pk>/', groups.GroupsUpdate.as_view(), name='groups_update'),
path('groups-all-del/', groups.GroupsAllDel.as_view(), name='groups_all_del'),
# Project View
path('project-list/', project.ProjectListAll.as_view(), name='project_list'),
path('project-add/', project.ProjectAdd.as_view(), name='project_add'),
path('project-update/<int:pk>/', project.ProjectUpdate.as_view(), name='project_update'),
path('project-all-del/', project.ProjectDel.as_view(), name='project_all_del'),
# KeyManage View
path('key-list/', keys.KeyListAll.as_view(), name='key_list'),
path('key-add/', keys.KeyAdd.as_view(), name='key_add'),
path('key-update/<uuid:pk>/', keys.KeyUpdate.as_view(), name='key_update'),
path('key-all-del/', keys.KeyAllDel.as_view(), name='key_all_del'),
# PermissionList View
path('permission-list/', permission.PermissionListAll.as_view(), name='permission_list'),
path('permission-add/', permission.PermissionAdd.as_view(), name='permission_add'),
path('permission-update/<int:pk>/', permission.PermissionUpdate.as_view(), name='permission_update'),
path('permission-all-del/', permission.PermissionAllDel.as_view(), name='permission_all_del'),
# RoleList View
path('role-list/', role.RoleAll.as_view(), name='role_list'),
path('role-edit/<int:pk>/', role.RoleEdit.as_view(), name='role_edit'),
path('role-all-del/', role.RoleAllDel.as_view(), name='role_all_del'),
] |
testing/business_lookup_responses.py | ricwillis98/yelp-python | 195 | 12762464 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import responses
from testing.util import read_json_file
YELP_SAN_FRANCISCO = responses.Response(
method="GET",
url="https://api.yelp.com/v3/businesses/yelp-san-francisco",
json=read_json_file("business_lookup_yelp_san_francisco.json"),
status=200,
)
SACRE_COEUR_PARIS = responses.Response(
method="GET",
url="https://api.yelp.com/v3/businesses/basilique-du-sacré-cœur-de-montmartre-paris-3", # noqa: E501
json=read_json_file("business_lookup_sacre_coeur_paris.json"),
status=200,
)
|
autoencoder/baseline/doc2vec.py | hugochan/K-Competitive-Autoencoder-for-Text-Analytics | 133 | 12762474 | '''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import multiprocessing
from gensim.models import Doc2Vec
class MyDoc2Vec(object):
def __init__(self, dim, hs=0, window=5, negative=5, epoches=5, dm=1, dm_concat=1):
super(MyDoc2Vec, self).__init__()
self.dim = dim
self.hs = hs
self.window = window
self.negative = negative
self.epoches = epoches
self.dm = dm
self.dm_concat = dm_concat
def train(self, corpus):
self.model = Doc2Vec(min_count=1, window=self.window, size=self.dim, \
workers=multiprocessing.cpu_count(), hs=self.hs,\
negative=self.negative, iter=1, dm=self.dm, dm_concat=self.dm_concat)
self.model.build_vocab(corpus())
for each in range(self.epoches):
self.model.train(corpus())
return self
def predict(model, corpus):
doc_codes = {}
for doc_words, doc_name in corpus():
doc_codes[doc_name[0]] = model.infer_vector(doc_words).tolist()
return doc_codes
def save_doc2vec(model, outfile):
model.save(outfile)
def load_doc2vec(mod_file):
return Doc2Vec.load(mod_file)
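# Usage sketch with a hypothetical corpus: the corpus callable must yield
# objects that unpack into (words, [doc_name]), e.g. gensim TaggedDocument.
#
#     from gensim.models.doc2vec import TaggedDocument
#
#     def corpus():
#         docs = {"doc_a": ["hello", "world"], "doc_b": ["foo", "bar"]}
#         for name, words in docs.items():
#             yield TaggedDocument(words=words, tags=[name])
#
#     model = MyDoc2Vec(dim=50, epoches=2).train(corpus).model
#     codes = predict(model, corpus)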
|