max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
aliyun/log/etl_core/trans_comp/trans_base.py | topdown618/aliyun-log-python-sdk | 130 | 11092592 | <filename>aliyun/log/etl_core/trans_comp/trans_base.py
import json
import six
from ..etl_util import u
from ..etl_util import get_re_full_match, get_set_mode_if_skip_fn
import logging
logger = logging.getLogger(__name__)
class trans_comp_base(object):
@property
def __name__(self):
return str(type(self))
@staticmethod
def _n(v):
"""
convert string to utf8 in Py2 or unicode in Py3
:param v:
:return:
"""
if v is None:
return ""
if isinstance(v, (dict, list)):
try:
v = json.dumps(v)
except Exception:
pass
elif six.PY2 and isinstance(v, six.text_type):
v = v.encode('utf8', "ignore")
elif six.PY3 and isinstance(v, six.binary_type):
v = v.decode('utf8', "ignore")
return str(v)
@staticmethod
def _u(d):
"""
convert string, string container or unicode
:param d:
:return:
"""
return u(d)
class trans_comp_check_mdoe_base(trans_comp_base):
DEFAULT_KEYWORD_PTN = u'[\u4e00-\u9fa5\u0800-\u4e00a-zA-Z][\u4e00-\u9fa5\u0800-\u4e00\\w\\.\\-]*'
SET_MODE = {
"fill": get_set_mode_if_skip_fn(False, True, False),
"add": get_set_mode_if_skip_fn(True, False, False),
"overwrite": get_set_mode_if_skip_fn(False, False, False),
"fill-auto": get_set_mode_if_skip_fn(False, True, True),
"add-auto": get_set_mode_if_skip_fn(True, False, True),
"overwrite-auto": get_set_mode_if_skip_fn(False, False, True)
}
DEFAULT_SET_MODE = 'fill-auto'
def __init__(self, mode=None):
super(trans_comp_check_mdoe_base, self).__init__()
self.kw_ptn = get_re_full_match(self.DEFAULT_KEYWORD_PTN)
self.skip_if = self.SET_MODE.get(mode, self.SET_MODE[self.DEFAULT_SET_MODE])
def set(self, e, k, v, real_k=None, check_kw_name=False):
if not check_kw_name or (check_kw_name and self.kw_ptn(k)):
real_k = real_k or k
if k and not self.skip_if(e, k, v):
e[real_k] = v
return True
logger.debug("{1}: skip detected k-v due to current mode: {0}".format((k, v), type(self)))
return False
def sets(self, e, e_new, check_kw_name=False):
has_update = False
for k, v in six.iteritems(e_new):
has_update = self.set(e, k, v, check_kw_name=check_kw_name) or has_update
return has_update
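# Minimal usage sketch (hypothetical values; the exact skip behavior of each mode
# is defined by get_set_mode_if_skip_fn in etl_util, so this only illustrates the call shape):
#   t = trans_comp_check_mdoe_base(mode="overwrite")
#   event = {"k1": "old"}
#   t.sets(event, {"k1": "new", "k2": "v2"})   # returns True if any key was written
#   # under "overwrite" both keys are written; "fill"/"add" modes may skip existing keys instead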
|
MNIST_with_centerloss.py | jxgu1016/MNIST_with_centerloss.pytorch | 346 | 11092603 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from CenterLoss import CenterLoss
import matplotlib.pyplot as plt
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1_1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
self.prelu1_1 = nn.PReLU()
self.conv1_2 = nn.Conv2d(32, 32, kernel_size=5, padding=2)
self.prelu1_2 = nn.PReLU()
self.conv2_1 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.prelu2_1 = nn.PReLU()
self.conv2_2 = nn.Conv2d(64, 64, kernel_size=5, padding=2)
self.prelu2_2 = nn.PReLU()
self.conv3_1 = nn.Conv2d(64, 128, kernel_size=5, padding=2)
self.prelu3_1 = nn.PReLU()
self.conv3_2 = nn.Conv2d(128, 128, kernel_size=5, padding=2)
self.prelu3_2 = nn.PReLU()
self.preluip1 = nn.PReLU()
self.ip1 = nn.Linear(128*3*3, 2)
self.ip2 = nn.Linear(2, 10, bias=False)
def forward(self, x):
x = self.prelu1_1(self.conv1_1(x))
x = self.prelu1_2(self.conv1_2(x))
x = F.max_pool2d(x,2)
x = self.prelu2_1(self.conv2_1(x))
x = self.prelu2_2(self.conv2_2(x))
x = F.max_pool2d(x,2)
x = self.prelu3_1(self.conv3_1(x))
x = self.prelu3_2(self.conv3_2(x))
x = F.max_pool2d(x,2)
x = x.view(-1, 128*3*3)
ip1 = self.preluip1(self.ip1(x))
ip2 = self.ip2(ip1)
return ip1, F.log_softmax(ip2, dim=1)
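# Note: ip1 is the 2-D embedding that CenterLoss and visualize() operate on, while the
# log-softmax of ip2 feeds the NLLLoss classification term in train() below.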
def visualize(feat, labels, epoch):
plt.ion()
c = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
'#ff00ff', '#990000', '#999900', '#009900', '#009999']
plt.clf()
for i in range(10):
plt.plot(feat[labels == i, 0], feat[labels == i, 1], '.', c=c[i])
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc = 'upper right')
plt.xlim(xmin=-8,xmax=8)
plt.ylim(ymin=-8,ymax=8)
plt.text(-7.8,7.3,"epoch=%d" % epoch)
plt.savefig('./images/epoch=%d.jpg' % epoch)
plt.draw()
plt.pause(0.001)
def train(epoch):
print "Training... Epoch = %d" % epoch
ip1_loader = []
idx_loader = []
for i,(data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
ip1, pred = model(data)
loss = nllloss(pred, target) + loss_weight * centerloss(target, ip1)
optimizer4nn.zero_grad()
optimzer4center.zero_grad()
loss.backward()
optimizer4nn.step()
optimzer4center.step()
ip1_loader.append(ip1)
idx_loader.append((target))
feat = torch.cat(ip1_loader, 0)
labels = torch.cat(idx_loader, 0)
visualize(feat.data.cpu().numpy(),labels.data.cpu().numpy(),epoch)
use_cuda = torch.cuda.is_available() and True
device = torch.device("cuda" if use_cuda else "cpu")
# Dataset
trainset = datasets.MNIST('../MNIST', download=True,train=True, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
train_loader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4)
# Model
model = Net().to(device)
# NLLLoss
nllloss = nn.NLLLoss().to(device) #CrossEntropyLoss = log_softmax + NLLLoss
# CenterLoss
loss_weight = 1
centerloss = CenterLoss(10, 2).to(device)
# optimzer4nn
optimizer4nn = optim.SGD(model.parameters(),lr=0.001,momentum=0.9, weight_decay=0.0005)
sheduler = lr_scheduler.StepLR(optimizer4nn,20,gamma=0.8)
# optimzer4center
optimzer4center = optim.SGD(centerloss.parameters(), lr =0.5)
for epoch in range(100):
sheduler.step()
# print optimizer4nn.param_groups[0]['lr']
train(epoch+1)
|
idaes/commands/examples.py | carldlaird/idaes-pse | 112 | 11092671 | <filename>idaes/commands/examples.py<gh_stars>100-1000
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Install IDAES example files locally.
By default, this will download the examples from "examples-pse/src" on Github,
with a release matching the version of the currently installed idaes package,
into a sub-directory of the current directory named "examples". It will
also, unless directed otherwise, install all Python modules from the downloaded
directory into a package called "idaes_examples".
Options let the user choose a different version, directory, and
whether to actually download or install.
"""
# Pyomo utility for delayed import
from pyomo.common.dependencies import attempt_import
# stdlib
from collections import namedtuple
from datetime import datetime
from io import StringIO
import logging
from operator import attrgetter
import os
import re
from pathlib import Path
import shutil
import sys
from typing import List
from uuid import uuid4
from zipfile import ZipFile
import json
# third-party
import click
# third-party slow
nb_exporters= attempt_import("nbconvert.exporters")[0]
nb_writers = attempt_import("nbconvert.writers")[0]
traitlets_config = attempt_import("traitlets.config")[0]
nbformat = attempt_import("nbformat")[0]
requests = attempt_import("requests")[0]
# package
from idaes.commands import cb
from idaes.commands.base import how_to_report_an_error
from idaes.ver import package_version as V
__author__ = "<NAME>"
_log = logging.getLogger("idaes.commands.examples")
# Constants
GITHUB = "https://github.com"
GITHUB_API = "https://api.github.com"
REPO_ORG = "idaes"
REPO_NAME = "examples-pse"
REPO_DIR = "src"
PKG_VERSION = f"{V.major}.{V.minor}.{V.micro}"
INSTALL_PKG = "idaes_examples"
JUPYTER_NB_VERSION = 4 # for parsing
REMOVE_CELL_TAG = "remove_cell" # for stripping Jupyter notebook cells
SOLUTION_CELL_TAG = "solution" # ditto
EXERCISE_CELL_TAG = "exercise" # ditto
STRIPPED_NOTEBOOK_SUFFIX = "_s"
# Global vars
g_tempdir, g_egg = None, None
# Exceptions
class DownloadError(Exception):
"""Used for errors downloading the release files.
"""
pass
class CopyError(Exception):
"""Used for errors copying files.
"""
pass
class InstallError(Exception):
"""Used for errors installing the source as a Python package.
"""
pass
class GithubError(Exception):
pass
Release = namedtuple("Release", ["date", "tag", "info"])
@cb.command(
name="get-examples", help="Fetch example scripts and Jupyter Notebooks."
)
@click.option(
"--dir", "-d", "directory", help="installation target directory", default="examples",
type=str,
)
@click.option(
"--local", "local_dir",
help="For developers: instead of downloading, copy from an "
"idaes-examples repository on local disk"
)
@click.option(
"--no-install", "-I", "no_install", help="Do *not* install examples into 'idaes_examples' package",
is_flag=True
)
@click.option(
"--list",
"-l",
"list_releases",
help="List all available released versions, and stop",
is_flag=True,
)
@click.option(
"--no-download",
"-N",
"no_download",
help="Do not download anything",
is_flag=True
)
@click.option(
"--unstable",
"-U",
help="Allow and list unstable/pre-release versions",
is_flag=True
)
@click.option(
"--version",
"-V",
help=f"Version of examples to download",
default=None,
show_default=False,
)
def get_examples(directory, no_install, list_releases, no_download, version,
unstable, local_dir):
"""Get the examples from Github and put them in a local directory.
"""
# list-releases mode
if list_releases:
try:
releases = get_releases(unstable)
except GithubError as err:
click.echo(f"Error getting data from Github: {err}")
sys.exit(-1)
print_releases(releases, unstable)
sys.exit(0)
# otherwise..
target_dir = Path(directory)
# do nothing?
if no_download and no_install:
click.echo("Download and installation disabled. Done.")
sys.exit(0)
# download?
if no_download: # no download
_log.info("skipping download step (use existing examples)")
click.echo("Skip download")
else: # download
if target_dir.exists():
click.echo(f"Target directory '{target_dir}' already exists. Please "
f"remove it, or choose a different directory.")
sys.exit(-1)
if local_dir is not None:
click.echo(f"Copying from local directory: {local_dir}")
local_path = Path(local_dir)
if not local_path.exists() or not local_path.is_dir():
click.echo(f"Cannot copy from local directory '{local_dir}': "
f"directory does not exist, or not a directory")
sys.exit(-1)
try:
copy_contents(target_dir, local_path)
except CopyError as err:
click.echo(f"Failed to copy from '{local_dir}' to '{target_dir}': "
f"{err}")
sys.exit(-1)
ex_version = version if version else PKG_VERSION
else:
try:
releases = get_releases(unstable)
except GithubError as err:
click.echo(f"Error getting data from Github: {err}")
sys.exit(-1)
# check that unstable flag is given before downloading unstable version
if version is not None:
stable_ver = re.match(r".*-\w+$", version) is None
if not stable_ver and not unstable:
click.echo(f"Cannot download unstable version {version} unless you add "
f"the -U/--unstable flag")
sys.exit(-1)
# set version
if version is None:
ex_version = get_examples_version(PKG_VERSION)
else:
ex_version = version
# give an error if selected version does not exist
if ex_version not in [r.tag for r in releases]:
if version is None:
click.echo(f"Internal Error: Could not find an examples release\n"
f"matching IDAES version {PKG_VERSION}\n"
f"You can manually pick version with '-V/--version'\n"
f"or install from a local directory with '--local'.\n"
f"Use '-l/--list-releases' to see all versions.\n"
f"{how_to_report_an_error()}\n")
else:
click.echo(f"Could not find an examples release matching IDAES version "
f"{version}.\n Use -l/--list-releases to see all "
f"available versions.")
sys.exit(-1)
click.echo("Downloading...")
try:
download(target_dir, ex_version)
except DownloadError as err:
_log.warning(f"abort due to failed download: {err}")
clean_up_temporary_files()
sys.exit(-1)
full_dir = os.path.realpath(target_dir)
_log.info(f"downloaded examples to: '{full_dir}'")
# install
if no_install:
_log.info("skipping installation step")
click.echo("Skip install")
else:
if not target_dir.exists():
if no_download:
click.echo(f"Target directory '{target_dir}' does not exist")
sys.exit(-1)
else:
click.echo(f"Internal error: After download, directory '{target_dir}'\n"
f"does not exist.\n"
f"{how_to_report_an_error()}")
sys.exit(-1)
click.echo("Installing...")
try:
install_src(ex_version, target_dir)
except InstallError as err:
click.echo(f"Install error: {err}")
clean_up_temporary_files()
sys.exit(-1)
_log.info(f"Installed examples as package {INSTALL_PKG}")
click.echo("Cleaning up...")
# strip notebooks
_log.info("Stripping test/solution cells from notebooks")
strip_special_cells(target_dir)
# temporary files
_log.info("Removing temporary files")
clean_up_temporary_files()
# Done
print_summary( ex_version, target_dir, not no_install)
def print_summary(version, dirname, installed):
sep1, sep2 = "-" * 40, "=" * 40
print(f"{sep1}\nIDAES Examples {version}\n{sep2}")
print(f"Path : {dirname}")
if installed:
print(f"Package: {INSTALL_PKG}")
else:
print(f"Package: not installed")
print(sep2)
def get_examples_version(idaes_version: str):
"""Given the specified 'idaes-pse' repository release version,
identify the matching 'examples-pse' repository release version.
Args:
idaes_version: IDAES version, e.g. "1.5.0" or "1.5.0.dev0+e1bbb[...]"
Returns:
Examples version, or if there is no match, return None.
"""
# Fetch the idaes:examples version mapping from Github
compat_file = 'idaes-compatibility.json'
url = f"{GITHUB_API}/repos/{REPO_ORG}/{REPO_NAME}/contents/{compat_file}"
headers = {'Accept': 'application/vnd.github.v3.raw'}
_log.debug(f'About to call requests.get({url}, {headers})')
res = requests.get(url, headers=headers)
if not res.ok:
_log.debug(f'Problem getting mapping file: {res.json()}')
raise DownloadError(res.json())
try:
compat_mapping = json.loads(res.text)['mapping']
except KeyError:
# return the latest version instead
_log.warning('Ill-formed compatibility mapping file for examples repository:')
_log.debug(f'compat_mapping: {res.text}')
_log.info('Defaulting to latest released version of examples.')
return None
idaes_version_num = idaes_version
version_numbers = idaes_version.split('.')
if len(version_numbers) > 3:
idaes_version_num = '.'.join(version_numbers[:3])
click.echo(f"Warning: non-release version of IDAES detected. "
f"Using IDAES {idaes_version_num} as reference; "
f"examples version compatibility is not guaranteed.")
try:
examples_version = compat_mapping[idaes_version_num]
except KeyError:
# return the latest version instead, as above
_log.warning('IDAES version not found in compatibility mapping file. \
Defaulting to latest released version of examples.')
return None
_log.debug(f'get_examples_version({idaes_version}: {examples_version}')
return examples_version
def download(target_dir: Path, version: str):
"""Download `version` into `target_dir`.
Raises:
DownloadError
"""
# check target directory
if target_dir.exists():
click.echo(f"Directory '{target_dir}' exists. Please move or delete and "
f"try this command again")
raise DownloadError("directory exists")
# download
try:
download_contents(target_dir, version)
except DownloadError as err:
click.echo(f"Download failed: {err}")
raise
def is_illegal_dir(d: Path):
"""Refuse to remove directories for some situations, for safety.
"""
if (d / ".git").exists():
return ".git file found"
if d.absolute() == Path.home().absolute():
return "cannot replace home directory"
if d.absolute() == Path("/").absolute():
return "cannot replace root directory"
return None
def download_contents(target_dir, version):
"""Download the given version from the Github releases and make
its `REPO_DIR` subdirectory be the `target_dir`.
Raises:
DownloadError: if the GET on the release URL returns non-200 status
"""
global g_tempdir
url = archive_file_url(version)
_log.info(f"get examples from: {url}")
# stream out to a big .zip file
req = requests.get(url, stream=True)
if req.status_code != 200:
if req.status_code in (400, 404):
raise DownloadError(f"file not found")
raise DownloadError(f"status={req.status_code}")
# note: mkdtemp() creates a directory that seems un-removable on Windows.
# So, instead, just create the directory yourself in the current directory
random_name = str(uuid4())
try:
os.mkdir(random_name)
except Exception as err:
_log.fatal(f"making directory '{random_name}': {err}")
click.echo("Cannot make temporary directory in current directory. Abort.")
sys.exit(-1)
g_tempdir = tempdir = Path(random_name)
_log.debug(f"created temporary directory '{tempdir.name}'")
tempfile = tempdir / "examples.zip"
with tempfile.open("wb") as f:
for chunk in req.iter_content(chunk_size=65536):
f.write(chunk)
_log.info(f"downloaded zipfile to {tempfile}")
# open as a zip file, and extract all files into the temporary directory
_log.debug(f"open zip file: {tempfile}")
zipf = ZipFile(str(tempfile))
zipf.extractall(path=tempdir.name)
# move the REPO_DIR subdirectory into the target dir
subdir = Path(tempdir.name) / f"{REPO_NAME}-{version}" / REPO_DIR
_log.debug(f"move {subdir} -> {target_dir}")
shutil.move(str(subdir), str(target_dir))
zipf.close()
def copy_contents(target_dir, repo_root):
subdir = repo_root / REPO_DIR
if not subdir.is_dir():
raise CopyError(f"Could not copy from '{subdir}': not a directory")
_log.info(f"copy.local.start from={subdir} to={target_dir}")
try:
shutil.copytree(subdir, target_dir)
except shutil.Error as err:
raise CopyError(err)
except FileNotFoundError:
raise CopyError(f"Could not find file '{subdir}'")
except Exception as err:
raise CopyError(f"Unknown problem copying: {err}")
_log.info(f"copy.local.end from={subdir} to={target_dir}")
def clean_up_temporary_files():
# temporary directory created for unzipping and renaming
if g_tempdir:
d = g_tempdir
_log.debug(f"remove temporary directory.start name='{d}'")
try:
shutil.rmtree(d)
except Exception as err: # WTF
_log.warning(f"remove temporary directory.error name='{d}' msg='{err}'")
else:
_log.debug(f"removed temporary directory.end name='{d}'")
# egg file created by setuptools
if g_egg and g_egg.exists() and g_egg.is_dir():
_log.debug(f"remove setuptools egg path='{g_egg}'")
try:
shutil.rmtree(g_egg.name)
except Exception as err:
_log.warning(f"remove temporary directory.error name='{g_egg}' msg='{err}'")
# dist directory created by setuptools
d = Path("dist")
if d.exists() and d.is_dir():
for f in d.glob("idaes_examples-*.egg"):
try:
f.unlink()
except Exception as err:
_log.warning(f"could not remove distribution file {f}: {err}")
# remove directory, if now empty
num_files = len(list(d.glob("*")))
if num_files == 0:
_log.info(f"removing dist directory '{d.absolute()}'")
try:
d.rmdir()
except Exception as err:
_log.warning(f"could not remove distribution directory {d}: {err}")
def archive_file_url(version, org=REPO_ORG, repo=REPO_NAME):
"""Build & return URL for a given release version.
"""
return f"{GITHUB}/{org}/{repo}/archive/{version}.zip"
def get_releases(unstable) -> List[Release]:
"""Returns a list of releases.
The list is sorted in ascending order by date.
"""
releases = []
url = f"{GITHUB_API}/repos/{REPO_ORG}/{REPO_NAME}/releases"
resp = requests.get(url)
data, headers = resp.json(), resp.headers
check_github_response(data, headers)
for rel in data:
if not unstable and rel["prerelease"]:
continue
releases.append(Release(rel["published_at"], rel["tag_name"], rel["name"]))
releases.sort(key=attrgetter("date")) # sort by publication date
return releases
def check_github_response(data, headers):
"""Check whether GitHub gave an error message. If so, raise a GithubError
with a hopefully informative and useful message.
"""
if isinstance(data, list):
return # lists are assumed to be the releases
if isinstance(data, dict) and "message" in data:
if "rate limit exceeded" in data["message"]:
reset_ts = int(headers["X-RateLimit-Reset"])
now = datetime.now()
now_ts, tzinfo = now.timestamp(), now.astimezone().tzinfo
wait_min = int((reset_ts - now_ts) // 60) + 1
reset_dt = datetime.fromtimestamp(reset_ts)
datestr = reset_dt.astimezone(tzinfo).isoformat()
raise GithubError(f"API rate limit exceeded.\n"
f"You will need to wait {wait_min} minutes,"
f" until {datestr}, to try again from "
f"this computer.")
else:
raise GithubError(f"Error connecting to Github: {data['message']}")
else:
raise GithubError(f"Invalid result from Github: data={data} headers={headers}")
def print_releases(releases: List[Release], unstable):
"""Print the releases, as returned by `get_releases()`, as a table
to standard output.
"""
if len(releases) == 0:
if unstable:
print("No releases found")
else:
print("No stable releases found. Add -U/--unstable to also look "
"for pre-releases.")
return
# determine column widths
widths = [4, 7, 7] # widths of column titles: date,version,details
widths[0] = len(releases[0].date) # dates are all the same
# tags and names can have different widths
for rel in releases:
for i in range(1, 3):
widths[i] = max(widths[i], len(rel[i]))
# make row format
pad = " "
fmt = f"{{date:{widths[0]}s}}{pad}{{tag:{widths[1]}s}}{pad}{{name:{widths[2]}s}}"
# print header
print("")
print(fmt.format(date="Date", tag="Version", name="Details"))
print(fmt.format(date="-" * widths[0], tag="-" * widths[1], name="-" * widths[2]))
# print rows
for rel in releases:
print(fmt.format(date=rel.date, tag=rel.tag, name=rel.info))
# print footer
print("")
def install_src(version, target_dir):
"""Install the 'src' subdirectory as a package, given by `INSTALL_PKG`,
by renaming the directory, adding '__init__.py' files,
and then running `setuptools.setup()` on the directory tree.
When done, name the directory back to 'src', and remove '__init__.py' files.
Then clean up whatever cruft is left behind..
"""
from setuptools import setup, find_packages # import here due to slowness
global g_egg
orig_dir = Path(os.curdir).absolute()
target_dir = Path(target_dir.absolute())
root_dir = target_dir.parent
examples_dir = root_dir.absolute() / INSTALL_PKG
if examples_dir.exists():
raise InstallError(f"package directory {examples_dir} already exists")
_log.info(f"install into {INSTALL_PKG} package")
# set the args to make it look like the 'install' command has been invoked
saved_args = sys.argv[:]
sys.argv = ["setup.py", "install"]
# add some empty __init__.py files
_log.debug("add temporary __init__.py files")
pydirs = find_python_directories(target_dir)
pydirs.append(target_dir) # include top-level dir
try:
for d in pydirs:
init_py = d / "__init__.py"
init_py.open("w")
except IOError as err:
raise InstallError(f"error writing temporary __init__.py files: {err}")
# temporarily rename target directory to the package name
_log.info(f"rename {target_dir} -> {examples_dir}")
shutil.move(target_dir, examples_dir)
# if there is a 'build' directory, move it aside
build_dir = root_dir / 'build'
if build_dir.exists():
from uuid import uuid1
random_letters = str(uuid1())
moved_build_dir = f"{build_dir}.{random_letters}"
_log.debug(f"move existing build dir to {moved_build_dir}")
shutil.move(str(build_dir), moved_build_dir)
else:
_log.debug("no existing build directory (nothing to do)")
moved_build_dir = None
# run setuptools' setup command (in root directory)
_log.info(f"run setup command in directory {root_dir}")
os.chdir(root_dir)
packages = [d for d in find_packages() if d.startswith(INSTALL_PKG)]
_log.debug(f"install packages: {packages}")
# before running, grab stdout
orig_stdout = sys.stdout
sys.stdout = setup_out = StringIO()
# run setup
setup(
name=INSTALL_PKG,
version=version,
# description='IDAES examples',
packages=packages,
python_requires=">=3.5, <4",
zip_safe=False
)
# restore stdout
sys.stdout = orig_stdout
# print/log output
output_str = setup_out.getvalue()
if _log.isEnabledFor(logging.DEBUG):
for line in output_str.split("\n"):
_log.debug(f"(setup) {line}")
# name the target directory back to original
_log.info(f"rename '{examples_dir}' to '{target_dir}'")
shutil.move(examples_dir, target_dir)
# remove the empty __init__.py files
_log.info("remove temporary __init__.py files")
for d in pydirs:
init_py = d / "__init__.py"
_log.debug(f"remove '{init_py}")
init_py.unlink()
# remove build dir, and restore any moved build dir
_log.info(f"remove build directory '{build_dir}'")
try:
shutil.rmtree(build_dir)
except Exception as err:
_log.warning(f"failed to remove build directory {build_dir}: {err}")
if moved_build_dir is not None:
_log.info(f"restore build dir '{build_dir}' from '{moved_build_dir}'")
shutil.move(moved_build_dir, build_dir)
# restore previous args
sys.argv = saved_args
# change back to previous directory
os.chdir(orig_dir)
# save name of egg file, for later cleanup
f = root_dir / (INSTALL_PKG + ".egg-info")
if f.exists():
g_egg = f
else:
_log.warning(f"egg-info file not found path='{f}'")
def find_python_directories(target_dir: Path) -> List[Path]:
"""Find all directories from target_dir, on down, that contain a
Python module or sub-package.
"""
# get directories that contain python files -> pydirs
pydirs = set((x.parent for x in target_dir.rglob("*.py")))
# get all directories in the tree leading to the 'pydirs'
alldirs = set()
for d in pydirs:
while d != target_dir:
alldirs.add(d)
d = d.parent
return list(alldirs)
def find_notebook_files(target_dir: Path) -> List[Path]:
"""Find all files ending in ".ipynb", at or below target_dir.
"""
return list(target_dir.rglob("*.ipynb"))
def strip_special_cells(target_dir: Path):
"""Strip 'special' cells from notebooks below `target_dir`.
See `strip_tags()` for how that is done.
"""
nb_files = find_notebook_files(target_dir)
for nb_file in nb_files:
if strip_tags(nb_file):
_log.info(f"removing original file '{nb_file}'")
nb_file.unlink()
def strip_tags(nb: Path) -> bool:
"""Strip tags from notebook, if there are any.
Behavior depends on filename (case-insensitive).
- if the notebook file name ends with "_solution_testing", create two files:
1. chop off "_testing" from name, remove test and exercise cells
2. chop off "_solution_testing" from name & add "_exercise", remove solution and test cells
- if the file ends with "_testing" only, create one file:
1. chop off "_testing" from name, remove test cells
- otherwise: do nothing
Returns:
Was anything stripped (and new files created)?
Raises:
IOError: If the desired target for the stripped notebook already exists
"""
_log.info(f"stripping tags from notebook {nb}")
nb_name = nb.stem
# Figure out which tags to strip:
# - check for case (1), solution_testing.ipynb
if nb_name.lower().endswith("_solution_testing"):
pre = nb_name[:-17]
names = [f"{pre}_solution.ipynb", f"{pre}_exercise.ipynb"]
tags_to_strip = [(REMOVE_CELL_TAG, EXERCISE_CELL_TAG), (REMOVE_CELL_TAG, SOLUTION_CELL_TAG)]
# - check for case (2), _testing.ipynb
elif nb_name.lower().endswith("_testing"):
pre = nb_name[:-8]
names = [f"{pre}.ipynb"]
tags_to_strip = [(REMOVE_CELL_TAG,)]
# - if neither, we are done here
else:
_log.debug(f"notebook '{nb}' does not need to have tags stripped")
return False
# Create all desired tag-stripped copies
for name, remove_tags in zip(names, tags_to_strip):
target_nb = nb.parent / name
_log.info(f"creating new notebook '{target_nb}'")
# Don't overwrite an existing file
if target_nb.exists():
_log.warning(f"cannot create new notebook '{target_nb}': file exists")
continue
# Set up configuration for removing specially tagged cells
conf = traitlets_config.Config()
conf.TagRemovePreprocessor.remove_cell_tags = remove_tags
conf.NotebookExporter.preprocessors = [
# this requires the full module path
"nbconvert.preprocessors.TagRemovePreprocessor"
]
# Convert from Notebook format to Notebook format, stripping tags
(body, resources) = nb_exporters.NotebookExporter(
config=conf).from_filename(str(nb))
# Set up output destination
wrt = nb_writers.FilesWriter()
wrt.build_directory = str(target_nb.parent)
# Write stripped notebook to output file
wrt.write(body, resources, notebook_name=target_nb.stem)
return True
def has_tagged_cells(nb: Path):
"""Quickly check whether this notebook has any cells with the "special" tag.
Returns:
True = yes, it does; False = no specially tagged cells
Raises:
NotebookFormatError, if notebook at 'entry' is not parseable
"""
# parse the notebook (assuming this is fast; otherwise should cache it)
try:
nb = nbformat.read(str(nb), as_version=JUPYTER_NB_VERSION)
except nbformat.reader.NotJSONError:
raise ValueError(f"Notebook '{nb}' is not valid JSON")
# look for tagged cells; return immediately if one is found
for i, c in enumerate(nb.cells):
if "tags" in c.metadata and REMOVE_CELL_TAG in c.metadata.tags:
_log.debug(f"Found {REMOVE_CELL_TAG} tag in cell {i}")
return True # can stop now, one is enough
# no tagged cells
return False
|
tests/test_simulator.py | eugenpt/seagull | 162 | 11092673 | # -*- coding: utf-8 -*-
# Import modules
import pytest
import numpy as np
from matplotlib import animation
# Import from package
from seagull import lifeforms as lf
import seagull as sg
def test_simulator_run():
"""Test if the run() method returns the computed statistics"""
board = sg.Board(size=(10, 10))
board.add(lf.Blinker(length=3), loc=(0, 1))
sim = sg.Simulator(board)
stats = sim.run(sg.rules.conway_classic, iters=10)
assert isinstance(stats, dict)
@pytest.mark.parametrize("exclude_init", [True, False])
def test_simulator_get_history_shape(exclude_init):
"""Test if get_history() will return the expected shape"""
board = sg.Board(size=(10, 10))
board.add(lf.Blinker(length=3), loc=(0, 1))
sim = sg.Simulator(board)
sim.run(sg.rules.conway_classic, iters=10)
hist = sim.get_history(exclude_init)
expected_depth = 10 if exclude_init else 11
assert hist.shape == (expected_depth, 10, 10)
def test_simulator_animate():
"""Test if animate() method returns a FuncAnimation"""
board = sg.Board(size=(10, 10))
board.add(lf.Blinker(length=3), loc=(0, 1))
sim = sg.Simulator(board)
sim.run(sg.rules.conway_classic, iters=10)
anim = sim.animate()
assert isinstance(anim, animation.FuncAnimation)
def test_simulator_animate_without_run():
"""Test if animate() method throws an error when called before run()"""
board = sg.Board(size=(10, 10))
board.add(lf.Blinker(length=3), loc=(0, 1))
sim = sg.Simulator(board)
with pytest.raises(ValueError):
sim.animate()
def test_compute_statistics():
"""Test if compute_statistics() returns a dictionary"""
board = sg.Board(size=(10, 10))
board.add(lf.Blinker(length=3), loc=(0, 1))
sim = sg.Simulator(board)
sim.run(sg.rules.conway_classic, iters=10)
stats = sim.compute_statistics(sim.get_history())
assert isinstance(stats, dict)
def test_simulator_inplace():
"""Test if board state didn't change after a simulation run"""
board = sg.Board(size=(10, 10))
board.add(lf.Glider(), loc=(0, 0))
# Initial board state, must be the same always
init_board = board.state.copy()
# Run simulator
sim = sg.Simulator(board)
sim.run(sg.rules.conway_classic, iters=10)
assert np.array_equal(board.state, init_board)
|
sharppy/sharptab/params.py | skovic/SHARPpy | 163 | 11092730 | <filename>sharppy/sharptab/params.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
''' Thermodynamic Parameter Routines '''
from __future__ import division
import numpy as np
import numpy.ma as ma
from sharppy.sharptab import interp, utils, thermo, winds
from sharppy.sharptab.constants import *
'''
This file contains various functions to perform the calculation of various convection indices.
Because of this, parcel lifting routines are also found in this file.
Functions denoted with a (*) in the docstring refer to functions that were added to the SHARPpy package that
were not ported from the Storm Prediction Center. They have been included as they have been used by the
community in an effort to expand SHARPpy to support the many parameters used in atmospheric science.
While the logic for these functions is based in the scientific literature, validation
of the output from these functions is occasionally difficult to perform. Although we have made an effort
to resolve code issues when they arise, values from these functions may be erroneous and may require additional
inspection by the user. We appreciate any contributions by the meteorological community that can
help better validate these SHARPpy functions!
'''
__all__ = ['DefineParcel', 'Parcel', 'inferred_temp_advection']
__all__ += ['k_index', 't_totals', 'c_totals', 'v_totals', 'precip_water']
__all__ += ['temp_lvl', 'max_temp', 'mean_mixratio', 'mean_theta', 'mean_thetae', 'mean_relh']
__all__ += ['lapse_rate', 'max_lapse_rate', 'most_unstable_level', 'parcelx', 'bulk_rich']
__all__ += ['bunkers_storm_motion', 'effective_inflow_layer']
__all__ += ['convective_temp', 'esp', 'pbl_top', 'precip_eff', 'dcape', 'sig_severe']
__all__ += ['dgz', 'ship', 'stp_cin', 'stp_fixed', 'scp', 'mmp', 'wndg', 'sherb', 'tei', 'cape']
__all__ += ['mburst', 'dcp', 'ehi', 'sweat', 'hgz', 'lhp', 'integrate_parcel']
class DefineParcel(object):
'''
Create a parcel from a supplied profile object.
Parameters
----------
prof : profile object
Profile object
Optional Keywords
flag : int (default = 1)
Parcel Selection
1 - Observed Surface Parcel
2 - Forecast Surface Parcel
3 - Most Unstable Parcel
4 - Mean Mixed Layer Parcel
5 - User Defined Parcel
6 - Mean Effective Layer Parcel
Optional Keywords (Depending on Parcel Selected)
Parcel (flag) == 1: Observed Surface Parcel
None
Parcel (flag) == 2: Forecast Surface Parcel
pres : number (default = 100 hPa)
Depth over which to mix the boundary layer; only changes
temperature; does not affect moisture
Parcel (flag) == 3: Most Unstable Parcel
pres : number (default = 400 hPa)
Depth over which to look for the most unstable parcel
starting from the surface pressure
Parcel (flag) == 4: Mixed Layer Parcel
pres : number (default = 100 hPa)
Depth over which to mix the surface parcel
Parcel (flag) == 5: User Defined Parcel
pres : number (default = SFC - 100 hPa)
Pressure of the parcel to lift
tmpc : number (default = Temperature at the provided pressure)
Temperature of the parcel to lift
dwpc : number (default = Dew Point at the provided pressure)
Dew Point of the parcel to lift
Parcel (flag) == 6: Effective Inflow Layer
ecape : number (default = 100)
The minimum amount of CAPE a parcel needs to be considered
part of the inflow layer
ecinh : number (default = -250)
The maximum amount of CINH allowed for a parcel to be
considered as part of the inflow layer
'''
def __init__(self, prof, flag, **kwargs):
self.flag = flag
if flag == 1:
self.presval = prof.pres[prof.sfc]
self.__sfc(prof)
elif flag == 2:
self.presval = kwargs.get('pres', 100)
self.__fcst(prof, **kwargs)
elif flag == 3:
self.presval = kwargs.get('pres', 300)
self.__mu(prof, **kwargs)
elif flag == 4:
self.presval = kwargs.get('pres', 100)
self.__ml(prof, **kwargs)
elif flag == 5:
self.presval = kwargs.get('pres', prof.pres[prof.sfc])
self.__user(prof, **kwargs)
elif flag == 6:
self.presval = kwargs.get('pres', 100)
self.__effective(prof, **kwargs)
else:
self.presval = kwargs.get('pres', prof.gSndg[prof.sfc])
self.__sfc(prof)
def __sfc(self, prof):
'''
Create a parcel using surface conditions
'''
self.desc = 'Surface Parcel'
self.pres = prof.pres[prof.sfc]
self.tmpc = prof.tmpc[prof.sfc]
self.dwpc = prof.dwpc[prof.sfc]
def __fcst(self, prof, **kwargs):
'''
Create a parcel using forecast conditions.
'''
self.desc = 'Forecast Surface Parcel'
self.tmpc = max_temp(prof)
self.pres = prof.pres[prof.sfc]
pbot = self.pres; ptop = self.pres - 100.
self.dwpc = thermo.temp_at_mixrat(mean_mixratio(prof, pbot, ptop, exact=True), self.pres)
def __mu(self, prof, **kwargs):
'''
Create the most unstable parcel within the lowest XXX hPa, where
XXX is supplied. Default XXX is 400 hPa.
'''
self.desc = 'Most Unstable Parcel in Lowest %.2f hPa' % self.presval
pbot = prof.pres[prof.sfc]
ptop = pbot - self.presval
self.pres = most_unstable_level(prof, pbot=pbot, ptop=ptop)
self.tmpc = interp.temp(prof, self.pres)
self.dwpc = interp.dwpt(prof, self.pres)
def __ml(self, prof, **kwargs):
'''
Create a mixed-layer parcel with mixing within the lowest XXX hPa,
where XXX is supplied. Default is 100 hPa.
If 'pbot' is not supplied, the surface pressure is used as the bottom of the mixed layer.
'''
self.desc = '%.2f hPa Mixed Layer Parcel' % self.presval
pbot = kwargs.get('pbot', prof.pres[prof.sfc])
ptop = pbot - self.presval
self.pres = pbot
mtheta = mean_theta(prof, pbot, ptop, exact=True)
self.tmpc = thermo.theta(1000., mtheta, self.pres)
mmr = mean_mixratio(prof, pbot, ptop, exact=True)
self.dwpc = thermo.temp_at_mixrat(mmr, self.pres)
def __user(self, prof, **kwargs):
'''
Create a user defined parcel.
'''
self.desc = '%.2f hPa Parcel' % self.presval
self.pres = self.presval
self.tmpc = kwargs.get('tmpc', interp.temp(prof, self.pres))
self.dwpc = kwargs.get('dwpc', interp.dwpt(prof, self.pres))
def __effective(self, prof, **kwargs):
'''
Create the mean-effective layer parcel.
'''
ecape = kwargs.get('ecape', 100)
ecinh = kwargs.get('ecinh', -250)
pbot, ptop = effective_inflow_layer(prof, ecape, ecinh)
if utils.QC(pbot) and pbot > 0:
self.desc = '%.2f hPa Mean Effective Layer Centered at %.2f' % ( pbot-ptop, (pbot+ptop)/2.)
mtha = mean_theta(prof, pbot, ptop)
mmr = mean_mixratio(prof, pbot, ptop)
self.pres = (pbot+ptop)/2.
self.tmpc = thermo.theta(1000., mtha, self.pres)
self.dwpc = thermo.temp_at_mixrat(mmr, self.pres)
else:
self.desc = 'Defaulting to Surface Layer'
self.pres = prof.pres[prof.sfc]
self.tmpc = prof.tmpc[prof.sfc]
self.dwpc = prof.dwpc[prof.sfc]
if utils.QC(pbot): self.pbot = pbot
else: self.pbot = ma.masked
if utils.QC(ptop): self.ptop = ptop
else: self.ptop = ma.masked
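# Usage sketch (assumes `prof` is an existing profile object; variable names are illustrative):
#   mllplvals = DefineParcel(prof, flag=4, pres=100)   # 100-hPa mixed-layer parcel definition
#   mlpcl = cape(prof, lplvals=mllplvals)              # lift it, the same pattern ship() uses below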
class Parcel(object):
'''
Initialize the parcel variables
Parameters
----------
pbot : number
Lower-bound (pressure; hPa) that the parcel is lifted
ptop : number
Upper-bound (pressure; hPa) that the parcel is lifted
pres : number
Pressure of the parcel to lift (hPa)
tmpc : number
Temperature of the parcel to lift (C)
dwpc : number
Dew Point of the parcel to lift (C)
Attributes
----------
pres : number
parcel beginning pressure (mb)
tmpc : number
parcel beginning temperature (C)
dwpc : number
parcel beginning dewpoint (C)
ptrace : array
parcel trace pressure (mb)
ttrace : array
parcel trace temperature (C)
blayer : number
Pressure of the bottom of the layer the parcel is lifted (mb)
tlayer : number
Pressure of the top of the layer the parcel is lifted (mb)
entrain : number
Parcel entrainment fraction (not yet implemented)
lclpres : number
Parcel LCL (lifted condensation level) pressure (mb)
lclhght : number
Parcel LCL height (m AGL)
lfcpres : number
Parcel LFC (level of free convection) pressure (mb)
lfchght : number
Parcel LFC height (m AGL)
elpres : number
Parcel EL (equilibrium level) pressure (mb)
elhght : number
Parcel EL height (m AGL)
mplpres : number
Maximum Parcel Level (mb)
mplhght : number
Maximum Parcel Level (m AGL)
bplus : number
Parcel CAPE (J/kg)
bminus : number
Parcel CIN below 500 mb (J/kg)
bfzl : number
Parcel CAPE up to freezing level (J/kg)
b3km : number
Parcel CAPE up to 3 km (J/kg)
b6km : number
Parcel CAPE up to 6 km (J/kg)
p0c: number
Pressure value at 0 C (mb)
pm10c : number
Pressure value at -10 C (mb)
pm20c : number
Pressure value at -20 C (mb)
pm30c : number
Pressure value at -30 C (mb)
hght0c : number
Height value at 0 C (m AGL)
hghtm10c : number
Height value at -10 C (m AGL)
hghtm20c : number
Height value at -20 C (m AGL)
hghtm30c : number
Height value at -30 C (m AGL)
wm10c : number
Wetbulb at -10 C (C)
wm20c : number
Wetbulb at -20 C (C)
wm30c : number
Wetbulb at -30 C (C)
li5 : number
500-mb lifted index (C)
li3 : number
300-mb lifted index (C)
brnshear : number
Bulk Richardson Number Shear (kts)
brnu : number
U-component Bulk Richardson Number Shear (kts)
brnv : number
V-component Bulk Richardson Number Shear (kts)
brn : number
Bulk Richardson Number (unitless)
limax : number
Maximum lifted index value (C)
limaxpres : number
Pressure at Maximum lifted index (mb)
cap : number
Cap strength (C)
cappres : number
Cap strength pressure (mb)
bmin : number
Buoyancy minimum (C)
bminpres : number
Pressure at the buoyancy minimum (mb)
'''
def __init__(self, **kwargs):
self.pres = ma.masked # Parcel beginning pressure (mb)
self.tmpc = ma.masked # Parcel beginning temperature (C)
self.dwpc = ma.masked # Parcel beginning dewpoint (C)
self.ptrace = ma.masked # Parcel trace pressure (mb)
self.ttrace = ma.masked # Parcel trace temperature (C)
self.blayer = ma.masked # Pressure of the bottom of the layer the parcel is lifted (mb)
self.tlayer = ma.masked # Pressure of the top of the layer the parcel is lifted (mb)
self.entrain = 0. # A parcel entrainment setting (not yet implemented)
self.lclpres = ma.masked # Parcel LCL (lifted condensation level) pressure (mb)
self.lclhght = ma.masked # Parcel LCL height (m AGL)
self.lfcpres = ma.masked # Parcel LFC (level of free convection) pressure (mb)
self.lfchght = ma.masked # Parcel LFC height (m AGL)
self.elpres = ma.masked # Parcel EL (equilibrium level) pressure (mb)
self.elhght = ma.masked # Parcel EL height (m AGL)
self.mplpres = ma.masked # Maximum Parcel Level (mb)
self.mplhght = ma.masked # Maximum Parcel Level (m AGL)
self.bplus = ma.masked # Parcel CAPE (J/kg)
self.bminus = ma.masked # Parcel CIN (J/kg)
self.bfzl = ma.masked # Parcel CAPE up to freezing level (J/kg)
self.b3km = ma.masked # Parcel CAPE up to 3 km (J/kg)
self.b6km = ma.masked # Parcel CAPE up to 6 km (J/kg)
self.p0c = ma.masked # Pressure value at 0 C (mb)
self.pm10c = ma.masked # Pressure value at -10 C (mb)
self.pm20c = ma.masked # Pressure value at -20 C (mb)
self.pm30c = ma.masked # Pressure value at -30 C (mb)
self.hght0c = ma.masked # Height value at 0 C (m AGL)
self.hghtm10c = ma.masked # Height value at -10 C (m AGL)
self.hghtm20c = ma.masked # Height value at -20 C (m AGL)
self.hghtm30c = ma.masked # Height value at -30 C (m AGL)
self.wm10c = ma.masked # Wetbulb at -10 C (C)
self.wm20c = ma.masked # Wetbulb at -20 C (C)
self.wm30c = ma.masked # Wetbulb at -30 C (C)
self.li5 = ma.masked # Lifted Index at 500 mb (C)
self.li3 = ma.masked # Lifted Index at 300 mb (C)
self.brnshear = ma.masked # Bulk Richardson Number Shear
self.brnu = ma.masked # Bulk Richardson Number U (kts)
self.brnv = ma.masked # Bulk Richardson Number V (kts)
self.brn = ma.masked # Bulk Richardson Number (unitless)
self.limax = ma.masked # Maximum Lifted Index (C)
self.limaxpres = ma.masked # Pressure at Maximum Lifted Index (mb)
self.cap = ma.masked # Cap Strength (C)
self.cappres = ma.masked # Cap strength pressure (mb)
self.bmin = ma.masked # Buoyancy minimum in profile (C)
self.bminpres = ma.masked # Buoyancy minimum pressure (mb)
for kw in kwargs: setattr(self, kw, kwargs.get(kw))
def hgz(prof):
'''
Hail Growth Zone Levels
This function finds the pressure levels for the hail
growth zone (from -10 C to -30 C). If either temperature cannot be found,
it is set to be the surface pressure.
Parameters
----------
prof : profile object
Profile Object
Returns
-------
pbot : number
Pressure of the bottom level (mb)
ptop : number
Pressure of the top level (mb)
'''
pbot = temp_lvl(prof, -10)
ptop = temp_lvl(prof, -30)
if not utils.QC(pbot):
pbot = prof.pres[prof.sfc]
if not utils.QC(ptop):
ptop = prof.pres[prof.sfc]
return pbot, ptop
def dgz(prof):
'''
Dendritic Growth Zone Levels
This function finds the pressure levels for the dendritic
growth zone (from -12 C to -17 C). If either temperature cannot be found,
it is set to be the surface pressure.
Parameters
----------
prof : profile object
Profile Object
Returns
-------
pbot : number
Pressure of the bottom level (mb)
ptop : number
Pressure of the top level (mb)
'''
pbot = temp_lvl(prof, -12)
ptop = temp_lvl(prof, -17)
if not utils.QC(pbot):
pbot = prof.pres[prof.sfc]
if not utils.QC(ptop):
ptop = prof.pres[prof.sfc]
return pbot, ptop
def lhp(prof):
'''
Large Hail Parameter
From Johnson and Sugden (2014), EJSSM
.. warning::
This code has not been compared directly against an SPC version.
Parameters
----------
prof : profile object
ConvectiveProfile object
Returns
-------
lhp : number
large hail parameter (unitless)
'''
mag06_shr = utils.KTS2MS(utils.mag(*prof.sfc_6km_shear))
if prof.mupcl.bplus >= 400 and mag06_shr >= 14:
lr75 = prof.lapserate_700_500
zbot, ztop = interp.hght(prof, hgz(prof))
thk_hgz = ztop - zbot
term_a = (((prof.mupcl.bplus - 2000.)/1000.) +\
((3200 - thk_hgz)/500.) +\
((lr75 - 6.5)/2.))
if term_a < 0:
term_a = 0
p_1km, p_3km, p_6km = interp.pres(prof, interp.to_msl(prof, [1000, 3000, 6000]))
shear_el = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=prof.pres[prof.sfc], ptop=prof.mupcl.elpres)))
grw_el_dir = interp.vec(prof, prof.mupcl.elpres)[0]
grw_36_dir = utils.comp2vec(*winds.mean_wind(prof, pbot=p_3km, ptop=p_6km))[0]
grw_alpha_el = grw_el_dir - grw_36_dir
if grw_alpha_el > 180:
grw_alpha_el = -10
srw_01_dir = utils.comp2vec(*winds.sr_wind(prof, pbot=prof.pres[prof.sfc], ptop=p_1km, stu=prof.srwind[0], stv=prof.srwind[1]))[0]
srw_36_dir = utils.comp2vec(*winds.sr_wind(prof, pbot=p_3km, ptop=p_6km, stu=prof.srwind[0], stv=prof.srwind[1]))[0]
srw_alpha_mid = srw_36_dir - srw_01_dir
term_b = (((shear_el - 25.)/5.) +\
((grw_alpha_el + 5.)/20.) +\
((srw_alpha_mid - 80.)/10.))
if term_b < 0:
term_b = 0
lhp = term_a * term_b + 5
else:
lhp = 0
return lhp
def ship(prof, **kwargs):
'''
Calculate the Sig Hail Parameter (SHIP)
<NAME> (SPC) helped in correcting this equation as the SPC
sounding help page version did not have the correct information
of how SHIP was calculated.
The significant hail parameter (SHIP; SPC 2014) is
an index developed in-house at the SPC. (Johnson and Sugden 2014)
Parameters
----------
prof : profile object
Profile object
mupcl : parcel object, optional
Most Unstable Parcel object
lr75 : float, optional
700 - 500 mb lapse rate (C/km)
h5_temp : float, optional
500 mb temperature (C)
shr06 : float, optional
0-6 km shear (m/s)
frz_lvl : float, optional
freezing level (m)
Returns
-------
ship : number
significant hail parameter (unitless)
'''
mupcl = kwargs.get('mupcl', None)
sfc6shr = kwargs.get('sfc6shr', None)
frz_lvl = kwargs.get('frz_lvl', None)
h5_temp = kwargs.get('h5_temp', None)
lr75 = kwargs.get('lr75', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mumr = thermo.mixratio(mupcl.pres, mupcl.dwpc)
if not frz_lvl:
frz_lvl = interp.hght(prof, temp_lvl(prof, 0))
if not h5_temp:
h5_temp = interp.temp(prof, 500.)
if not lr75:
lr75 = lapse_rate(prof, 700., 500., pres=True)
if not sfc6shr:
try:
sfc_6km_shear = prof.sfc_6km_shear
except:
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
sfc_6km_shear = winds.wind_shear(prof, pbot=sfc, ptop=p6km)
sfc_6km_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
shr06 = utils.KTS2MS(sfc_6km_shear)
if shr06 > 27:
shr06 = 27.
elif shr06 < 7:
shr06 = 7.
if mumr > 13.6:
mumr = 13.6
elif mumr < 11.:
mumr = 11.
if h5_temp > -5.5:
h5_temp = -5.5
ship = -1. * (mucape * mumr * lr75 * h5_temp * shr06) / 42000000.
if mucape < 1300:
ship = ship*(mucape/1300.)
if lr75 < 5.8:
ship = ship*(lr75/5.8)
if frz_lvl < 2400:
ship = ship * (frz_lvl/2400.)
return ship
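# Worked example with hypothetical inputs (after the clamping above): mucape=2000 J/kg,
# mumr=13 g/kg, lr75=7 C/km, h5_temp=-10 C, shr06=25 m/s, frz_lvl above 2400 m:
#   ship = -1 * (2000 * 13 * 7 * -10 * 25) / 42000000. ~= 1.08
# None of the low-CAPE, low-lapse-rate, or low-freezing-level scalings apply, so SHIP ~ 1.1.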
def stp_cin(mlcape, esrh, ebwd, mllcl, mlcinh):
'''
Significant Tornado Parameter (w/CIN)
Formulated using the methodology outlined in [1]_. Used to detect environments where significant tornadoes
are possible within the United States. Uses the effective inflow layer calculations in [3]_ and was created
as an alternative to [2]_.
.. [1] <NAME>., <NAME>, <NAME>, <NAME>, and <NAME>, 2012: Convective modes for significant severe thunderstorms in the contiguous United States.Part II: Supercell and QLCS tornado environments. Wea. Forecasting, 27, 1136–1154,doi:https://doi.org/10.1175/WAF-D-11-00116.1.
.. [3] <NAME>., <NAME>, and <NAME>, 2007: Effective storm-relative helicity and bulk shear in supercell thunderstorm environments. Wea. Forecasting, 22, 102–115, doi:https://doi.org/10.1175/WAF969.1.
Parameters
----------
mlcape : float
Mixed-layer CAPE from the parcel class (J/kg)
esrh : float
effective storm relative helicity (m2/s2)
ebwd : float
effective bulk wind difference (m/s)
mllcl : float
mixed-layer lifted condensation level (m)
mlcinh : float
mixed-layer convective inhibition (J/kg)
Returns
-------
stp_cin : number
significant tornado parameter (unitless)
See Also
--------
stp_fixed
'''
cape_term = mlcape / 1500.
eshr_term = esrh / 150.
if ebwd < 12.5:
ebwd_term = 0.
elif ebwd > 30.:
ebwd_term = 1.5
else:
ebwd_term = ebwd / 20.
if mllcl < 1000.:
lcl_term = 1.0
elif mllcl > 2000.:
lcl_term = 0.0
else:
lcl_term = ((2000. - mllcl) / 1000.)
if mlcinh > -50:
cinh_term = 1.0
elif mlcinh < -200:
cinh_term = 0
else:
cinh_term = ((mlcinh + 200.) / 150.)
stp_cin = np.maximum(cape_term * eshr_term * ebwd_term * lcl_term * cinh_term, 0)
return stp_cin
def stp_fixed(sbcape, sblcl, srh01, bwd6):
'''
Significant Tornado Parameter (fixed layer)
Formulated using the methodology in [2]_. Used to detect environments where significant tornadoes
are possible within the United States.
.. [2] <NAME>., <NAME>, <NAME>, <NAME>, and <NAME>, 2003: Close proximity soundings within supercell environments obtained from the Rapid Update Cycle. Wea. Forecasting, 18, 1243–1261, doi:https://doi.org/10.1175/1520-0434(2003)018<1243:CPSWSE>2.0.CO;2
Parameters
----------
sbcape : number
Surface based CAPE from the parcel class (J/kg)
sblcl : number
Surface based lifted condensation level (LCL) (m)
srh01 : number
Surface to 1 km storm relative helicity (m2/s2)
bwd6 : number
Bulk wind difference between 0 to 6 km (m/s)
Returns
-------
stp_fixed : number
significant tornado parameter (fixed-layer)
'''
# Calculate SBLCL term
if sblcl < 1000.: # less than 1000. meters
lcl_term = 1.0
elif sblcl > 2000.: # greater than 2000. meters
lcl_term = 0.0
else:
lcl_term = ((2000.-sblcl)/1000.)
# Calculate 6BWD term
if bwd6 > 30.: # greater than 30 m/s
bwd6 = 30
elif bwd6 < 12.5:
bwd6 = 0.0
bwd6_term = bwd6 / 20.
cape_term = sbcape / 1500.
srh_term = srh01 / 150.
stp_fixed = cape_term * lcl_term * srh_term * bwd6_term
return stp_fixed
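# Worked example with hypothetical inputs: sbcape=1500 J/kg, sblcl=800 m, srh01=150 m2/s2,
# bwd6=20 m/s -> cape_term=1.0, lcl_term=1.0, srh_term=1.0, bwd6_term=1.0, so stp_fixed=1.0.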
def scp(mucape, srh, ebwd):
'''
Supercell Composite Parameter
From Thompson et al. 2004, updated from the methodology in [2]_ and uses
the effective inflow layer.
Parameters
----------
mucape : number
Most Unstable CAPE from the parcel class (J/kg)
srh : number
the effective SRH from the winds.helicity function (m2/s2)
ebwd : number
effective bulk wind difference (m/s)
Returns
-------
scp : number
supercell composite parameter
'''
if ebwd > 20:
ebwd = 20.
elif ebwd < 10:
ebwd = 0.
muCAPE_term = mucape / 1000.
esrh_term = srh / 50.
ebwd_term = ebwd / 20.
scp = muCAPE_term * esrh_term * ebwd_term
return scp
def k_index(prof):
'''
Calculates the K-Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
k_index : number
K-Index
'''
t8 = interp.temp(prof, 850.)
t7 = interp.temp(prof, 700.)
t5 = interp.temp(prof, 500.)
td7 = interp.dwpt(prof, 700.)
td8 = interp.dwpt(prof, 850.)
return t8 - t5 + td8 - (t7 - td7)
def t_totals(prof):
'''
Calculates the Total Totals Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
t_totals : number
Total Totals Index
'''
return c_totals(prof) + v_totals(prof)
def c_totals(prof):
'''
Calculates the Cross Totals Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
c_totals : number
Cross Totals Index
'''
return interp.dwpt(prof, 850.) - interp.temp(prof, 500.)
def v_totals(prof):
'''
Calculates the Vertical Totals Index from a profile object
Parameters
----------
prof : profile object
Profile Object
Returns
-------
v_totals : number
Vertical Totals Index
'''
return interp.temp(prof, 850.) - interp.temp(prof, 500.)
def precip_water(prof, pbot=None, ptop=400, dp=-1, exact=False):
'''
Calculates the precipitable water from a profile object within the
specified layer. The default layer (lower=-1 & upper=-1) is defined to
be surface to 400 hPa.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
ptop : number (optional; default 400 hPa)
Pressure of the top level (hPa).
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
pwat : number,
Precipitable Water (in)
'''
if not pbot: pbot = prof.pres[prof.sfc]
if prof.pres[-1] > ptop:
ptop = prof.pres[-1]
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dwpt1 = interp.dwpt(prof, pbot)
dwpt2 = interp.dwpt(prof, ptop)
mask = ~prof.dwpc.mask[ind1:ind2+1] * ~prof.pres.mask[ind1:ind2+1]
dwpt = np.concatenate([[dwpt1], prof.dwpc[ind1:ind2+1][mask], [dwpt2]])
p = np.concatenate([[pbot], prof.pres[ind1:ind2+1][mask], [ptop]])
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
dwpt = interp.dwpt(prof, p)
w = thermo.mixratio(p, dwpt)
return (((w[:-1]+w[1:])/2 * (p[:-1]-p[1:])) * 0.00040173).sum()
def inferred_temp_adv(prof, dp=-100, lat=35):
'''
Inferred Temperature Advection
SHARP code deduced by <NAME>. Not based on actual SPC code.
Calculates the inferred temperature advection from the surface pressure
and up every 100 mb assuming all winds are geostrophic. The units returned are
in C/hr. If no latitude is specified the function defaults to 35 degrees North.
This code uses Equation 4.1.139 from Bluestein's "Synoptic-Dynamic Meteorology in Midlatitudes (Volume I)"
.. important::
While this code compares well qualitatively to the version at SPC, the SPC output is much larger. Scale
analysis suggests that the values provided by this function are much more reasonable (10 K/day is typical
for synoptic scale values).
Parameters
----------
prof : profile object
Profile object
dp : number, optional
layer size to compute temperature advection over
lat : number, optional
latitude in decimal degrees
Returns
-------
temp_adv : array
an array of temperature advection values (C/hr)
pressure_bounds: array
a 2D array indicating the top and bottom bounds of the temperature advection layers (mb)
'''
if prof.wdir.count() == 0:
return ma.masked, ma.masked
if np.ma.max(prof.pres) <= 100:
return ma.masked, ma.masked
omega = (2. * np.pi) / (86164.)
pres_idx = np.where(prof.pres >= 100.)[0]
pressures = np.arange(prof.pres[prof.get_sfc()], prof.pres[pres_idx][-1], dp, dtype=type(prof.pres[prof.get_sfc()])) # Units: mb
temps = thermo.ctok(interp.temp(prof, pressures))
heights = interp.hght(prof, pressures)
temp_adv = np.empty(len(pressures) - 1)
dirs = interp.vec(prof, pressures)[0]
pressure_bounds = np.empty((len(pressures) - 1, 2))
if utils.QC(lat):
f = 2. * omega * np.sin(np.radians(lat)) # Units: (s**-1)
else:
temp_adv[:] = np.nan
return temp_adv, pressure_bounds
multiplier = (f / 9.81) * (np.pi / 180.) # Units: (s**-1 / (m/s**2)) * (radians/degrees)
for i in range(1, len(pressures)):
bottom_pres = pressures[i-1]
top_pres = pressures[i]
# Get the temperatures from both levels (in Kelvin)
btemp = temps[i-1]
ttemp = temps[i]
# Get the two heights of the top and bottom layer
bhght = heights[i-1] # Units: meters
thght = heights[i] # Units: meters
bottom_wdir = dirs[i-1] # Meteorological degrees (degrees from north)
top_wdir = dirs[i] # same units as bottom_wdir (meteorological degrees)
# Calculate the average temperature
avg_temp = (ttemp + btemp) / 2.
# Calculate the mean wind between the two levels (this is assumed to be geostrophic)
mean_u, mean_v = winds.mean_wind(prof, pbot=bottom_pres, ptop=top_pres)
mean_wdir, mean_wspd = utils.comp2vec(mean_u, mean_v) # Wind speed is in knots here
mean_wspd = utils.KTS2MS(mean_wspd) # Convert this geostrophic wind speed to m/s
# Here we calculate the change in wind direction with height (thanks to <NAME> for help with this)
# The sign of d_theta will dictate whether or not it is warm or cold advection
mod = 180 - bottom_wdir
top_wdir = top_wdir + mod
if top_wdir < 0:
top_wdir = top_wdir + 360
elif top_wdir >= 360:
top_wdir = top_wdir - 360
d_theta = top_wdir - 180.
# Here we calculate t_adv (which is -V_g * del(T) or the local change in temperature term)
        # Units: (1/s) / (m/s**2) * (rad/deg) * (m/s)**2 * K * (deg/m) -> K/s
t_adv = multiplier * np.power(mean_wspd,2) * avg_temp * (d_theta / (thght - bhght)) # Units: Kelvin / seconds
# Append the pressure bounds so the person knows the pressure
pressure_bounds[i-1, :] = bottom_pres, top_pres
temp_adv[i-1] = t_adv*60.*60. # Converts Kelvin/seconds to Kelvin/hour (or Celsius/hour)
return temp_adv, pressure_bounds
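# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_inferred_temp_adv(prof):
    # Layer-by-layer inferred temperature advection (C/hr) at 40 N, printed with
    # the pressure bounds of each layer.
    temp_adv, bounds = inferred_temp_adv(prof, dp=-100, lat=40.)
    for (p_bottom, p_top), adv in zip(bounds, temp_adv):
        print("%7.1f - %7.1f mb : %+6.2f C/hr" % (p_bottom, p_top, adv))
    return temp_adv, bounds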
def temp_lvl(prof, temp, wetbulb=False):
'''
Calculates the level (hPa) of the first occurrence of the specified
temperature.
Parameters
----------
prof : profile object
Profile Object
temp : number
Temperature being searched (C)
wetbulb : boolean
Flag to indicate whether or not the wetbulb profile should be used instead
Returns
-------
First Level of the temperature (hPa) : number
'''
if wetbulb is False:
profile = prof.tmpc
else:
profile = prof.wetbulb
difft = profile - temp
if not np.any(difft <= 0) or not np.any(difft >= 0):
# Temp doesn't occur anywhere; return masked
return ma.masked
elif np.any(difft == 0):
# Temp is one of the data points; don't bother interpolating
return prof.pres[difft == 0][0]
mask = difft.mask | prof.logp.mask
difft = difft[~mask]
profile = profile[~mask]
logp = prof.logp[~mask]
# Find where subsequent values of difft are of opposite sign (i.e. when multiplied together, the result is negative)
ind = np.where((difft[:-1] * difft[1:]) < 0)[0]
try:
ind = ind.min()
    except ValueError:
        # No sign change was found after masking, so the level cannot be determined
        return ma.masked
return np.power(10, np.interp(temp, [profile[ind+1], profile[ind]],
[logp[ind+1], logp[ind]]))
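# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_temp_lvl(prof):
    # Pressure (hPa) of the freezing level, the -20 C level, and the wet-bulb zero level.
    frz_lvl = temp_lvl(prof, 0.)
    m20_lvl = temp_lvl(prof, -20.)
    wbz_lvl = temp_lvl(prof, 0., wetbulb=True)
    return frz_lvl, m20_lvl, wbz_lvl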
def max_temp(prof, mixlayer=100):
'''
Calculates a maximum temperature forecast based on the depth of the mixing
layer and low-level temperatures
Parameters
----------
prof : profile object
Profile Object
mixlayer : number (optional; default = 100)
Top of layer over which to "mix" (hPa)
Returns
-------
mtemp : number
Forecast Maximum Temperature
'''
mixlayer = prof.pres[prof.sfc] - mixlayer
temp = thermo.ctok(interp.temp(prof, mixlayer)) + 2
return thermo.ktoc(temp * (prof.pres[prof.sfc] / mixlayer)**ROCP)
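# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_max_temp(prof):
    # Forecast maximum temperature (C) mixing the default lowest 100 mb, and a
    # shallower 50 mb mixed layer for comparison.
    return max_temp(prof), max_temp(prof, mixlayer=50)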
def mean_relh(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean relative humidity from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default surface pressure - 100 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Relative Humidity : number
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dwpt1 = interp.dwpt(prof, pbot)
dwpt2 = interp.dwpt(prof, ptop)
mask = ~prof.dwpc.mask[ind1:ind2+1] * ~prof.pres.mask[ind1:ind2+1]
dwpt = np.concatenate([[dwpt1], prof.dwpc[ind1:ind2+1][mask],
[dwpt2]])
p = np.concatenate([[pbot], prof.pres[ind1:ind2+1][mask], [ptop]])
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
tmp = interp.temp(prof, p)
dwpt = interp.dwpt(prof, p)
rh = thermo.relh(p, tmp, dwpt)
return ma.average(rh, weights=p)
def mean_omega(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean omega from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default surface pressure - 100 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Omega : number
'''
if hasattr(prof, 'omeg'):
if prof.omeg.all() is np.ma.masked:
return prof.missing
else:
return prof.missing
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.omeg(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.omeg(prof, ptop)): return ma.masked
if exact:
# This condition of the if statement is not tested
omeg = prof.omeg
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
omeg1 = interp.omeg(prof, pbot)
omeg2 = interp.omeg(prof, ptop)
omeg = omeg[ind1:ind2+1]
mask = ~omeg.mask
omeg = np.concatenate([[omeg1], omeg[mask], omeg[mask], [omeg2]])
tott = omeg.sum() / 2.
num = float(len(omeg)) / 2.
        omeg = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
omeg = interp.omeg(prof, p)
omeg = ma.average(omeg, weights=p)
return omeg
def mean_mixratio(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean mixing ratio from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default surface pressure - 100 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Mixing Ratio : number
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
dwpt1 = interp.dwpt(prof, pbot)
dwpt2 = interp.dwpt(prof, ptop)
mask = ~prof.dwpc.mask[ind1:ind2+1] * ~prof.pres.mask[ind1:ind2+1]
dwpt = np.concatenate([[dwpt1], prof.dwpc[ind1:ind2+1][mask], prof.dwpc[ind1:ind2+1][mask], [dwpt2]])
p = np.concatenate([[pbot], prof.pres[ind1:ind2+1][mask],prof.pres[ind1:ind2+1][mask], [ptop]])
totd = dwpt.sum() / 2.
totp = p.sum() / 2.
num = float(len(dwpt)) / 2.
w = thermo.mixratio(totp/num, totd/num)
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
dwpt = interp.dwpt(prof, p)
w = ma.average(thermo.mixratio(p, dwpt))
return w
def mean_thetae(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta-e from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default surface pressure - 100 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta-E : number
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
thetae1 = thermo.thetae(pbot, interp.temp(prof, pbot), interp.dwpt(prof, pbot))
        thetae2 = thermo.thetae(ptop, interp.temp(prof, ptop), interp.dwpt(prof, ptop))
thetae = np.ma.empty(prof.pres[ind1:ind2+1].shape)
for i in np.arange(0, len(thetae), 1):
thetae[i] = thermo.thetae(prof.pres[ind1:ind2+1][i], prof.tmpc[ind1:ind2+1][i], prof.dwpc[ind1:ind2+1][i])
mask = ~thetae.mask
thetae = np.concatenate([[thetae1], thetae[mask], thetae[mask], [thetae2]])
tott = thetae.sum() / 2.
num = float(len(thetae)) / 2.
thtae = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
#temp = interp.temp(prof, p)
#dwpt = interp.dwpt(prof, p)
#thetae = np.empty(p.shape)
#for i in np.arange(0, len(thetae), 1):
# thetae[i] = thermo.thetae(p[i], temp[i], dwpt[i])
thetae = interp.thetae(prof, p)
thtae = ma.average(thetae, weights=p)
return thtae
def mean_theta(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Calculates the mean theta from a profile object within the
specified layer.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default surface pressure - 100 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Mean Theta : number
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 100.
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
theta1 = thermo.theta(pbot, interp.temp(prof, pbot))
theta2 = thermo.theta(ptop, interp.temp(prof, ptop))
theta = thermo.theta(prof.pres[ind1:ind2+1], prof.tmpc[ind1:ind2+1])
mask = ~theta.mask
theta = np.concatenate([[theta1], theta[mask], theta[mask], [theta2]])
tott = theta.sum() / 2.
num = float(len(theta)) / 2.
thta = tott / num
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
temp = interp.temp(prof, p)
theta = thermo.theta(p, temp)
thta = ma.average(theta, weights=p)
return thta
def lapse_rate(prof, lower, upper, pres=True):
'''
Calculates the lapse rate (C/km) from a profile object
Parameters
----------
prof : profile object
Profile Object
lower : number
Lower Bound of lapse rate (mb or m AGL)
upper : number
Upper Bound of lapse rate (mb or m AGL)
pres : bool (optional; default = True)
Flag to determine if lower/upper are pressure [True]
or height [False]
Returns
-------
lapse rate (C/km) : number
'''
if pres:
if (prof.pres[-1] > upper): return ma.masked
p1 = lower
p2 = upper
z1 = interp.hght(prof, lower)
z2 = interp.hght(prof, upper)
else:
z1 = interp.to_msl(prof, lower)
z2 = interp.to_msl(prof, upper)
p1 = interp.pres(prof, z1)
p2 = interp.pres(prof, z2)
tv1 = interp.vtmp(prof, p1)
tv2 = interp.vtmp(prof, p2)
return (tv2 - tv1) / (z2 - z1) * -1000.
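# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_lapse_rate(prof):
    # 700-500 mb lapse rate (pressure bounds) and 0-3 km AGL lapse rate (height
    # bounds), both in C/km.
    lr75 = lapse_rate(prof, 700., 500., pres=True)
    lr03 = lapse_rate(prof, 0., 3000., pres=False)
    return lr75, lr03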
def max_lapse_rate(prof, lower=2000, upper=6000, interval=250, depth=2000):
'''
Calculates the maximum lapse rate (C/km) between a layer at a specified interval
Parameters
----------
prof: profile object
Profile object
lower : number
Lower bound in height (m)
upper : number
Upper bound in height (m)
interval : number
Interval to assess the lapse rate at (m)
depth : number
Depth of the layer to assess the lapse rate over (m)
Returns
-------
max lapse rate (C/km) : float
lower pressure of max lapse rate (mb) : number
upper pressure of max lapse rate (mb) : number
'''
bottom_levels = interp.to_msl(prof, np.arange(lower, upper-depth+interval, interval))
top_levels = interp.to_msl(prof, np.arange(lower+depth, upper+interval, interval))
bottom_pres = interp.pres(prof, bottom_levels)
top_pres = interp.pres(prof, top_levels)
all_lapse_rates = (interp.vtmp(prof, top_pres) - interp.vtmp(prof, bottom_pres)) * -1000.
max_lapse_rate_idx = np.ma.argmax(all_lapse_rates)
return all_lapse_rates[max_lapse_rate_idx]/depth, bottom_pres[max_lapse_rate_idx], top_pres[max_lapse_rate_idx]
def most_unstable_level(prof, pbot=None, ptop=None, dp=-1, exact=False):
'''
Finds the most unstable level between the lower and upper levels.
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default surface pressure - 400 hPa)
Pressure of the top level (hPa)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
Returns
-------
Pressure level of most unstable level (hPa) : number
'''
if not pbot: pbot = prof.pres[prof.sfc]
if not ptop: ptop = prof.pres[prof.sfc] - 400
if not utils.QC(interp.temp(prof, pbot)): pbot = prof.pres[prof.sfc]
if not utils.QC(interp.temp(prof, ptop)): return ma.masked
if exact:
ind1 = np.where(pbot > prof.pres)[0].min()
ind2 = np.where(ptop < prof.pres)[0].max()
t1 = interp.temp(prof, pbot)
t2 = interp.temp(prof, ptop)
d1 = interp.dwpt(prof, pbot)
d2 = interp.dwpt(prof, ptop)
t = prof.tmpc[ind1:ind2+1]
d = prof.dwpc[ind1:ind2+1]
p = prof.pres[ind1:ind2+1]
mask = ~t.mask * ~d.mask * ~p.mask
t = np.concatenate([[t1], t[mask], [t2]])
d = np.concatenate([[d1], d[mask], [d2]])
p = np.concatenate([[pbot], p[mask], [ptop]])
else:
dp = -1
p = np.arange(pbot, ptop+dp, dp, dtype=type(pbot))
t = interp.temp(prof, p)
d = interp.dwpt(prof, p)
p2, t2 = thermo.drylift(p, t, d)
mt = thermo.wetlift(p2, t2, 1000.) # Essentially this is making the Theta-E profile, which we are already doing in the Profile object!
ind = np.where(np.fabs(mt - np.nanmax(mt)) < TOL)[0]
return p[ind[0]]
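# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_most_unstable_level(prof):
    # Pressure (hPa) of the most unstable level in the default search layer
    # (surface to surface - 400 mb), using the exact sounding levels.
    return most_unstable_level(prof, exact=True)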
def parcelTraj(prof, parcel, smu=None, smv=None):
'''
Parcel Trajectory Routine (Storm Slinky)
Coded by <NAME>
This routine is a simple 3D thermodynamic parcel trajectory model that
takes a thermodynamic profile and a parcel trace and computes the
trajectory of a parcel that is lifted to its LFC, then given a 5 m/s
nudge upwards, and then left to accelerate up to the EL. (Based on a description
in the AWIPS 2 Online Training.)
This parcel is assumed to be moving horizontally via the storm motion vector, which
if not supplied is taken to be the Bunkers Right Mover storm motion vector.
As the parcel accelerates upwards, it is advected by the storm relative winds.
The environmental winds are assumed to be steady-state.
This simulates the path a parcel in a storm updraft would take using pure parcel theory.
.. important::
The code for this function was not directly ported from SPC.
Parameters
----------
prof : profile object
Profile object
parcel : parcel object
Parcel object
smu : number, optional
U-component of the storm motion vector (kts)
smv: number, optional
V-component of the storm motion vector (kts)
Returns
-------
pos_vector : list
a list of tuples, where each element of the list is a location of the parcel in time (m)
theta : number
the tilt of the updraft measured by the angle of the updraft with respect to the horizon (degrees)
'''
t_parcel = parcel.ttrace # temperature
p_parcel = parcel.ptrace # mb
elhght = parcel.elhght # meter
y_0 = 0 # meter
x_0 = 0 # meter
z_0 = parcel.lfchght # meter
    p_0 = parcel.lfcpres # mb
g = 9.8 # m/s**2
t_0 = 0 # seconds
w_0 = 5 # m/s (the initial parcel nudge)
u_0 = 0 # m/s
v_0 = 0 # m/s (initial parcel location, which is storm motion relative)
    delta_t = 25 # the trajectory time step (seconds)
pos_vector = [(x_0,y_0,z_0)]
speed_vector = [(u_0, v_0, w_0)]
    if smu is None or smv is None:
        smu = prof.srwind[0] # Bunkers right-mover u-component (expected in knots)
        smv = prof.srwind[1] # Bunkers right-mover v-component (expected in knots)
if parcel.bplus < 1e-3:
# The parcel doesn't have any positively buoyant areas.
return np.ma.masked, np.nan
if not utils.QC(elhght):
elhght = prof.hght[-1]
while z_0 < elhght:
t_1 = delta_t + t_0 # the time step increment
# Compute the vertical acceleration
env_tempv = interp.vtmp(prof, p_0) + 273.15
pcl_tempv = interp.generic_interp_pres(np.log10(p_0), np.log10(p_parcel.copy())[::-1], t_parcel[::-1]) + 273.15
accel = g * ((pcl_tempv - env_tempv) / env_tempv)
# Compute the vertical displacement
z_1 = (.5 * accel * np.power(t_1 - t_0, 2)) + (w_0 * (t_1 - t_0)) + z_0
w_1 = accel * (t_1 - t_0) + w_0
# Compute the parcel-relative winds
u, v = interp.components(prof, p_0)
u_0 = utils.KTS2MS(u - smu)
v_0 = utils.KTS2MS(v - smv)
# Compute the horizontal displacements
x_1 = u_0 * (t_1 - t_0) + x_0
y_1 = v_0 * (t_1 - t_0) + y_0
pos_vector.append((x_1, y_1, z_1))
speed_vector.append((u_0, v_0, w_1))
# Update parcel position
z_0 = z_1
y_0 = y_1
x_0 = x_1
t_0 = t_1
p_0 = interp.pres(prof, interp.to_msl(prof, z_1))
# Update parcel vertical velocity
w_0 = w_1
# Compute the angle tilt of the updraft
r = np.sqrt(np.power(pos_vector[-1][0], 2) + np.power(pos_vector[-1][1], 2))
theta = np.degrees(np.arctan2(pos_vector[-1][2],r))
return pos_vector, theta
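# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package, and the
# most-unstable parcel (flag=3) is lifted here only to feed the trajectory routine.
def _example_parcelTraj(prof):
    # Run the storm slinky for a most-unstable parcel with the default
    # (Bunkers right-mover) storm motion.
    mupcl = parcelx(prof, flag=3)
    pos_vector, tilt = parcelTraj(prof, mupcl)
    return pos_vector, tilt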
def cape(prof, pbot=None, ptop=None, dp=-1, new_lifter=False, trunc=False, **kwargs):
'''
Lifts the specified parcel, calculates various levels and parameters from
the profile object. Only B+/B- are calculated based on the specified layer.
This is a convenience function for effective_inflow_layer and convective_temp,
as well as any function that needs to lift a parcel in an iterative process.
    This function is a stripped-back version of the parcelx function that only
handles bplus and bminus. The intention is to reduce the computation time in
the iterative functions by reducing the calculations needed.
This method of creating a stripped down parcelx function for CAPE/CIN calculations
was developed by <NAME> and <NAME> and later implemented in
SPC's version of SHARP to speed up their program.
For full parcel objects, use the parcelx function.
!! All calculations use the virtual temperature correction unless noted. !!
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default top of the sounding)
Pressure of the top level (hPa)
pres : number (optional)
Pressure of parcel to lift (hPa)
tmpc : number (optional)
Temperature of parcel to lift (C)
dwpc : number (optional)
Dew Point of parcel to lift (C)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
flag : number (optional; default = 5)
Flag to determine what kind of parcel to create; See DefineParcel for
flag values
lplvals : lifting parcel layer object (optional)
Contains the necessary parameters to describe a lifting parcel
Returns
-------
pcl : parcel object
Parcel Object
'''
flag = kwargs.get('flag', 5)
pcl = Parcel(pbot=pbot, ptop=ptop)
pcl.lplvals = kwargs.get('lplvals', DefineParcel(prof, flag))
if prof.pres.compressed().shape[0] < 1: return pcl
# Variables
pres = kwargs.get('pres', pcl.lplvals.pres)
tmpc = kwargs.get('tmpc', pcl.lplvals.tmpc)
dwpc = kwargs.get('dwpc', pcl.lplvals.dwpc)
pcl.pres = pres
pcl.tmpc = tmpc
pcl.dwpc = dwpc
totp = 0.
totn = 0.
cinh_old = 0.
# See if default layer is specified
if not pbot:
pbot = prof.pres[prof.sfc]
pcl.blayer = pbot
pcl.pbot = pbot
if not ptop:
ptop = prof.pres[prof.pres.shape[0]-1]
pcl.tlayer = ptop
pcl.ptop = ptop
# Make sure this is a valid layer
if pbot > pres:
pbot = pres
pcl.blayer = pbot
if type(interp.vtmp(prof, pbot)) == type(ma.masked) or type(interp.vtmp(prof, ptop)) == type(ma.masked):
return pcl
# Begin with the Mixing Layer
pe1 = pbot
h1 = interp.hght(prof, pe1)
tp1 = thermo.virtemp(pres, tmpc, dwpc)
# Lift parcel and return LCL pres (hPa) and LCL temp (C)
pe2, tp2 = thermo.drylift(pres, tmpc, dwpc)
if np.ma.is_masked(pe2) or not utils.QC(pe2) or np.isnan(pe2):
return pcl
blupper = pe2
# Calculate lifted parcel theta for use in iterative CINH loop below
# RECALL: lifted parcel theta is CONSTANT from LPL to LCL
theta_parcel = thermo.theta(pe2, tp2, 1000.)
# Environmental theta and mixing ratio at LPL
blmr = thermo.mixratio(pres, dwpc)
# ACCUMULATED CINH IN THE MIXING LAYER BELOW THE LCL
# This will be done in 'dp' increments and will use the virtual
# temperature correction where possible
pp = np.arange(pbot, blupper+dp, dp, dtype=type(pbot))
hh = interp.hght(prof, pp)
tmp_env_theta = thermo.theta(pp, interp.temp(prof, pp), 1000.)
tmp_env_dwpt = interp.dwpt(prof, pp)
tv_env = thermo.virtemp(pp, tmp_env_theta, tmp_env_dwpt)
tmp1 = thermo.virtemp(pp, theta_parcel, thermo.temp_at_mixrat(blmr, pp))
tdef = (tmp1 - tv_env) / thermo.ctok(tv_env)
lyre = G * (tdef[:-1]+tdef[1:]) / 2 * (hh[1:]-hh[:-1])
totn = lyre[lyre < 0].sum()
if not totn: totn = 0.
    # TODO: Because this function is used often to search for parcels that meet a certain
    #       CAPE/CIN threshold, we can add a few statements here and there in the code
    #       that check to see if these thresholds are met and, if they are, return a flag.
    #       We don't need to call wetlift() any more than we have to.  This is one location
    #       where we can do this.  If the CIN is too large, return here...we don't have to worry
    #       about ever entering the loop!
# Move the bottom layer to the top of the boundary layer
if pbot > pe2:
pbot = pe2
pcl.blayer = pbot
if pbot < prof.pres[-1]:
# Check for the case where the LCL is above the
# upper boundary of the data (e.g. a dropsonde)
return pcl
# Find lowest observation in layer
lptr = ma.where(pbot > prof.pres)[0].min()
uptr = ma.where(ptop < prof.pres)[0].max()
# START WITH INTERPOLATED BOTTOM LAYER
# Begin moist ascent from lifted parcel LCL (pe2, tp2)
pe1 = pbot
h1 = interp.hght(prof, pe1)
te1 = interp.vtmp(prof, pe1)
tp1 = tp2
lyre = 0
if new_lifter:
env_temp = prof.vtmp[lptr:]
try:
keep = ~env_temp.mask * np.ones(env_temp.shape, dtype=bool)
except AttributeError:
keep = np.ones(env_temp.shape, dtype=bool)
env_temp = np.append(te1, env_temp[keep])
env_pres = np.append(pe1, prof.pres[lptr:][keep])
env_hght = np.append(h1, prof.hght[lptr:][keep])
pcl_temp = integrate_parcel(env_pres, tp1)
tdef = (thermo.virtemp(env_pres, pcl_temp, pcl_temp) - env_temp) / thermo.ctok(env_temp)
lyre = G * (tdef[1:] + tdef[:-1]) / 2 * (env_hght[1:] - env_hght[:-1])
totp = lyre[lyre > 0].sum()
neg_layers = (lyre <= 0) & (env_pres[1:] > 500)
if np.any(neg_layers):
totn += lyre[neg_layers].sum()
if lyre[-1] > 0:
pcl.bplus = totp - lyre[-1]
pcl.bminus = totn
else:
pcl.bplus = totp
if env_pres[-1] > 500.:
pcl.bminus = totn + lyre[-1]
else:
pcl.bminus = totn
if pcl.bplus == 0: pcl.bminus = 0.
else:
for i in range(lptr, prof.pres.shape[0]):
if not utils.QC(prof.tmpc[i]): continue
pe2 = prof.pres[i]
h2 = prof.hght[i]
te2 = prof.vtmp[i]
tp2 = thermo.wetlift(pe1, tp1, pe2)
tdef1 = (thermo.virtemp(pe1, tp1, tp1) - te1) / thermo.ctok(te1)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
lyre = G * (tdef1 + tdef2) / 2. * (h2 - h1)
# Add layer energy to total positive if lyre > 0
if lyre > 0: totp += lyre
# Add layer energy to total negative if lyre < 0, only up to EL
else:
if pe2 > 500.: totn += lyre
pe1 = pe2
h1 = h2
te1 = te2
tp1 = tp2
# Is this the top of the specified layer
# Because CIN is only computed below 500 mb, we can cut off additional lifting when
# computing convective temperature!
if (trunc is True and pe2 <= 500) or (i >= uptr and not utils.QC(pcl.bplus)):
pe3 = pe1
h3 = h1
te3 = te1
tp3 = tp1
lyrf = lyre
if lyrf > 0:
pcl.bplus = totp - lyrf
pcl.bminus = totn
else:
pcl.bplus = totp
if pe2 > 500.: pcl.bminus = totn + lyrf
else: pcl.bminus = totn
pe2 = ptop
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h2 - h3)
if lyrf > 0: pcl.bplus += lyrf
else:
if pe2 > 500.: pcl.bminus += lyrf
if pcl.bplus == 0: pcl.bminus = 0.
break
return pcl
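# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_cape(prof):
    # Most-unstable CAPE/CIN via the stripped-down lifter, mirroring the call
    # pattern used by effective_inflow_layer() further below.
    mulplvals = DefineParcel(prof, flag=3, pres=300)
    mupcl = cape(prof, lplvals=mulplvals)
    return mupcl.bplus, mupcl.bminus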
def integrate_parcel(pres, tbot):
pcl_tmpc = np.empty(pres.shape, dtype=pres.dtype)
pcl_tmpc[0] = tbot
for idx in range(1, len(pres)):
pcl_tmpc[idx] = thermo.wetlift(pres[idx - 1], pcl_tmpc[idx - 1], pres[idx])
return pcl_tmpc
def parcelx(prof, pbot=None, ptop=None, dp=-1, **kwargs):
'''
Lifts the specified parcel, calculates various levels and parameters from
the profile object. B+/B- are calculated based on the specified layer.
Such parameters include CAPE, CIN, LCL height, LFC height, buoyancy minimum,
EL height, MPL height.
!! All calculations use the virtual temperature correction unless noted. !!
Parameters
----------
prof : profile object
Profile Object
pbot : number (optional; default surface)
Pressure of the bottom level (hPa)
    ptop : number (optional; default top of the sounding)
Pressure of the top level (hPa)
pres : number (optional)
Pressure of parcel to lift (hPa)
tmpc : number (optional)
Temperature of parcel to lift (C)
dwpc : number (optional)
Dew Point of parcel to lift (C)
dp : negative integer (optional; default = -1)
The pressure increment for the interpolated sounding (mb)
exact : bool (optional; default = False)
Switch to choose between using the exact data (slower) or using
interpolated sounding at 'dp' pressure levels (faster)
flag : number (optional; default = 5)
Flag to determine what kind of parcel to create; See DefineParcel for
flag values
lplvals : lifting parcel layer object (optional)
Contains the necessary parameters to describe a lifting parcel
Returns
-------
Parcel Object
'''
flag = kwargs.get('flag', 5)
pcl = Parcel(pbot=pbot, ptop=ptop)
pcl.lplvals = kwargs.get('lplvals', DefineParcel(prof, flag))
if prof.pres.compressed().shape[0] < 1: return pcl
# Variables
pres = kwargs.get('pres', pcl.lplvals.pres)
tmpc = kwargs.get('tmpc', pcl.lplvals.tmpc)
dwpc = kwargs.get('dwpc', pcl.lplvals.dwpc)
pcl.pres = pres
pcl.tmpc = tmpc
pcl.dwpc = dwpc
cap_strength = -9999.
cap_strengthpres = -9999.
li_max = -9999.
li_maxpres = -9999.
totp = 0.
totn = 0.
tote = 0.
cinh_old = 0.
# See if default layer is specified
if not pbot:
pbot = prof.pres[prof.sfc]
pcl.blayer = pbot
pcl.pbot = pbot
if not ptop:
ptop = prof.pres[prof.pres.shape[0]-1]
pcl.tlayer = ptop
pcl.ptop = ptop
# Make sure this is a valid layer
if pbot > pres:
pbot = pres
pcl.blayer = pbot
#if type(interp.vtmp(prof, pbot)) == type(ma.masked) or type(interp.vtmp(prof, ptop)) == type(ma.masked):
# return pcl
# Begin with the Mixing Layer
pe1 = pbot
h1 = interp.hght(prof, pe1)
tp1 = thermo.virtemp(pres, tmpc, dwpc)
ttrace = [tp1]
ptrace = [pe1]
# Lift parcel and return LCL pres (hPa) and LCL temp (C)
pe2, tp2 = thermo.drylift(pres, tmpc, dwpc)
if type(pe2) == type(ma.masked) or np.isnan(pe2):
return pcl
blupper = pe2
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
pcl.lclpres = min(pe2, prof.pres[prof.sfc]) # Make sure the LCL pressure is
# never below the surface
pcl.lclhght = interp.to_agl(prof, h2)
ptrace.append(pe2)
ttrace.append(thermo.virtemp(pe2, tp2, tp2))
# Calculate lifted parcel theta for use in iterative CINH loop below
# RECALL: lifted parcel theta is CONSTANT from LPL to LCL
theta_parcel = thermo.theta(pe2, tp2, 1000.)
# Environmental theta and mixing ratio at LPL
bltheta = thermo.theta(pres, interp.temp(prof, pres), 1000.)
blmr = thermo.mixratio(pres, dwpc)
# ACCUMULATED CINH IN THE MIXING LAYER BELOW THE LCL
# This will be done in 'dp' increments and will use the virtual
# temperature correction where possible
pp = np.arange(pbot, blupper+dp, dp, dtype=type(pbot))
hh = interp.hght(prof, pp)
tmp_env_theta = thermo.theta(pp, interp.temp(prof, pp), 1000.)
tmp_env_dwpt = interp.dwpt(prof, pp)
tv_env = thermo.virtemp(pp, tmp_env_theta, tmp_env_dwpt)
tmp1 = thermo.virtemp(pp, theta_parcel, thermo.temp_at_mixrat(blmr, pp))
tdef = (tmp1 - tv_env) / thermo.ctok(tv_env)
tidx1 = np.arange(0, len(tdef)-1, 1)
tidx2 = np.arange(1, len(tdef), 1)
lyre = G * (tdef[tidx1]+tdef[tidx2]) / 2 * (hh[tidx2]-hh[tidx1])
totn = lyre[lyre < 0].sum()
if not totn: totn = 0.
# Move the bottom layer to the top of the boundary layer
if pbot > pe2:
pbot = pe2
pcl.blayer = pbot
# Calculate height of various temperature levels
p0c = temp_lvl(prof, 0.)
pm10c = temp_lvl(prof, -10.)
pm20c = temp_lvl(prof, -20.)
pm30c = temp_lvl(prof, -30.)
hgt0c = interp.hght(prof, p0c)
hgtm10c = interp.hght(prof, pm10c)
hgtm20c = interp.hght(prof, pm20c)
hgtm30c = interp.hght(prof, pm30c)
pcl.p0c = p0c
pcl.pm10c = pm10c
pcl.pm20c = pm20c
pcl.pm30c = pm30c
pcl.hght0c = hgt0c
pcl.hghtm10c = hgtm10c
pcl.hghtm20c = hgtm20c
pcl.hghtm30c = hgtm30c
if pbot < prof.pres[-1]:
# Check for the case where the LCL is above the
# upper boundary of the data (e.g. a dropsonde)
return pcl
# Find lowest observation in layer
lptr = ma.where(pbot >= prof.pres)[0].min()
uptr = ma.where(ptop <= prof.pres)[0].max()
# START WITH INTERPOLATED BOTTOM LAYER
# Begin moist ascent from lifted parcel LCL (pe2, tp2)
pe1 = pbot
h1 = interp.hght(prof, pe1)
te1 = interp.vtmp(prof, pe1)
tp1 = thermo.wetlift(pe2, tp2, pe1)
lyre = 0
lyrlast = 0
iter_ranges = np.arange(lptr, prof.pres.shape[0])
ttraces = ma.zeros(len(iter_ranges))
ptraces = ma.zeros(len(iter_ranges))
ttraces[:] = ptraces[:] = ma.masked
for i in iter_ranges:
if not utils.QC(prof.tmpc[i]): continue
pe2 = prof.pres[i]
h2 = prof.hght[i]
te2 = prof.vtmp[i]
#te2 = thermo.virtemp(prof.pres[i], prof.tmpc[i], prof.dwpc[i])
tp2 = thermo.wetlift(pe1, tp1, pe2)
tdef1 = (thermo.virtemp(pe1, tp1, tp1) - te1) / thermo.ctok(te1)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
ptraces[i-iter_ranges[0]] = pe2
ttraces[i-iter_ranges[0]] = thermo.virtemp(pe2, tp2, tp2)
lyrlast = lyre
lyre = G * (tdef1 + tdef2) / 2. * (h2 - h1)
#print(pe1, pe2, te1, te2, tp1, tp2, lyre, totp, totn, pcl.lfcpres)
# Add layer energy to total positive if lyre > 0
if lyre > 0: totp += lyre
# Add layer energy to total negative if lyre < 0, only up to EL
else:
if pe2 > 500.: totn += lyre
# Check for Max LI
mli = thermo.virtemp(pe2, tp2, tp2) - te2
if mli > li_max:
li_max = mli
li_maxpres = pe2
# Check for Max Cap Strength
mcap = te2 - mli
if mcap > cap_strength:
cap_strength = mcap
cap_strengthpres = pe2
tote += lyre
pelast = pe1
pe1 = pe2
te1 = te2
tp1 = tp2
# Is this the top of the specified layer
if i >= uptr and not utils.QC(pcl.bplus):
pe3 = pe1
h3 = h2
te3 = te1
tp3 = tp1
lyrf = lyre
if lyrf > 0:
pcl.bplus = totp - lyrf
pcl.bminus = totn
else:
pcl.bplus = totp
if pe2 > 500.: pcl.bminus = totn + lyrf
else: pcl.bminus = totn
pe2 = ptop
h2 = interp.hght(prof, pe2)
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h2 - h3)
if lyrf > 0: pcl.bplus += lyrf
else:
if pe2 > 500.: pcl.bminus += lyrf
if pcl.bplus == 0: pcl.bminus = 0.
# Is this the freezing level
if te2 < 0. and not utils.QC(pcl.bfzl):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.bfzl = totp - lyrf
else: pcl.bfzl = totp
if not utils.QC(p0c) or p0c > pe3:
pcl.bfzl = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgt0c - h3)
if lyrf > 0: pcl.bfzl += lyrf
# Is this the -10C level
if te2 < -10. and not utils.QC(pcl.wm10c):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.wm10c = totp - lyrf
else: pcl.wm10c = totp
if not utils.QC(pm10c) or pm10c > pcl.lclpres:
pcl.wm10c = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgtm10c - h3)
if lyrf > 0: pcl.wm10c += lyrf
# Is this the -20C level
if te2 < -20. and not utils.QC(pcl.wm20c):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.wm20c = totp - lyrf
else: pcl.wm20c = totp
if not utils.QC(pm20c) or pm20c > pcl.lclpres:
pcl.wm20c = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgtm20c - h3)
if lyrf > 0: pcl.wm20c += lyrf
# Is this the -30C level
if te2 < -30. and not utils.QC(pcl.wm30c):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0.: pcl.wm30c = totp - lyrf
else: pcl.wm30c = totp
if not utils.QC(pm30c) or pm30c > pcl.lclpres:
pcl.wm30c = 0
elif utils.QC(pe2):
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (hgtm30c - h3)
if lyrf > 0: pcl.wm30c += lyrf
# Is this the 3km level
if pcl.lclhght < 3000.:
if interp.to_agl(prof, h1) <=3000. and interp.to_agl(prof, h2) >= 3000. and not utils.QC(pcl.b3km):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0: pcl.b3km = totp - lyrf
else: pcl.b3km = totp
h4 = interp.to_msl(prof, 3000.)
pe4 = interp.pres(prof, h4)
if utils.QC(pe2):
te2 = interp.vtmp(prof, pe4)
tp2 = thermo.wetlift(pe3, tp3, pe4)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe4, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h4 - h3)
if lyrf > 0: pcl.b3km += lyrf
else: pcl.b3km = 0.
# Is this the 6km level
if pcl.lclhght < 6000.:
if interp.to_agl(prof, h1) <=6000. and interp.to_agl(prof, h2) >= 6000. and not utils.QC(pcl.b6km):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
lyrf = lyre
if lyrf > 0: pcl.b6km = totp - lyrf
else: pcl.b6km = totp
h4 = interp.to_msl(prof, 6000.)
pe4 = interp.pres(prof, h4)
if utils.QC(pe2):
te2 = interp.vtmp(prof, pe4)
tp2 = thermo.wetlift(pe3, tp3, pe4)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe4, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h4 - h3)
if lyrf > 0: pcl.b6km += lyrf
else: pcl.b6km = 0.
h1 = h2
# LFC Possibility
if lyre >= 0. and lyrlast <= 0.:
tp3 = tp1
#te3 = te1
pe2 = pe1
pe3 = pelast
if interp.vtmp(prof, pe3) < thermo.virtemp(pe3, thermo.wetlift(pe2, tp3, pe3), thermo.wetlift(pe2, tp3, pe3)):
# Found an LFC, store height/pres and reset EL/MPL
pcl.lfcpres = pe3
pcl.lfchght = interp.to_agl(prof, interp.hght(prof, pe3))
pcl.elpres = ma.masked
pcl.elhght = ma.masked
pcl.mplpres = ma.masked
else:
while interp.vtmp(prof, pe3) > thermo.virtemp(pe3, thermo.wetlift(pe2, tp3, pe3), thermo.wetlift(pe2, tp3, pe3)) and pe3 > 0:
pe3 -= 5
if pe3 > 0:
# Found a LFC, store height/pres and reset EL/MPL
pcl.lfcpres = pe3
pcl.lfchght = interp.to_agl(prof, interp.hght(prof, pe3))
cinh_old = totn
tote = 0.
li_max = -9999.
if cap_strength < 0.: cap_strength = 0.
pcl.cap = cap_strength
pcl.cappres = cap_strengthpres
pcl.elpres = ma.masked
pcl.elhght = ma.masked
pcl.mplpres = ma.masked
# Hack to force LFC to be at least at the LCL
if pcl.lfcpres >= pcl.lclpres:
pcl.lfcpres = pcl.lclpres
pcl.lfchght = pcl.lclhght
# EL Possibility
if lyre <= 0. and lyrlast >= 0.:
tp3 = tp1
#te3 = te1
pe2 = pe1
pe3 = pelast
while interp.vtmp(prof, pe3) < thermo.virtemp(pe3, thermo.wetlift(pe2, tp3, pe3), thermo.wetlift(pe2, tp3, pe3)):
pe3 -= 5
pcl.elpres = pe3
pcl.elhght = interp.to_agl(prof, interp.hght(prof, pcl.elpres))
pcl.mplpres = ma.masked
pcl.limax = -li_max
pcl.limaxpres = li_maxpres
# MPL Possibility
if tote < 0. and not utils.QC(pcl.mplpres) and utils.QC(pcl.elpres):
pe3 = pelast
h3 = interp.hght(prof, pe3)
te3 = interp.vtmp(prof, pe3)
tp3 = thermo.wetlift(pe1, tp1, pe3)
totx = tote - lyre
pe2 = pelast
while totx > 0:
pe2 -= 1
te2 = interp.vtmp(prof, pe2)
tp2 = thermo.wetlift(pe3, tp3, pe2)
h2 = interp.hght(prof, pe2)
tdef3 = (thermo.virtemp(pe3, tp3, tp3) - te3) / \
thermo.ctok(te3)
tdef2 = (thermo.virtemp(pe2, tp2, tp2) - te2) / \
thermo.ctok(te2)
lyrf = G * (tdef3 + tdef2) / 2. * (h2 - h3)
totx += lyrf
tp3 = tp2
te3 = te2
pe3 = pe2
pcl.mplpres = pe2
pcl.mplhght = interp.to_agl(prof, interp.hght(prof, pe2))
# 500 hPa Lifted Index
if prof.pres[i] <= 500. and not utils.QC(pcl.li5):
a = interp.vtmp(prof, 500.)
b = thermo.wetlift(pe1, tp1, 500.)
pcl.li5 = a - thermo.virtemp(500, b, b)
# 300 hPa Lifted Index
if prof.pres[i] <= 300. and not utils.QC(pcl.li3):
a = interp.vtmp(prof, 300.)
b = thermo.wetlift(pe1, tp1, 300.)
pcl.li3 = a - thermo.virtemp(300, b, b)
# pcl.bminus = cinh_old
if not utils.QC(pcl.bplus): pcl.bplus = totp
# Calculate BRN if available
bulk_rich(prof, pcl)
# Save params
if np.floor(pcl.bplus) == 0: pcl.bminus = 0.
pcl.ptrace = ma.concatenate((ptrace, ptraces))
pcl.ttrace = ma.concatenate((ttrace, ttraces))
# Find minimum buoyancy from Trier et al. 2014, Part 1
idx = np.ma.where(pcl.ptrace >= 500.)[0]
if len(idx) != 0:
b = pcl.ttrace[idx] - interp.vtmp(prof, pcl.ptrace[idx])
idx2 = np.ma.argmin(b)
pcl.bmin = b[idx2]
pcl.bminpres = pcl.ptrace[idx][idx2]
return pcl
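# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_parcelx(prof):
    # Full mixed-layer parcel lift (flag=4), returning a few of the derived fields.
    mlpcl = parcelx(prof, flag=4)
    return mlpcl.bplus, mlpcl.bminus, mlpcl.lclhght, mlpcl.lfchght, mlpcl.elhght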
def bulk_rich(prof, pcl):
'''
Calculates the Bulk Richardson Number for a given parcel.
Parameters
----------
prof : profile object
Profile object
pcl : parcel object
Parcel object
Returns
-------
Bulk Richardson Number : number
'''
# Make sure parcel is initialized
if not utils.QC(pcl.lplvals):
pbot = ma.masked
elif pcl.lplvals.flag > 0 and pcl.lplvals.flag < 4:
ptop = interp.pres(prof, interp.to_msl(prof, 6000.))
pbot = prof.pres[prof.sfc]
else:
h0 = interp.hght(prof, pcl.pres)
try:
pbot = interp.pres(prof, h0-500.)
except:
pbot = ma.masked
        if not utils.QC(pbot): pbot = prof.pres[prof.sfc]
h1 = interp.hght(prof, pbot)
ptop = interp.pres(prof, h1+6000.)
if not utils.QC(pbot) or not utils.QC(ptop):
pcl.brnshear = ma.masked
pcl.brn = ma.masked
pcl.brnu = ma.masked
pcl.brnv = ma.masked
return pcl
# Calculate the lowest 500m mean wind
p = interp.pres(prof, interp.hght(prof, pbot)+500.)
#print(p, pbot)
mnlu, mnlv = winds.mean_wind(prof, pbot, p)
# Calculate the 6000m mean wind
mnuu, mnuv = winds.mean_wind(prof, pbot, ptop)
# Make sure CAPE and Shear are available
if not utils.QC(pcl.bplus) or not utils.QC(mnlu) or not utils.QC(mnuu):
pcl.brnshear = ma.masked
pcl.brnu = ma.masked
pcl.brnv = ma.masked
pcl.brn = ma.masked
return pcl
# Calculate shear between levels
dx = mnuu - mnlu
dy = mnuv - mnlv
pcl.brnu = dx
pcl.brnv = dy
pcl.brnshear = utils.KTS2MS(utils.mag(dx, dy))
pcl.brnshear = pcl.brnshear**2 / 2.
pcl.brn = pcl.bplus / pcl.brnshear
return pcl
def effective_inflow_layer(prof, ecape=100, ecinh=-250, **kwargs):
'''
Calculates the top and bottom of the effective inflow layer based on
research by [3]_.
Parameters
----------
prof : profile object
Profile object
ecape : number (optional; default=100)
Minimum amount of CAPE in the layer to be considered part of the
effective inflow layer.
    ecinh : number (optional; default=-250)
Maximum amount of CINH in the layer to be considered part of the
effective inflow layer
mupcl : parcel object
Most Unstable Layer parcel
Returns
-------
pbot : number
Pressure at the bottom of the layer (hPa)
ptop : number
Pressure at the top of the layer (hPa)
'''
mupcl = kwargs.get('mupcl', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mucinh = mupcl.bminus
pbot = ma.masked
ptop = ma.masked
if mucape != 0:
if mucape >= ecape and mucinh > ecinh:
# Begin at surface and search upward for effective surface
for i in range(prof.sfc, prof.top):
pcl = cape(prof, pres=prof.pres[i], tmpc=prof.tmpc[i], dwpc=prof.dwpc[i])
if pcl.bplus >= ecape and pcl.bminus > ecinh:
pbot = prof.pres[i]
break
if not utils.QC(pbot):
return ma.masked, ma.masked
bptr = i
# Keep searching upward for the effective top
for i in range(bptr+1, prof.top):
                if not utils.QC(prof.dwpc[i]) or not utils.QC(prof.tmpc[i]):
continue
pcl = cape(prof, pres=prof.pres[i], tmpc=prof.tmpc[i], dwpc=prof.dwpc[i])
if pcl.bplus < ecape or pcl.bminus <= ecinh: #Is this a potential "top"?
j = 1
while not utils.QC(prof.dwpc[i-j]) and not utils.QC(prof.tmpc[i-j]):
j += 1
ptop = prof.pres[i-j]
if ptop > pbot: ptop = pbot
break
return pbot, ptop
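# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_effective_inflow_layer(prof):
    # Effective inflow layer bounds with the default 100 J/kg CAPE and -250 J/kg
    # CINH constraints, returned both as pressures (hPa) and heights AGL (m).
    eff_pbot, eff_ptop = effective_inflow_layer(prof)
    ebot_hght = interp.to_agl(prof, interp.hght(prof, eff_pbot))
    etop_hght = interp.to_agl(prof, interp.hght(prof, eff_ptop))
    return (eff_pbot, eff_ptop), (ebot_hght, etop_hght)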
def _binary_cape(prof, ibot, itop, ecape=100, ecinh=-250):
if ibot == itop:
return prof.pres[ibot]
elif ibot == itop - 1:
pcl = cape(prof, pres=prof.pres[ibot], tmpc=prof.tmpc[ibot], dwpc=prof.dwpc[ibot])
if pcl.bplus < ecape or pcl.bminus <= ecinh:
return prof.pres[ibot]
else:
return prof.pres[itop]
else:
i = ibot + (itop - ibot) // 2
pcl = cape(prof, pres=prof.pres[i], tmpc=prof.tmpc[i], dwpc=prof.dwpc[i])
if pcl.bplus < ecape or pcl.bminus <= ecinh:
return _binary_cape(prof, ibot, i, ecape=ecape, ecinh=ecinh)
else:
return _binary_cape(prof, i, itop, ecape=ecape, ecinh=ecinh)
def effective_inflow_layer_binary(prof, ecape=100, ecinh=-250, **kwargs):
'''
Calculates the top and bottom of the effective inflow layer based on
research by [3]_. Uses a binary search.
Parameters
----------
prof : profile object
Profile object
ecape : number (optional; default=100)
Minimum amount of CAPE in the layer to be considered part of the
effective inflow layer.
    ecinh : number (optional; default=-250)
Maximum amount of CINH in the layer to be considered part of the
effective inflow layer
mupcl : parcel object
Most Unstable Layer parcel
Returns
-------
pbot : number
Pressure at the bottom of the layer (hPa)
ptop : number
Pressure at the top of the layer (hPa)
'''
mupcl = kwargs.get('mupcl', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mucinh = mupcl.bminus
pbot = ma.masked
ptop = ma.masked
if mucape >= ecape and mucinh > ecinh:
istart = np.argmin(np.abs(mupcl.lplvals.pres - prof.pres))
itop = np.argmin(np.abs(300 - prof.pres))
pbot = _binary_cape(prof, istart, prof.sfc, ecape=ecape, ecinh=ecinh)
ptop = _binary_cape(prof, istart, itop, ecape=ecape, ecinh=ecinh)
return pbot, ptop
def bunkers_storm_motion(prof, **kwargs):
'''
Compute the Bunkers Storm Motion for a right moving supercell using a
parcel based approach. This code is consistent with the findings in
Bunkers et. al 2014, using the Effective Inflow Base as the base, and
65% of the most unstable parcel equilibrium level height using the
pressure weighted mean wind.
Parameters
----------
prof : profile object
Profile Object
pbot : float (optional)
Base of effective-inflow layer (hPa)
mupcl : parcel object (optional)
Most Unstable Layer parcel
Returns
-------
rstu : number
Right Storm Motion U-component (kts)
rstv : number
Right Storm Motion V-component (kts)
lstu : number
Left Storm Motion U-component (kts)
lstv : number
Left Storm Motion V-component (kts)
'''
    d = utils.MS2KTS(7.5) # Deviation value empirically derived as 7.5 m/s
mupcl = kwargs.get('mupcl', None)
pbot = kwargs.get('pbot', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=400)
mupcl = parcelx(prof, lplvals=mulplvals)
mucape = mupcl.bplus
mucinh = mupcl.bminus
muel = mupcl.elhght
if not pbot:
pbot, ptop = effective_inflow_layer(prof, 100, -250, mupcl=mupcl)
base = interp.to_agl(prof, interp.hght(prof, pbot))
if mucape > 100. and utils.QC(muel):
depth = muel - base
htop = base + ( depth * (65./100.) )
ptop = interp.pres(prof, interp.to_msl(prof, htop))
mnu, mnv = winds.mean_wind(prof, pbot, ptop)
sru, srv = winds.wind_shear(prof, pbot, ptop)
srmag = utils.mag(sru, srv)
uchg = d / srmag * srv
vchg = d / srmag * sru
rstu = mnu + uchg
rstv = mnv - vchg
lstu = mnu - uchg
lstv = mnv + vchg
else:
rstu, rstv, lstu, lstv = winds.non_parcel_bunkers_motion(prof)
return rstu, rstv, lstu, lstv
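# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_bunkers_storm_motion(prof):
    # Right- and left-mover storm motions converted from u/v components (kts)
    # to direction/speed pairs.
    rstu, rstv, lstu, lstv = bunkers_storm_motion(prof)
    right_dir, right_spd = utils.comp2vec(rstu, rstv)
    left_dir, left_spd = utils.comp2vec(lstu, lstv)
    return (right_dir, right_spd), (left_dir, left_spd)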
def convective_temp(prof, **kwargs):
'''
Computes the convective temperature, assuming no change in the moisture
profile. Parcels are iteratively lifted until only mincinh is left as a
cap. The first guess is the observed surface temperature.
Parameters
----------
prof : profile object
Profile Object
mincinh : parcel object (optional; default -1)
Amount of CINH left at CI
pres : number (optional)
Pressure of parcel to lift (hPa)
tmpc : number (optional)
Temperature of parcel to lift (C)
dwpc : number (optional)
Dew Point of parcel to lift (C)
Returns
-------
Convective Temperature (C) : number
'''
mincinh = kwargs.get('mincinh', 0.)
mmr = mean_mixratio(prof)
pres = kwargs.get('pres', prof.pres[prof.sfc])
tmpc = kwargs.get('tmpc', prof.tmpc[prof.sfc])
dwpc = kwargs.get('dwpc', thermo.temp_at_mixrat(mmr, pres))
    # Do a quick search to find whether to continue. If you need to heat
    # up the parcel by more than 25 C, don't compute.
pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc+25., dwpc=dwpc, trunc=True)
if pcl.bplus == 0. or not utils.QC(pcl.bminus) or pcl.bminus < mincinh: return ma.masked
excess = dwpc - tmpc
if excess > 0: tmpc = tmpc + excess + 4.
pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc, dwpc=dwpc, trunc=True)
if pcl.bplus == 0. or not utils.QC(pcl.bminus): pcl.bminus = ma.masked
while not utils.QC(pcl.bminus) or pcl.bminus < mincinh:
if pcl.bminus < -100: tmpc += 2.
else: tmpc += 0.5
pcl = cape(prof, flag=5, pres=pres, tmpc=tmpc, dwpc=dwpc, trunc=True)
if pcl.bplus == 0.: pcl.bminus = ma.masked
return tmpc
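# Illustrative usage sketch (not part of the original SHARP routines): 'prof' is
# assumed to be a Profile object constructed elsewhere in the package.
def _example_convective_temp(prof):
    # Convective temperature (C) leaving no residual cap, plus a variant that
    # tolerates -25 J/kg of remaining inhibition.
    return convective_temp(prof), convective_temp(prof, mincinh=-25.)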
def tei(prof):
'''
Theta-E Index (TEI)
TEI is the difference between the surface theta-e and the minimum theta-e value
in the lowest 400 mb AGL
Note: This is the definition of TEI on the SPC help page,
but these calculations do not match up with the TEI values on the SPC Online Soundings.
The TEI values online are more consistent with the max Theta-E
minus the minimum Theta-E found in the lowest 400 mb AGL.
Parameters
----------
prof : profile object
Profile object
Returns
-------
tei : number
Theta-E Index
'''
sfc_theta = prof.thetae[prof.sfc]
sfc_pres = prof.pres[prof.sfc]
top_pres = sfc_pres - 400.
layer_idxs = ma.where(prof.pres >= top_pres)[0]
min_thetae = ma.min(prof.thetae[layer_idxs])
max_thetae = ma.max(prof.thetae[layer_idxs])
#tei = sfc_theta - min_thetae
tei = max_thetae - min_thetae
return tei
def esp(prof, **kwargs):
'''
Enhanced Stretching Potential (ESP)
This composite parameter identifies areas where low-level buoyancy
and steep low-level lapse rates are co-located, which may
favor low-level vortex stretching and tornado potential.
REQUIRES: 0-3 km MLCAPE (from MLPCL)
Parameters
----------
prof : profile object
Profile object
mlpcl : parcel object, optional
Mixed-Layer Parcel object
Returns
-------
ESP Index : number
'''
mlpcl = kwargs.get('mlpcl', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mlpcl = parcelx(prof, flag=4)
mlcape = mlpcl.b3km
lr03 = prof.lapserate_3km # C/km
if lr03 < 7. or mlpcl.bplus < 250.:
return 0
esp = (mlcape / 50.) * ((lr03 - 7.0) / (1.0))
return esp
def sherb(prof, **kwargs):
'''
Severe Hazards In Environments with Reduced Buoyancy (SHERB) Parameter (*)
A composite parameter designed to assist forecasters in the High-Shear
Low CAPE (HSLC) environment. This allows better discrimination
    between significant severe and non-severe convection in HSLC environments.
It can detect significant tornadoes and significant winds. Values above
1 are more likely associated with significant severe.
    See Sherburn et al. 2014 (WAF) for more information.
REQUIRES (if effective==True): The effective inflow layer be defined
.. warning::
This function has not been evaluated or tested against the version used at SPC.
Parameters
----------
prof : profile object
Profile object
effective : bool, optional
Use the effective layer computation or not
the effective bulk wind difference (prof.ebwd) must exist first
if not specified it will (Default is False)
ebottom : number, optional
bottom of the effective inflow layer (mb)
etop : number, optional
top of the effective inflow layer (mb)
mupcl : parcel object, optional
Most-Unstable Parcel
Returns
-------
SHERB : number
'''
effective = kwargs.get('effective', False)
ebottom = kwargs.get('ebottom', None)
etop = kwargs.get('etop', None)
lr03 = lapse_rate(prof, 0, 3000, pres=False)
lr75 = lapse_rate(prof, 700, 500, pres=True)
if effective == False:
p3km = interp.pres(prof, interp.to_msl(prof, 3000))
sfc_pres = prof.pres[prof.get_sfc()]
shear = utils.KTS2MS(utils.mag(*winds.wind_shear(prof, pbot=sfc_pres, ptop=p3km)))
sherb = ( shear / 26. ) * ( lr03 / 5.2 ) * ( lr75 / 5.6 )
else:
if hasattr(prof, 'ebwd'):
# If the Profile object has the attribute "ebwd"
shear = utils.KTS2MS(utils.mag( prof.ebwd[0], prof.ebwd[1] ))
elif ((not ebottom) or (not etop)) or \
((not hasattr(prof, 'ebottom') or not hasattr(prof, 'etop'))):
# if the effective inflow layer hasn't been specified via the function arguments
# or doesn't exist in the Profile object we need to calculate it, but we need mupcl
if ebottom is None or etop is None:
#only calculate ebottom and etop if they're not supplied by the kwargs
if not hasattr(prof, 'mupcl') or not kwargs.get('mupcl', None):
# If the mupcl attribute doesn't exist in the Profile
# or the mupcl hasn't been passed as an argument
# compute the mupcl
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
else:
mupcl = prof.mupcl
# Calculate the effective inflow layer
ebottom, etop = effective_inflow_layer( prof, mupcl=mupcl )
            if ebottom is np.ma.masked or etop is np.ma.masked:
# If the inflow layer doesn't exist, return missing
return prof.missing
else:
# Calculate the Effective Bulk Wind Difference
ebotm = interp.to_agl(prof, interp.hght(prof, ebottom))
depth = ( mupcl.elhght - ebotm ) / 2
elh = interp.pres(prof, interp.to_msl(prof, ebotm + depth))
ebwd = winds.wind_shear(prof, pbot=ebottom, ptop=elh)
else:
# If there's no way to compute the effective SHERB
# because there's no information about how to get the
# inflow layer, return missing.
return prof.missing
        if hasattr(prof, 'ebwd'):
            ebwd = prof.ebwd
        shear = utils.KTS2MS(utils.mag(ebwd[0], ebwd[1]))
sherb = ( shear / 27. ) * ( lr03 / 5.2 ) * ( lr75 / 5.6 )
return sherb
def mmp(prof, **kwargs):
"""
MCS Maintenance Probability (MMP)
The probability that a mature MCS will maintain peak intensity
for the next hour.
This equation was developed using proximity soundings and a regression equation
Uses MUCAPE, 3-8 km lapse rate, maximum bulk shear, 3-12 km mean wind speed. Derived
in [4]_.
.. [4] <NAME>., <NAME>, and <NAME>, 2006: Effects of upper-level shear on the structure and maintenance of strong quasi-linear mesoscale convective systems. J. Atmos. Sci., 63, 1231–1251, doi:https://doi.org/10.1175/JAS3681.1.
Note:
Per <NAME> (personal comm.), the maximum deep shear value is computed by
computing the shear vector between all the wind vectors
in the lowest 1 km and all the wind vectors in the 6-10 km layer.
The maximum speed shear from this is the max_bulk_shear value (m/s).
Parameters
----------
prof : profile object
Profile object
mupcl : parcel object, optional
Most-Unstable Parcel object
Returns
-------
MMP index (%): number
"""
mupcl = kwargs.get('mupcl', None)
if not mupcl:
try:
mupcl = prof.mupcl
except:
mulplvals = DefineParcel(prof, flag=3, pres=300)
mupcl = cape(prof, lplvals=mulplvals)
mucape = mupcl.bplus
if mucape < 100.:
return 0.
agl_hght = interp.to_agl(prof, prof.hght)
lowest_idx = np.where(agl_hght <= 1000)[0]
highest_idx = np.where((agl_hght >= 6000) & (agl_hght < 10000))[0]
if len(lowest_idx) == 0 or len(highest_idx) == 0:
return ma.masked
    # Initialize with NaNs so that skipped (unfilled) layer pairs are ignored by nanmax below
    possible_shears = np.full((len(lowest_idx), len(highest_idx)), np.nan)
pbots = interp.pres(prof, prof.hght[lowest_idx])
ptops = interp.pres(prof, prof.hght[highest_idx])
for b in range(len(pbots)):
for t in range(len(ptops)):
if b < t: continue
u_shear, v_shear = winds.wind_shear(prof, pbot=pbots[b], ptop=ptops[t])
possible_shears[b,t] = utils.mag(u_shear, v_shear)
max_bulk_shear = utils.KTS2MS(np.nanmax(possible_shears.ravel()))
lr38 = lapse_rate(prof, 3000., 8000., pres=False)
plower = interp.pres(prof, interp.to_msl(prof, 3000.))
pupper = interp.pres(prof, interp.to_msl(prof, 12000.))
mean_wind_3t12 = winds.mean_wind( prof, pbot=plower, ptop=pupper)
mean_wind_3t12 = utils.KTS2MS(utils.mag(mean_wind_3t12[0], mean_wind_3t12[1]))
a_0 = 13.0 # unitless
a_1 = -4.59*10**-2 # m**-1 * s
a_2 = -1.16 # K**-1 * km
a_3 = -6.17*10**-4 # J**-1 * kg
a_4 = -0.17 # m**-1 * s
mmp = 1. / (1. + np.exp(a_0 + (a_1 * max_bulk_shear) + (a_2 * lr38) + (a_3 * mucape) + (a_4 * mean_wind_3t12)))
return mmp
def wndg(prof, **kwargs):
'''
Wind Damage Parameter (WNDG)
A non-dimensional composite parameter that identifies areas
where large CAPE, steep low-level lapse rates,
enhanced flow in the low-mid levels, and minimal convective
inhibition are co-located.
WNDG values > 1 favor an enhanced risk for scattered damaging
outflow gusts with multicell thunderstorm clusters, primarily
during the afternoon in the summer.
Parameters
----------
prof : profile object
Profile object
mlpcl : parcel object, optional
Mixed-Layer Parcel object (optional)
Returns
-------
WNDG Index : number
'''
mlpcl = kwargs.get('mlpcl', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mllplvals = DefineParcel(prof, flag=4)
mlpcl = cape(prof, lplvals=mllplvals)
mlcape = mlpcl.bplus
lr03 = lapse_rate( prof, 0, 3000., pres=False ) # C/km
bot = interp.pres( prof, interp.to_msl( prof, 1000. ) )
top = interp.pres( prof, interp.to_msl( prof, 3500. ) )
mean_wind = winds.mean_wind(prof, pbot=bot, ptop=top) # needs to be in m/s
mean_wind = utils.KTS2MS(utils.mag(mean_wind[0], mean_wind[1]))
mlcin = mlpcl.bminus # J/kg
if lr03 < 7:
lr03 = 0.
if mlcin < -50:
mlcin = -50.
wndg = (mlcape / 2000.) * (lr03 / 9.) * (mean_wind / 15.) * ((50. + mlcin)/40.)
return wndg
def sig_severe(prof, **kwargs):
'''
Significant Severe (SigSevere)
Craven and Brooks, 2004
Parameters
----------
prof : profile object
Profile object
mlpcl : parcel object, optional
Mixed-Layer Parcel object
Returns
-------
significant severe parameter (m3/s3) : number
'''
mlpcl = kwargs.get('mlpcl', None)
sfc6shr = kwargs.get('sfc6shr', None)
if not mlpcl:
try:
mlpcl = prof.mlpcl
except:
mllplvals = DefineParcel(prof, flag=4)
mlpcl = cape(prof, lplvals=mllplvals)
mlcape = mlpcl.bplus
if not sfc6shr:
try:
sfc_6km_shear = prof.sfc_6km_shear
except:
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
sfc_6km_shear = winds.wind_shear(prof, pbot=sfc, ptop=p6km)
sfc_6km_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
shr06 = utils.KTS2MS(sfc_6km_shear)
sigsevere = mlcape * shr06
return sigsevere
def dcape(prof):
'''
Downdraft CAPE (DCAPE)
Adapted from <NAME>'s (SPC) DCAPE code in NSHARP donated by <NAME> (SPC)
Calculates the downdraft CAPE value using the downdraft parcel source found in the lowest
400 mb of the sounding. This downdraft parcel is found by identifying the minimum 100 mb layer
averaged Theta-E.
Afterwards, this parcel is lowered to the surface moist adiabatically (w/o virtual temperature
correction) and the energy accumulated is called the DCAPE.
Future adaptations of this function may utilize the Parcel/DefineParcel object.
Parameters
----------
prof : profile object
Profile object
Returns
-------
dcape : number
downdraft CAPE (J/kg)
ttrace : array
downdraft parcel trace temperature (C)
ptrace : array
downdraft parcel trace pressure (mb)
'''
sfc_pres = prof.pres[prof.sfc]
prof_thetae = prof.thetae
prof_wetbulb = prof.wetbulb
mask1 = prof_thetae.mask
mask2 = prof.pres.mask
mask = np.maximum( mask1, mask2 )
prof_thetae = prof_thetae[~mask]
prof_wetbulb = prof_wetbulb[~mask]
pres = prof.pres[~mask]
hght = prof.hght[~mask]
dwpc = prof.dwpc[~mask]
tmpc = prof.tmpc[~mask]
idx = np.where(pres >= sfc_pres - 400.)[0]
# Find the minimum average theta-e in a 100 mb layer
mine = 1000.0
minp = -999.0
for i in idx:
thta_e_mean = mean_thetae(prof, pbot=pres[i], ptop=pres[i]-100.)
if utils.QC(thta_e_mean) and thta_e_mean < mine:
minp = pres[i] - 50.
mine = thta_e_mean
upper = minp
uptr = np.where(pres >= upper)[0]
uptr = uptr[-1]
# Define parcel starting point
tp1 = thermo.wetbulb(upper, interp.temp(prof, upper), interp.dwpt(prof, upper))
pe1 = upper
te1 = interp.temp(prof, pe1)
h1 = interp.hght(prof, pe1)
tote = 0
lyre = 0
# To keep track of the parcel trace from the downdraft
ttrace = [tp1]
ptrace = [upper]
# Lower the parcel to the surface moist adiabatically and compute
# total energy (DCAPE)
iter_ranges = range(uptr, -1, -1)
ttraces = ma.zeros(len(iter_ranges))
ptraces = ma.zeros(len(iter_ranges))
ttraces[:] = ptraces[:] = ma.masked
for i in iter_ranges:
pe2 = pres[i]
te2 = tmpc[i]
h2 = hght[i]
tp2 = thermo.wetlift(pe1, tp1, pe2)
if utils.QC(te1) and utils.QC(te2):
tdef1 = (tp1 - te1) / (thermo.ctok(te1))
tdef2 = (tp2 - te2) / (thermo.ctok(te2))
lyrlast = lyre
lyre = 9.8 * (tdef1 + tdef2) / 2.0 * (h2 - h1)
tote += lyre
ttraces[i] = tp2
ptraces[i] = pe2
pe1 = pe2
te1 = te2
h1 = h2
tp1 = tp2
drtemp = tp2 # Downrush temp in Celsius
return tote, ma.concatenate((ttrace, ttraces[::-1])), ma.concatenate((ptrace, ptraces[::-1]))
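# Added illustrative sketch (not part of the original SHARPpy source): the energy
# increment accumulated for a single descending layer in dcape() above, using
# hypothetical temperatures and heights. A parcel 5 C colder than the environment
# descending 100 m contributes roughly +16 J/kg of downdraft CAPE.
def _example_dcape_layer_energy(tp=20., te=25., h1=1100., h2=1000.):
    """Return the layer energy (J/kg) for parcel temp tp (C) vs environment te (C),
    descending from height h1 to h2 (m); the defaults give about +16 J/kg."""
    tdef = (tp - te) / (te + 273.15)  # same role as thermo.ctok(te) in dcape()
    return 9.8 * (tdef + tdef) / 2.0 * (h2 - h1)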
def precip_eff(prof, **kwargs):
'''
Precipitation Efficiency (*)
This calculation comes from Noel and Dobur 2002, published
in NWA Digest Vol 26, No 34.
The calculation multiplies the PW from the whole atmosphere
by the 1000 - 700 mb mean relative humidity (in decimal form)
Values on the SPC Mesoanalysis range from 0 to 2.6.
Larger values means that the precipitation is more efficient.
.. warning::
This function has not been directly compared with a version at SPC.
Parameters
----------
prof : profile object
Profile object
pwat : number, optional
precomputed precipitable water vapor (inch)
pbot : number, optional
the bottom pressure of the RH layer (mb)
ptop : number, optional
the top pressure of the RH layer (mb)
Returns
-------
precip_efficency (inches) : number
'''
pw = kwargs.get('pwat', None)
pbot = kwargs.get('pbot', 1000)
ptop = kwargs.get('ptop', 700)
    # Honor a precomputed value passed via kwargs first, then fall back to the
    # cached profile attribute, and only compute it when neither is available.
    if pw is None:
        if hasattr(prof, 'pwat'):
            pw = prof.pwat
        else:
            pw = precip_water(prof)
mean_rh = mean_relh(prof, pbot=pbot, ptop=ptop) / 100.
return pw*mean_rh
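# Added illustrative sketch (not part of the original SHARPpy source): the
# precipitation-efficiency product above with hypothetical numbers. 1.8 inches of
# precipitable water and 75% mean 1000-700 mb RH give 1.35.
def _example_precip_eff_from_numbers(pwat_in=1.8, mean_relh_pct=75.):
    """Return PW (inches) times the layer-mean RH expressed as a fraction."""
    return pwat_in * (mean_relh_pct / 100.)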
def pbl_top(prof):
'''
Planetary Boundary Layer Depth
Adapted from NSHARP code donated by <NAME> (SPC)
Calculates the planetary boundary layer depth by calculating the
virtual potential temperature of the surface parcel + .5 K, and then searching
for the location above the surface where the virtual potential temperature of the profile
is greater than the surface virtual potential temperature.
    While this routine suggests a parcel lift, this Python adaptation does not use a loop
    like parcelx() does.
Parameters
----------
prof : profile object
Profile object
Returns
-------
ppbl_top (mb) : number
'''
thetav = thermo.theta(prof.pres, thermo.virtemp(prof.pres, prof.tmpc, prof.dwpc))
try:
level = np.where(thetav[prof.sfc]+.5 < thetav)[0][0]
except IndexError:
print("Warning: PBL top could not be found.")
level = thetav.shape[0] - 1
return prof.pres[level]
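# Added illustrative sketch (not part of the original SHARPpy source): the same
# "surface theta-v + 0.5 K" search performed by pbl_top(), run on a small synthetic
# sounding. All pressures and theta-v values below are made up for demonstration.
def _example_pbl_top_search():
    import numpy as np
    pres = np.array([1000., 950., 900., 850., 800.])          # mb
    thetav = np.array([300.0, 300.2, 300.4, 301.0, 302.5])    # K
    level = np.where(thetav[0] + .5 < thetav)[0][0]
    return pres[level]  # -> 850.0 mb for this synthetic profile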
def dcp(prof):
'''
Derecho Composite Parameter (*)
This parameter is based on a data set of 113 derecho events compiled by Evans and Doswell (2001).
The DCP was developed to identify environments considered favorable for cold pool "driven" wind
events through four primary mechanisms:
1) Cold pool production [DCAPE]
2) Ability to sustain strong storms along the leading edge of a gust front [MUCAPE]
3) Organization potential for any ensuing convection [0-6 km shear]
4) Sufficient flow within the ambient environment to favor development along downstream portion of the gust front [0-6 km mean wind].
    This index is formulated as follows:
DCP = (DCAPE/980)*(MUCAPE/2000)*(0-6 km shear/20 kt)*(0-6 km mean wind/16 kt)
Reference:
<NAME>., and <NAME>, 2001: Examination of derecho environments using proximity soundings. Wea. Forecasting, 16, 329-342.
Parameters
----------
prof : profile object
Profile object
Returns
-------
dcp : number
Derecho Composite Parameter (unitless)
'''
sfc = prof.pres[prof.sfc]
p6km = interp.pres(prof, interp.to_msl(prof, 6000.))
dcape_val = getattr(prof, 'dcape', dcape( prof )[0])
mupcl = getattr(prof, 'mupcl', None)
if mupcl is None:
mupcl = parcelx(prof, flag=1)
sfc_6km_shear = getattr(prof, 'sfc_6km_shear', winds.wind_shear(prof, pbot=sfc, ptop=p6km))
mean_6km = getattr(prof, 'mean_6km', utils.comp2vec(*winds.mean_wind(prof, pbot=sfc, ptop=p6km)))
mag_shear = utils.mag(sfc_6km_shear[0], sfc_6km_shear[1])
mag_mean_wind = mean_6km[1]
dcp = (dcape_val/980.) * (mupcl.bplus/2000.) * (mag_shear / 20. ) * (mag_mean_wind / 16.)
return dcp
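# Added illustrative sketch (not part of the original SHARPpy source): the DCP
# formula above with hypothetical inputs. Using exactly the normalization values
# (DCAPE=980 J/kg, MUCAPE=2000 J/kg, shear=20 kt, mean wind=16 kt) yields 1.0.
def _example_dcp_from_numbers(dcape_val=980., mucape=2000., shr06_kt=20., mean_wind_kt=16.):
    """Return the Derecho Composite Parameter for example inputs."""
    return (dcape_val / 980.) * (mucape / 2000.) * (shr06_kt / 20.) * (mean_wind_kt / 16.)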
def mburst(prof):
'''
Microburst Composite Index
Formulated by <NAME> NWS JAN 12/7/2014
Code donated by <NAME> (SPC)
Below is taken from the SPC Mesoanalysis:
The Microburst Composite is a weighted sum of the following individual parameters: SBCAPE, SBLI,
lapse rates, vertical totals (850-500 mb temperature difference), DCAPE, and precipitable water.
All of the terms are summed to arrive at the final microburst composite value.
The values can be interpreted in the following manner: 3-4 infers a "slight chance" of a microburst;
5-8 infers a "chance" of a microburst; >= 9 infers that microbursts are "likely".
These values can also be viewed as conditional upon the existence of a storm.
This code was updated on 9/11/2018 - TT was being used in the function instead of VT.
The original SPC code was checked to confirm this was the problem.
This error was not identified during the testing phase for some reason.
Parameters
----------
prof : profile object
Profile object
Returns
-------
mburst : number
Microburst Composite (unitless)
'''
sbpcl = getattr(prof, 'sfcpcl', None)
if sbpcl is None:
sbpcl = parcelx(prof, flag=1)
lr03 = getattr(prof, 'lapserate_3km', lapse_rate( prof, 0., 3000., pres=False ))
vt = getattr(prof, 'vertical_totals', v_totals(prof))
dcape_val = getattr(prof, 'dcape', dcape( prof )[0])
pwat = getattr(prof, 'pwat', precip_water( prof ))
tei_val = thetae_diff(prof)
sfc_thetae = thermo.thetae(sbpcl.lplvals.pres, sbpcl.lplvals.tmpc, sbpcl.lplvals.dwpc)
# SFC Theta-E term
if thermo.ctok(sfc_thetae) >= 355:
te = 1
else:
te = 0
# Surface-based CAPE Term
if not utils.QC(sbpcl.bplus):
sbcape_term = np.nan
else:
if sbpcl.bplus < 2000:
sbcape_term = -5
if sbpcl.bplus >= 2000:
sbcape_term = 0
if sbpcl.bplus >= 3300:
sbcape_term = 1
if sbpcl.bplus >= 3700:
sbcape_term = 2
if sbpcl.bplus >= 4300:
sbcape_term = 4
# Surface based LI term
if not utils.QC(sbpcl.li5):
sbli_term = np.nan
else:
if sbpcl.li5 > -7.5:
sbli_term = 0
if sbpcl.li5 <= -7.5:
sbli_term = 1
if sbpcl.li5 <= -9.0:
sbli_term = 2
if sbpcl.li5 <= -10.0:
sbli_term = 3
# PWAT Term
if not utils.QC(pwat):
pwat_term = np.nan
else:
if pwat < 1.5:
pwat_term = -3
else:
pwat_term = 0
# DCAPE Term
if not utils.QC(dcape_val):
dcape_term = np.nan
else:
if pwat > 1.70:
if dcape_val > 900:
dcape_term = 1
else:
dcape_term = 0
else:
dcape_term = 0
# Lapse Rate Term
if not utils.QC(lr03):
lr03_term = np.nan
else:
if lr03 <= 8.4:
lr03_term = 0
else:
lr03_term = 1
# Vertical Totals term
if not utils.QC(vt):
vt_term = np.nan
else:
if vt < 27:
vt_term = 0
elif vt >= 27 and vt < 28:
vt_term = 1
elif vt >= 28 and vt < 29:
vt_term = 2
else:
vt_term = 3
# TEI term?
if not utils.QC(tei_val):
ted = np.nan
else:
if tei_val >= 35:
ted = 1
else:
ted = 0
mburst = te + sbcape_term + sbli_term + pwat_term + dcape_term + lr03_term + vt_term + ted
if mburst < 0:
mburst = 0
if np.isnan(mburst):
mburst = np.ma.masked
return mburst
def ehi(prof, pcl, hbot, htop, stu=0, stv=0):
'''
Energy-Helicity Index
Computes the energy helicity index (EHI) using a parcel
object and a profile object.
The equation is EHI = (CAPE * HELICITY) / 160000.
Parameters
----------
prof : profile object
Profile object
pcl : parcel object
Parcel object
hbot : number
Height of the bottom of the helicity layer [m]
htop : number
Height of the top of the helicity layer [m]
stu : number
Storm-relative wind U component [kts]
(optional; default=0)
stv : number
Storm-relative wind V component [kts]
(optional; default=0)
Returns
-------
ehi : number
Energy Helicity Index (unitless)
'''
helicity = winds.helicity(prof, hbot, htop, stu=stu, stv=stv)[0]
ehi = (helicity * pcl.bplus) / 160000.
return ehi
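# Added illustrative sketch (not part of the original SHARPpy source): the EHI
# equation above with hypothetical numbers. 2500 J/kg of CAPE and 320 m2/s2 of
# storm-relative helicity give an EHI of 5.0.
def _example_ehi_from_numbers(cape=2500., srh=320.):
    """Return (CAPE * helicity) / 160000 for example inputs."""
    return (cape * srh) / 160000.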
def sweat(prof):
'''
SWEAT Index
Computes the SWEAT (Severe Weather Threat Index) using the following numbers:
1. 850 Dewpoint
2. Total Totals Index
3. 850 mb wind speed
4. 500 mb wind speed
5. Direction of wind at 500
6. Direction of wind at 850
Formulation taken from Notes on Analysis and Severe-Storm Forecasting Procedures of the Air Force Global Weather Central, 1972 by <NAME>.
.. warning::
This function has not been tested against the SPC version of SHARP.
Parameters
----------
prof : profile object
Profile object
Returns
-------
sweat : number
SWEAT Index (number)
'''
td850 = interp.dwpt(prof, 850)
vec850 = interp.vec(prof, 850)
vec500 = interp.vec(prof, 500)
tt = getattr(prof, 'totals_totals', t_totals( prof ))
if td850 > 0:
term1 = 12. * td850
else:
term1 = 0
if tt < 49:
term2 = 0
else:
term2 = 20. * (tt - 49)
term3 = 2 * vec850[1]
term4 = vec500[1]
    if (130 <= vec850[0] <= 250 and 210 <= vec500[0] <= 310 and
            vec500[0] - vec850[0] > 0 and vec850[1] >= 15 and vec500[1] >= 15):
        term5 = 125 * (np.sin(np.radians(vec500[0] - vec850[0])) + 0.2)
else:
term5 = 0
sweat = term1 + term2 + term3 + term4 + term5
return sweat
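# Added illustrative sketch (not part of the original SHARPpy source): the five
# SWEAT terms above evaluated with hypothetical inputs (850 mb dewpoint in C,
# total totals, wind speeds in kt, wind directions in degrees). Defaults give ~465.
def _example_sweat_from_numbers(td850=12., tt=55., spd850=20., spd500=40.,
                                dir850=180., dir500=230.):
    import numpy as np
    term1 = 12. * td850 if td850 > 0 else 0
    term2 = 20. * (tt - 49) if tt >= 49 else 0
    term3 = 2 * spd850
    term4 = spd500
    if (130 <= dir850 <= 250 and 210 <= dir500 <= 310 and
            dir500 - dir850 > 0 and spd850 >= 15 and spd500 >= 15):
        term5 = 125 * (np.sin(np.radians(dir500 - dir850)) + 0.2)
    else:
        term5 = 0
    return term1 + term2 + term3 + term4 + term5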
def thetae_diff(prof):
'''
thetae_diff()
Adapted from code for thetae_diff2() provided by <NAME> (SPC)
Find the maximum and minimum Theta-E values in the lowest 3000 m of
the sounding and returns the difference. Only positive difference values
(where the minimum Theta-E is above the maximum) are returned.
Parameters
----------
prof : profile object
Profile object
Returns
-------
thetae_diff : number
the Theta-E difference between the max and min values (K)
'''
thetae = getattr(prof, 'thetae', prof.get_thetae_profile())
idx = np.where(interp.to_agl(prof, prof.hght) <= 3000)[0]
maxe_idx = np.ma.argmax(thetae[idx])
mine_idx = np.ma.argmin(thetae[idx])
maxe_pres = prof.pres[idx][maxe_idx]
mine_pres = prof.pres[idx][mine_idx]
thetae_diff = thetae[idx][maxe_idx] - thetae[idx][mine_idx]
if maxe_pres < mine_pres:
return 0
else:
return thetae_diff
def bore_lift(prof, hbot=0., htop=3000., pbot=None, ptop=None):
"""
    Lift all parcels in the layer. Calculate and return the difference between
    the LFC height and the lifted parcel level height for each parcel.
hbot: bottom of layer in meters (AGL)
htop: top of layer in meters(AGL)
OR
pbot: bottom of layer (in hPa)
ptop: top of layer (in hPa)
"""
pres = prof.pres; hght = prof.hght
tmpc = prof.tmpc; dwpc = prof.dwpc
mask = ~prof.pres.mask * ~prof.hght.mask * ~prof.tmpc.mask * ~prof.dwpc.mask
if pbot is not None:
layer_idxs = np.where( (prof.pres[mask] <= pbot ) & ( prof.pres[mask] >= ptop ) )[0]
else:
hbot = interp.to_msl(prof, hbot)
htop = interp.to_msl(prof, htop)
pbot = interp.pres(prof, hbot)
ptop = interp.pres(prof, htop)
layer_idxs = np.where( ( prof.hght[mask] >= hbot ) & ( prof.hght[mask] <= htop ) )[0]
delta_lfc = np.zeros((len(layer_idxs)))
delta_lfc[:] = np.ma.masked
i = 0
for idx in layer_idxs:
lpl = DefineParcel(prof, 5, pres=pres[idx])
pcl = parcelx(prof, pres=pres[idx], tmpc=tmpc[idx], dwpc=dwpc[idx], pbot=pres[idx])
delta_lfc[i] = pcl.lfchght - hght[idx]
i += 1
return np.ma.masked_invalid(delta_lfc)
|
instagram_scraper/endpoints.py | luengwaiban/instagram-python-scraper | 139 | 11092733 | # -*- coding:utf-8 -*-
import urllib.parse
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
BASE_URL = 'https://www.instagram.com'
LOGIN_URL = 'https://www.instagram.com/accounts/login/ajax/'
ACCOUNT_MEDIAS = "http://www.instagram.com/graphql/query/?query_hash=42323d64886122307be10013ad2dcc44&variables=%s"
ACCOUNT_PAGE = 'https://www.instagram.com/%s'
MEDIA_LINK = 'https://www.instagram.com/p/%s'
INSTAGRAM_CDN_URL = 'https://scontent.cdninstagram.com/'
ACCOUNT_JSON_PRIVATE_INFO_BY_ID = 'https://i.instagram.com/api/v1/users/%s/info/'
FOLLOWERS_URL = 'https://www.instagram.com/graphql/query/?query_id=17851374694183129&id={{accountId}}&first={{count}}&after={{after}}'
FOLLOWING_URL = 'https://www.instagram.com/graphql/query/?query_id=17874545323001329&id={{accountId}}&first={{count}}&after={{after}}'
COMMENTS_BEFORE_COMMENT_ID_BY_CODE = 'https://www.instagram.com/graphql/query/?query_hash=33ba35852cb50da46f5b5e889df7d159&variables=%s'
LIKES_BY_SHORTCODE = 'https://www.instagram.com/graphql/query/?query_id=17864450716183058&variables=%s'
MEDIA_JSON_BY_TAG = 'https://www.instagram.com/explore/tags/%s/?__a=1&max_id=%s'
GENERAL_SEARCH = 'https://www.instagram.com/web/search/topsearch/?query=%s'
MEDIA_JSON_BY_LOCATION_ID = 'https://www.instagram.com/explore/locations/%s/?__a=1&max_id=%s'
ACCOUNT_JSON_INFO = 'https://www.instagram.com/%s/?__a=1'
USER_STORIES_LINK = 'https://www.instagram.com/graphql/query/?query_id=17890626976041463&variables=%s'
STORIES_LINK = 'https://www.instagram.com/graphql/query/?query_id=17873473675158481&variables=%s'
LIKE_URL = 'https://www.instagram.com/web/likes/%s/like/'
UNLIKE_URL = 'https://www.instagram.com/web/likes/%s/unlike/'
ADD_COMMENT_URL = 'https://www.instagram.com/web/comments/%s/add/'
DELETE_COMMENT_URL = 'https://www.instagram.com/web/comments/%s/delete/%s/'
def get_account_media_url(variables):
return ACCOUNT_MEDIAS % urllib.parse.quote(variables)
def get_account_page_link(user_name):
return ACCOUNT_PAGE % user_name
def get_media_url(media_url):
return media_url.rstrip('/') + '/?__a=1'
def get_media_page_link(code):
return MEDIA_LINK % urllib.parse.quote(code)
def get_account_json_private_info_link_by_account_id(id):
return ACCOUNT_JSON_PRIVATE_INFO_BY_ID % urllib.parse.quote(str(id))
def get_followers_json_link(account_id, count, after=''):
url = FOLLOWERS_URL.replace('{{accountId}}', urllib.parse.quote(account_id))
url = url.replace('{{count}}', urllib.parse.quote(str(count)))
if after == '':
url = url.replace('&after={{after}}', '')
else:
url = url.replace('{{after}}', urllib.parse.quote(after))
return url
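# Added illustrative usage sketch (not part of the original module): what
# get_followers_json_link() produces for a hypothetical account id and page size.
# The id and count below are placeholders, not real Instagram data.
def _example_followers_url():
    # Returns:
    # https://www.instagram.com/graphql/query/?query_id=17851374694183129&id=123&first=20
    return get_followers_json_link('123', 20)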
def get_following_json_link(account_id, count, after=''):
url = FOLLOWING_URL.replace('{{accountId}}', urllib.parse.quote(account_id))
url = url.replace('{{count}}', urllib.parse.quote(str(count)))
if after == '':
url = url.replace('&after={{after}}', '')
else:
url = url.replace('{{after}}', urllib.parse.quote(after))
return url
def get_comments_before_comment_id_by_code(variables):
return COMMENTS_BEFORE_COMMENT_ID_BY_CODE % urllib.parse.quote(variables)
def get_medias_json_by_tag_link(tag, max_id=''):
return MEDIA_JSON_BY_TAG % (urllib.parse.quote(tag), urllib.parse.quote(max_id))
def get_general_search_json_link(query):
return GENERAL_SEARCH % urllib.parse.quote(query)
def get_medias_json_by_location_id_link(facebook_location_id, max_id=''):
return MEDIA_JSON_BY_LOCATION_ID % (urllib.parse.quote(str(facebook_location_id)), urllib.parse.quote(max_id))
def get_account_json_link(username):
return ACCOUNT_JSON_INFO % urllib.parse.quote(str(username))
def get_user_stories_link(variables):
return USER_STORIES_LINK % urllib.parse.quote(variables)
def get_stories_link(variables):
return STORIES_LINK % urllib.parse.quote(variables)
def get_like_url(media_id):
return LIKE_URL % urllib.parse.quote(str(media_id))
def get_unlike_url(media_id):
return UNLIKE_URL % urllib.parse.quote(str(media_id))
def get_add_comment_url(media_id):
return ADD_COMMENT_URL % urllib.parse.quote(str(media_id))
def get_delete_comment_url(media_id, comment_id):
return DELETE_COMMENT_URL % (urllib.parse.quote(str(media_id)), urllib.parse.quote(str(comment_id)))
def get_last_likes_by_code(variables):
return LIKES_BY_SHORTCODE % urllib.parse.quote(variables) |
cd4ml/problems/groceries/problem.py | camila-contreras/CD4ML-Scenarios | 113 | 11092771 | from cd4ml.problems.problem_base import ProblemBase
import cd4ml.problems.groceries.splitting as splitting
import cd4ml.problems.groceries.readers.stream_data as data_streamer
import cd4ml.problems.groceries.download_data.download_data as dd
import logging
from cd4ml.utils.utils import create_lookup
class Problem(ProblemBase):
def __init__(self,
problem_name,
data_downloader='default',
ml_pipeline_params_name='default',
feature_set_name='default',
algorithm_name='default',
algorithm_params_name='default'):
super(Problem, self).__init__(problem_name,
data_downloader,
feature_set_name=feature_set_name,
ml_pipeline_params_name=ml_pipeline_params_name,
algorithm_name=algorithm_name,
algorithm_params_name=algorithm_params_name)
self.logger = logging.getLogger(__name__)
get_training_validation_filters = splitting.get_training_validation_filters
self._stream_data = data_streamer.stream_data
self.date_lookup = None
self.item_nbr_lookup = None
self.training_filter, self.validation_filter = get_training_validation_filters(self.ml_pipeline_params)
def prepare_feature_data(self):
self.logger.info('Preparing feature data')
train_data = self.training_stream()
date_lookup = create_lookup(train_data,
['dayofweek', 'days_til_end_of_data', 'dayoff'],
'date')
train_data = self.training_stream()
item_nbr_lookup = create_lookup(train_data, ['class', 'family'], 'item_nbr')
if self.feature_set is not None:
self.feature_set.info['date_lookup'] = date_lookup
self.feature_set.info['item_nbr_lookup'] = item_nbr_lookup
@staticmethod
def get_feature_set_constructor(feature_set_name):
if feature_set_name == "default":
from cd4ml.problems.groceries.features.feature_sets.default import feature_set as fs
return fs.get_feature_set
elif feature_set_name == "original":
from cd4ml.problems.groceries.features.feature_sets.original import feature_set as fs
return fs.get_feature_set
else:
raise ValueError("feature_set_name '{}' is invalid. Please check your groceries configuration"
.format(feature_set_name))
def download_data(self):
dd.download(self.problem_name)
|
config/oktoberkite/rotors.py | leozz37/makani | 1,178 | 11092783 | <reponame>leozz37/makani
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rotor parameters."""
from makani.config import mconfig
from makani.control import system_types as m
import numpy as np
@mconfig.Config(deps={
'flight_plan': 'common.flight_plan',
'propellers': 'prop.propellers',
'wing_serial': 'common.wing_serial',
})
def MakeParams(params):
"""Returns parameters about the kite rotors."""
# Motor rotor moment-of-inertia [kg-m^2].
yasa_rotor_moment_of_inertia = 0.33
bottom_row = [m.kMotorSbo, m.kMotorSbi, m.kMotorPbi, m.kMotorPbo]
# Assign propeller versions.
propeller_versions = [None for _ in range(m.kNumMotors)]
  # This arrangement has been shown to minimize net torques.
# Oktoberkite uses very similar rotors as Rev4.
propeller_versions[m.kMotorSbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbi] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPbo] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorPto] = m.kPropVersionRev4PositiveX
propeller_versions[m.kMotorPti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSti] = m.kPropVersionRev4NegativeX
propeller_versions[m.kMotorSto] = m.kPropVersionRev4PositiveX
# Set rotor inclination axis.
# Oktoberkite rotors are not inclined 3deg down as the M600's are.
rotor_tilt_down_deg = 0.0
rotors = [None for _ in range(m.kNumMotors)]
for r in range(m.kNumMotors):
rotors[r] = {
# Normal vector to the propeller plane.
'axis': [np.cos(rotor_tilt_down_deg * np.pi / 180.0), 0.0,
np.sin(rotor_tilt_down_deg * np.pi / 180.0)],
# Direction cosine matrix from body to rotor frame.
'dcm_b2r': {'d': [[np.cos(-rotor_tilt_down_deg * np.pi / 180.0), 0.0,
np.sin(-rotor_tilt_down_deg * np.pi / 180.0)],
[0.0, 1.0, 0.0],
[-np.sin(-rotor_tilt_down_deg * np.pi / 180.0), 0.0,
np.cos(-rotor_tilt_down_deg * np.pi / 180.0)]]},
# Local pressure coefficient [#] at the rotor position. The
# pressure coefficient, C_P, is related to local airspeed
# through the equation:
#
# C_P = 1 - (v / v_freestream)^2
#
# There is a significant difference in airspeeds between the top
# and bottom propellers caused by the lift of the wing.
#
# TODO: OktKite. Input proper pressure coefficient due to wing
# circulation. Currently derived from M600 CFD with the slatted
# kite at 4 deg alpha (https://goo.gl/yfkJJS):
'local_pressure_coeff': 0.1448 if r in bottom_row else -0.1501,
# The rotor direction, diameter [m] and moment of inertia [kg
# m^2] are set from the corresponding propeller's information.
'version': propeller_versions[r],
'dir': params['propellers'][propeller_versions[r]]['dir'],
'D': params['propellers'][propeller_versions[r]]['D'],
'I': (yasa_rotor_moment_of_inertia +
params['propellers'][propeller_versions[r]]['I']),
}
    # We check that the rotor axis is normalized because it is used to
    # determine the force-moment conversion matrix in rotor_control.py.
assert abs(np.linalg.norm(rotors[r]['axis']) - 1.0) < 1e-9
# Rotor positions [m].
# TODO: Confirm values, provide source.
# Looks like M600 x and z values with new y values.
rotors[m.kMotorSbo]['pos'] = [1.613, 4.5, 1.597]
rotors[m.kMotorSbi]['pos'] = [1.613, 1.5, 1.597]
rotors[m.kMotorPbi]['pos'] = [1.613, -1.5, 1.597]
rotors[m.kMotorPbo]['pos'] = [1.613, -4.5, 1.597]
rotors[m.kMotorPto]['pos'] = [1.960, -4.5, -1.216]
rotors[m.kMotorPti]['pos'] = [1.960, -1.5, -1.216]
rotors[m.kMotorSti]['pos'] = [1.960, 1.5, -1.216]
rotors[m.kMotorSto]['pos'] = [1.960, 4.5, -1.216]
return rotors
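# Added hedged sketch (not part of the original Makani config): the airspeed
# relation implied by the local pressure coefficient comment above,
# C_P = 1 - (v / v_freestream)^2, solved for the local speed. Inputs are examples.
def _example_local_airspeed(v_freestream=50.0, local_pressure_coeff=-0.15):
  """Return the local airspeed [m/s] at a rotor for a given C_P and freestream speed."""
  return v_freestream * np.sqrt(1.0 - local_pressure_coeff)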
|
tacker/sol_refactored/conductor/conductor_v2.py | h1r0mu/tacker | 116 | 11092786 |
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.common import log
from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import coordinate
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import lcm_op_occ_utils as lcmocc_utils
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.conductor import vnflcm_driver_v2
from tacker.sol_refactored.nfvo import nfvo_client
from tacker.sol_refactored import objects
from tacker.sol_refactored.objects.v2 import fields
LOG = logging.getLogger(__name__)
CONF = config.CONF
class ConductorV2(object):
def __init__(self):
self.vnflcm_driver = vnflcm_driver_v2.VnfLcmDriverV2()
self.endpoint = CONF.v2_vnfm.endpoint
self.nfvo_client = nfvo_client.NfvoClient()
def _get_lcm_op_method(self, op, postfix):
method = getattr(self.vnflcm_driver, "%s_%s" % (op.lower(), postfix))
return method
def _set_lcmocc_error(self, lcmocc, ex):
if isinstance(ex, sol_ex.SolException):
problem_details = ex.make_problem_details()
else:
            # Program bug; this should occur only during development.
problem_details = {'status': 500,
'detail': str(ex)}
lcmocc.error = objects.ProblemDetails.from_dict(problem_details)
@log.log
def start_lcm_op(self, context, lcmocc_id):
lcmocc = lcmocc_utils.get_lcmocc(context, lcmocc_id)
self._start_lcm_op(context, lcmocc)
@coordinate.lock_vnf_instance('{lcmocc.vnfInstanceId}', delay=True)
def _start_lcm_op(self, context, lcmocc):
# just consistency check
if lcmocc.operationState != fields.LcmOperationStateType.STARTING:
LOG.error("VnfLcmOpOcc unexpected operationState.")
return
inst = inst_utils.get_inst(context, lcmocc.vnfInstanceId)
        # NOTE: an error basically cannot happen up to this point.
        # If an error did occur, lcmocc.operationState remains STARTING;
        # see the tacker-conductor log to investigate the cause.
# NOTE: the following flow follows SOL003 5.4.1.2
# send notification STARTING
self.nfvo_client.send_lcmocc_notification(context, lcmocc, inst,
self.endpoint)
try:
vnfd = self.nfvo_client.get_vnfd(context, inst.vnfdId,
all_contents=True)
            # NOTE: mainly performs the grant exchange, but also handles other
            # tasks needed at the STARTING phase, e.g. request checks.
grant_method = self._get_lcm_op_method(lcmocc.operation, 'grant')
grant_req, grant = grant_method(context, lcmocc, inst, vnfd)
lcmocc.operationState = fields.LcmOperationStateType.PROCESSING
lcmocc.update(context)
except Exception as ex:
LOG.exception("STARTING %s failed", lcmocc.operation)
lcmocc.operationState = fields.LcmOperationStateType.ROLLED_BACK
self._set_lcmocc_error(lcmocc, ex)
lcmocc.update(context)
# send notification PROCESSING or ROLLED_BACK
self.nfvo_client.send_lcmocc_notification(context, lcmocc, inst,
self.endpoint)
if lcmocc.operationState != fields.LcmOperationStateType.PROCESSING:
return
try:
# perform preamble LCM script
start_method = self._get_lcm_op_method(lcmocc.operation, 'start')
start_method(context, lcmocc, inst, grant_req, grant, vnfd)
process_method = self._get_lcm_op_method(lcmocc.operation,
'process')
process_method(context, lcmocc, inst, grant_req, grant, vnfd)
# perform postamble LCM script
end_method = self._get_lcm_op_method(lcmocc.operation, 'end')
end_method(context, lcmocc, inst, grant_req, grant, vnfd)
lcmocc.operationState = fields.LcmOperationStateType.COMPLETED
# update inst and lcmocc at the same time
with context.session.begin(subtransactions=True):
inst.update(context)
lcmocc.update(context)
except Exception as ex:
LOG.exception("PROCESSING %s failed", lcmocc.operation)
lcmocc.operationState = fields.LcmOperationStateType.FAILED_TEMP
self._set_lcmocc_error(lcmocc, ex)
lcmocc.update(context)
# send notification COMPLETED or FAILED_TEMP
self.nfvo_client.send_lcmocc_notification(context, lcmocc, inst,
self.endpoint)
|
manopth/argutils.py | BiggerBinBin/e3d_handpose_x-master | 544 | 11092807 |
import datetime
import os
import pickle
import subprocess
import sys
def print_args(args):
opts = vars(args)
print('======= Options ========')
for k, v in sorted(opts.items()):
print('{}: {}'.format(k, v))
print('========================')
def save_args(args, save_folder, opt_prefix='opt', verbose=True):
opts = vars(args)
# Create checkpoint folder
if not os.path.exists(save_folder):
os.makedirs(save_folder, exist_ok=True)
# Save options
opt_filename = '{}.txt'.format(opt_prefix)
opt_path = os.path.join(save_folder, opt_filename)
with open(opt_path, 'a') as opt_file:
opt_file.write('====== Options ======\n')
for k, v in sorted(opts.items()):
opt_file.write(
'{option}: {value}\n'.format(option=str(k), value=str(v)))
opt_file.write('=====================\n')
opt_file.write('launched {} at {}\n'.format(
str(sys.argv[0]), str(datetime.datetime.now())))
# Add git info
        # Decode the subprocess output so the file records plain text rather
        # than a bytes repr like b'...'.
        label = subprocess.check_output(["git", "describe",
                                         "--always"]).strip().decode()
if subprocess.call(
["git", "branch"],
stderr=subprocess.STDOUT,
stdout=open(os.devnull, 'w')) == 0:
opt_file.write('=== Git info ====\n')
opt_file.write('{}\n'.format(label))
            commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
            opt_file.write('commit : {}\n'.format(commit.strip().decode()))
opt_picklename = '{}.pkl'.format(opt_prefix)
opt_picklepath = os.path.join(save_folder, opt_picklename)
with open(opt_picklepath, 'wb') as opt_file:
pickle.dump(opts, opt_file)
if verbose:
print('Saved options to {}'.format(opt_path))
|
core/dataset/loader_multi_realdata.py | hyunynim/DIST-Renderer | 176 | 11092811 | <reponame>hyunynim/DIST-Renderer<filename>core/dataset/loader_multi_realdata.py
import numpy as np
import os, sys
import copy
import json
import torch
import trimesh
import torch.utils.data
import cv2
import easydict
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
from common.geometry import Camera
class LoaderMultiReal(torch.utils.data.Dataset):
def __init__(self, data_dir, scale=1, refine_sim=False):
self.data_dir = data_dir
self.scale = scale
self.folder_list = [f.name for f in os.scandir(self.data_dir) if f.is_dir()]
self.refine_sim = refine_sim
def transform_camera(self, R, t, sim_transform, scale):
R_new = np.matmul(R, sim_transform[:3, :3])
t_new = np.matmul(R, sim_transform[:, 3]) + t
R_new = R_new/scale
t_new = t_new/scale
return R_new, t_new
def __len__(self):
return len(self.folder_list)
def __getitem__(self, idx):
instance_name = self.folder_list[idx]
base_path = os.path.join(self.data_dir, instance_name)
data_parsed = np.load(os.path.join(base_path, 'parsed.npz'), allow_pickle=True)
data_all = data_parsed['frames']
num_img = len(data_all)
        if os.path.exists(os.path.join(base_path, 'picked_frame.npz')):
            f_pick = np.load(os.path.join(base_path, 'picked_frame.npz'))
            picked_frame = f_pick['picked_frame']
        else:
            # Avoid a NameError below when no picked_frame.npz is provided.
            picked_frame = None
img_list = []
mask_list = []
camera_list = []
sim_transform = np.load(os.path.join(base_path, 'estim_3Dsim.npy'))
# estimate the scale from the provided similarity transform
scale = np.max(np.linalg.svd(sim_transform[:3, :3])[1])
for i in range(num_img):
if (picked_frame is not None) and (i not in picked_frame):
continue
else:
img_cur = cv2.imread(os.path.join(base_path+'/images_clean', data_all[i]['name']))/255
if self.refine_sim is False:
R_cur = data_all[i]['extr'][:3, :3]
t_cur = data_all[i]['extr'][:, 3]
data_all[i]['extr'][:3, :3], data_all[i]['extr'][:, 3] = self.transform_camera(R_cur, t_cur, sim_transform, scale)
cam_cur = Camera(data_all[i]['intr'], data_all[i]['extr'])
mask_cur = np.zeros((img_cur.shape[0], img_cur.shape[1]))
if self.scale != 1:
img_cur = cv2.resize(img_cur, None, fx=self.scale, fy=self.scale)
mask_cur = cv2.resize(mask_cur, None, fx=self.scale, fy=self.scale)
cam_cur.intrinsic[:2] = cam_cur.intrinsic[:2] * self.scale
img_list.append(torch.from_numpy(img_cur).float())
mask_list.append(torch.from_numpy(mask_cur).type(torch.uint8).cuda())
camera_list.append(cam_cur)
sim_transform = torch.from_numpy(np.load(os.path.join(base_path, 'estim_3Dsim.npy'))).float().cuda()
return instance_name, img_list, mask_list, camera_list, sim_transform
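# Added hedged sketch (not part of the original DIST-Renderer code): recovering the
# scale of a 3x4 similarity transform the same way __getitem__ does, via the largest
# singular value of its rotation/scale block. The matrix below is a made-up example.
def _example_similarity_scale():
    s, theta = 2.0, 0.3
    R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0, 0.0, 1.0]])
    sim_transform = np.hstack([s * R, np.zeros((3, 1))])     # [s*R | t]
    return np.max(np.linalg.svd(sim_transform[:3, :3])[1])   # -> 2.0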
|
detection_img.py | tingyumao94/groupsoftmax-simpledet | 153 | 11092822 |
import os
from core.detection_module import DetModule
from core.detection_input import Loader
from utils.load_model import load_checkpoint
from operator_py.nms import py_nms_wrapper
from utils import callback
from mxnet.base import _as_list
from six.moves import reduce
from six.moves.queue import Queue
from threading import Thread
import argparse
import importlib
import mxnet as mx
import numpy as np
import six.moves.cPickle as pkl
import time
import json
import cv2
def parse_args():
parser = argparse.ArgumentParser(description='Test Detection')
# general
parser.add_argument('--config', help='config file path', type=str)
parser.add_argument('--gpu_id', help='gpu_id', type=int, default=0)
parser.add_argument('--epoch', help='load params epoch', type=int, default=0)
parser.add_argument('--thr', help='detection threshold', type=float, default=0.80)
parser.add_argument('--path', help='images path to detect', type=str)
args = parser.parse_args()
config = importlib.import_module(args.config.replace('.py', '').replace('/', '.'))
return args, config
if __name__ == "__main__":
# os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
args, config = parse_args()
pGen, pKv, pRpn, pRoi, pBbox, pDataset, pModel, pOpt, pTest, \
transform, data_name, label_name, metric_list = config.get_config(is_train=False)
nms = py_nms_wrapper(pTest.nms.thr)
sym = pModel.test_symbol
pshort = 800
plong = 2000
arg_params, aux_params = load_checkpoint(pTest.model.prefix, args.epoch)
mod = DetModule(sym, data_names=["data", "im_info", "im_id", "rec_id"], context=mx.gpu(args.gpu_id))
provide_data = [("data", (1, 3, pshort, plong)), ("im_info", (1, 3)), ("im_id", (1,)), ("rec_id", (1,))]
mod.bind(data_shapes=provide_data, for_training=False)
mod.set_params(arg_params, aux_params, allow_extra=False)
image_list = []
if os.path.isfile(args.path):
if ".txt" in args.path:
list_file = open(args.path, 'r')
list_lines = list_file.readlines()
list_file.close()
(fpath, fname) = os.path.split(args.path)
for aline in list_lines:
uints = aline.split(' ')
imgpath = os.path.join(fpath, uints[0])
image_list.append(imgpath)
else:
image_list.append(args.path)
else:
for fname in os.listdir(args.path):
fpath = os.path.join(args.path, fname)
if os.path.isfile(fpath):
image_list.append(fpath)
for imgpath in image_list:
img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
image = img[:, :, ::-1]
short = image.shape[0]
long = image.shape[1]
scale = min(pshort / short, plong / long)
image = cv2.resize(image, None, None, scale, scale, interpolation=cv2.INTER_LINEAR)
# exactly as opencv
h, w = image.shape[:2]
im_info = (h, w, scale)
# shape = (plong, pshort, 3) if h >= w else (pshort, plong, 3)
shape = (pshort, plong, 3)
padded_image = np.zeros(shape, dtype=np.float32)
padded_image[:h, :w] = image
padded_image = padded_image.transpose((2, 0, 1))
img_array = []
img_array.append(padded_image)
iminfo_array = []
iminfo_array.append(im_info)
im_id = mx.nd.array([1])
rec_id = mx.nd.array([1])
data = [mx.nd.array(img_array)]
data.append(mx.nd.array(iminfo_array))
data.append(im_id)
data.append(rec_id)
mbatch = mx.io.DataBatch(data=data, provide_data=provide_data)
start_t = time.time()
mod.forward(mbatch, is_train=False)
outs = [x.asnumpy() for x in mod.get_outputs()]
im_info = outs[2] # h_raw, w_raw, scale
cls_score = outs[3]
bbox_xyxy = outs[4]
if cls_score.ndim == 3:
cls_score = cls_score[0]
bbox_xyxy = bbox_xyxy[0]
bbox_xyxy = bbox_xyxy / scale # scale to original image scale
cls_score = cls_score[:, 1:] # remove background score
# TODO: the output shape of class_agnostic box is [n, 4], while class_aware box is [n, 4 * (1 + class)]
bbox_xyxy = bbox_xyxy[:, 4:] if bbox_xyxy.shape[1] != 4 else bbox_xyxy
final_dets = {}
for cid in range(cls_score.shape[1]):
score = cls_score[:, cid]
if bbox_xyxy.shape[1] != 4:
cls_box = bbox_xyxy[:, cid * 4:(cid + 1) * 4]
else:
cls_box = bbox_xyxy
valid_inds = np.where(score > args.thr)[0]
box = cls_box[valid_inds]
score = score[valid_inds]
det = np.concatenate((box, score.reshape(-1, 1)), axis=1).astype(np.float32)
final_dets[cid] = nms(det)
end_t = time.time()
print("detection use: %.3f seconds." % (end_t - start_t))
for cid in final_dets:
det = final_dets[cid]
if det.shape[0] == 0:
continue
scores = det[:, -1]
x1 = det[:, 0]
y1 = det[:, 1]
x2 = det[:, 2]
y2 = det[:, 3]
for k in range(det.shape[0]):
bbox = [float(x1[k]), float(y1[k]), float(x2[k]), float(y2[k])]
score = float(scores[k])
cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
# cv2.putText(img, "{}:{:.2}".format(str(cid), score), (int(bbox[0]), int(bbox[1] - 10)), 4, 0.6, (0, 0, 255))
(filepath, filename) = os.path.split(imgpath)
cv2.imwrite(filename, img)
exit()
|
mayan/apps/document_signatures/migrations/0011_rename_signaturebasemodel_field.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 11092827 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('documents', '0057_auto_20200916_1057'),
('document_signatures', '0010_auto_20191201_0146')
]
operations = [
migrations.RenameField(
model_name='signaturebasemodel',
old_name='document_version',
new_name='document_file',
),
]
|
src/visitpy/mpicom/py_src/__init__.py | visit-dav/vis | 226 | 11092833 | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
###############################################################################
# file: __init__.py
# Purpose: Main init for the mpicom module.
#
# Programmer: <NAME>
# Creation: Thu Apr 5 08:51:03 PDT 2012
#
#
# Modifications:
# <NAME>, Thu Feb 18 16:06:46 PST 2021
# Change the way the compiled lib import works to support Python 3.
#
###############################################################################
# import the serial stub module
from . import mpistub
# try to import the compiled module
# (this will only exist if visit was built with mpi support)
try:
from . import mpicom
except ImportError:
pass
|
blesuite/entities/gatt_include.py | jreynders/BLESuite-1 | 198 | 11092843 |
import blesuite.utils.att_utils as att_utils
class BLEInclude(object):
""" BLEInclude is used to represent an included service located on a BTLE device
:var handle: Handle of attribute. Type int
:var included_service_att_handle: Start handle for included service. Type int
:var included_service_end_group_handle: End handle for included service. Type int
:var included_service_uuid: UUID of included service. Type str
:var attribute_type: Attribute type UUID (default "2802" - include service), Type str
:var include_definition_attribute_properties: Attribute properties (default blesuite.utils.att_utils.ATT_PROP_READ). Type: blesuite.utils.att_utils.ATT_PROP_*
:var include_definition_attribute_read_permission: Required security mode to read attribute (default blesuite.utils.att_utils.ATT_SECURITY_MODE_OPEN). Type: blesuite.utils.att_utils.ATT_SECURITY_MODE_*
:var include_definition_attribute_write_permission: Required security mode to write to attribute (default blesuite.utils.att_utils.ATT_SECURITY_MODE_NO_ACCESS). Type: blesuite.utils.att_utils.ATT_SECURITY_MODE_*
:var include_definition_attribute_require_authorization: Flag to indicate that access of the attribute requires authorization (default False)
:type handle: int
:type included_service_att_handle: int
:type included_service_end_group_handle: int
:type included_service_uuid: str
:type attribute_type: str
:type include_definition_attribute_properties: blesuite.utils.att_utils.ATT_PROP_*
:type include_definition_attribute_read_permission: blesuite.utils.att_utils.ATT_SECURITY_MODE_*
:type include_definition_attribute_write_permission: blesuite.utils.att_utils.ATT_SECURITY_MODE_*
:type include_definition_attribute_require_authorization: bool
"""
def __init__(self, handle, included_service_att_handle, included_service_end_group_handle, included_service_uuid,
attribute_type="2802",
include_definition_attribute_properties=att_utils.ATT_PROP_READ,
include_definition_attribute_read_permission=att_utils.ATT_SECURITY_MODE_OPEN,
include_definition_attribute_write_permission=att_utils.ATT_SECURITY_MODE_NO_ACCESS,
include_definition_attribute_require_authorization=False):
self.handle = handle
self.included_service_att_handle = included_service_att_handle
self.included_service_uuid = included_service_uuid
self.included_service_end_group_handle = included_service_end_group_handle
self.characteristics = []
self.includes = []
self.type = None
self.type_string = ""
self.attribute_type = attribute_type
self.include_definition_attribute_properties = include_definition_attribute_properties
self.include_definition_attribute_read_permission = include_definition_attribute_read_permission
self.include_definition_attribute_write_permission = include_definition_attribute_write_permission
self.include_definition_attribute_require_authorization = include_definition_attribute_require_authorization
self.determine_type()
def determine_type(self):
"""
Used by blesuite.entities.gatt_include to populate gatt_include.type_string with a readable
        type based on the include's UUID. The defined include service types were pulled from
        https://www.bluetooth.com/specifications/gatt/services
:return:
:rtype:
"""
# Defined Services from https://www.bluetooth.com/specifications/gatt/services
# last updated dict on 1/12/18
type_dict = {
0x1800: "Generic Access",
0x1811: "Alert Notification Service",
0x1815: "Automation IO",
0x180F: "Battery Service",
0x1810: "Blood Pressure",
0x181B: "Body Composition",
0x181E: "Bond Management Service",
0x181F: "Continuous Glucose Monitoring",
0x1805: "Current Time Service",
0x1818: "Cycling Power",
0x1816: "Cycling Speed and Cadence",
0x180A: "Device Information",
0x181A: "Environmental Sensing",
0x1826: "Fitness Machine",
0x1801: "Generic Attribute",
0x1808: "Glucose",
0x1809: "Health Thermometer",
0x180D: "Heart Rate",
0x1823: "HTTP Proxy",
0x1812: "Human Interface Device",
0x1802: "Immediate Alert",
0x1821: "Indoor Positioning",
0x1820: "Internet Protocol Support Service",
0x1803: "Link Loss",
0x1819: "Location and Navigation",
0x1827: "Mesh Provisioning Service",
0x1828: "Mesh Proxy Service",
0x1807: "Next DST Change Service",
0x1825: "Object Transfer Service",
0x180E: "Phone Alert Status Service",
0x1822: "Pulse Oximeter Service",
0x1806: "Reference Time Update Service",
0x1814: "Running Speed and Cadence",
0x1813: "Scan Parameters",
0x1824: "Transport Discovery",
0x1804: "Tx Power",
0x181C: "User Data",
0x181D: "Weight Scale",
}
if self.included_service_uuid is None:
return
type_int = int(self.included_service_uuid[:8], 16)
self.type = type_int
if type_int in type_dict.keys():
self.type_string = type_dict[type_int]
def get_type_string(self):
"""
Returns readable type string of the included service.
:return: Type of service
:rtype: str
"""
return self.type_string
def export_include_to_dictionary(self):
"""
Exports include service information to a dictionary for use by the BLEDevice export functionality.
:return: Dictionary representation of include
:rtype: dict
"""
from collections import OrderedDict
# ordered dictionary allows us to maintain the order we insert keys, this makes reading the resulting
# dictionary easier
include_dictionary = OrderedDict()
include_dictionary['handle'] = self.handle
include_dictionary['included_service_att_handle'] = self.included_service_att_handle
include_dictionary['included_service_end_group_handle'] = self.included_service_end_group_handle
include_dictionary['included_service_uuid'] = self.included_service_uuid
# include_dictionary['attribute_type'] = self.attribute_type
attribute_properties = []
if self.include_definition_attribute_properties & att_utils.ATT_PROP_READ == att_utils.ATT_PROP_READ:
attribute_properties.append("read")
if self.include_definition_attribute_properties & att_utils.ATT_PROP_WRITE == att_utils.ATT_PROP_WRITE:
attribute_properties.append("write")
include_dictionary['include_definition_attribute_properties'] = attribute_properties
attribute_read_permissions = {"security_mode": self.include_definition_attribute_read_permission.security_mode,
"security_level": self.include_definition_attribute_read_permission.security_level
}
include_dictionary['include_definition_attribute_read_permission'] = attribute_read_permissions
attribute_write_permissions = {
"security_mode": self.include_definition_attribute_write_permission.security_mode,
"security_level": self.include_definition_attribute_write_permission.security_level
}
include_dictionary['include_definition_attribute_write_permission'] = attribute_write_permissions
include_dictionary['include_definition_attribute_require_authorization'] = self.include_definition_attribute_require_authorization
return include_dictionary
def import_include_from_dictionary(self, include_dictionary):
"""
Populate include attributes from a dictionary containing included service information.
        This is complementary to export_include_to_dictionary.
:param include_dictionary: Dictionary containing include information
:type include_dictionary: dict
:return:
:rtype:
:raises blesuite.pybt.gatt.InvalidUUIDException: if the provided include dictionary contains an include with an invalid UUID
:raises blesuite.utils.validators.InvalidATTHandle: if the provided include dictionary contains an include with an invalid handle
:raises blesuite.utils.validators.InvalidATTProperty: if the provided include dictionary contains an include with an invalid attribute property
:raises blesuite.utils.validators.InvalidATTSecurityMode: if the provided include dictionary contains an include with an invalid attribute permission
"""
import blesuite.utils.validators as validator
include_attributes = include_dictionary.keys()
if 'included_service_uuid' in include_attributes:
uuid = validator.validate_attribute_uuid(include_dictionary['included_service_uuid'])
self.included_service_uuid = uuid
else:
raise validator.InvalidUUIDException(None)
self.determine_type()
if 'handle' in include_attributes:
handle = validator.validate_int_att_handle(include_dictionary['handle'])
self.handle = handle
else:
# This will allow us to disregard adding handles to our import JSON file and we can calculate during
# the gatt_server creation that uses the BLEDevice (flag enabled by default)
self.included_service_att_handle = 0x00
if 'included_service_att_handle' in include_attributes:
included_service_att_handle = validator.validate_int_att_handle(include_dictionary['included_service_att_handle'])
self.included_service_att_handle = included_service_att_handle
else:
# This will allow us to disregard adding handles to our import JSON file and we can calculate during
# the gatt_server creation that uses the BLEDevice (flag enabled by default)
self.included_service_att_handle = 0x00
if 'included_service_end_group_handle' in include_attributes:
end = validator.validate_int_att_handle(include_dictionary['included_service_end_group_handle'])
self.included_service_end_group_handle = end
else:
self.included_service_end_group_handle = 0x00
if 'include_definition_attribute_properties' in include_attributes:
att_properties = include_dictionary['include_definition_attribute_properties']
for att_property in att_properties:
self.include_definition_attribute_properties = 0
validated_att_property = validator.validate_att_property(att_property)
if validated_att_property == "read":
self.include_definition_attribute_properties |= att_utils.ATT_PROP_READ
elif validated_att_property == "write":
self.include_definition_attribute_properties |= att_utils.ATT_PROP_WRITE
if 'include_definition_attribute_read_permission' in include_attributes:
permission_dictionary = include_dictionary['include_definition_attribute_read_permission']
permission_keys = permission_dictionary.keys()
if "security_mode" not in permission_keys:
mode = None
else:
mode = permission_dictionary['security_mode']
if "security_level" not in permission_keys:
level = None
else:
level = permission_dictionary['security_level']
mode, level = validator.validate_att_security_mode(mode, level)
self.include_definition_attribute_read_permission = att_utils.get_att_security_mode_from_mode_and_level(
mode, level)
if 'include_definition_attribute_write_permission' in include_attributes:
permission_dictionary = include_dictionary['include_definition_attribute_write_permission']
permission_keys = permission_dictionary.keys()
if "security_mode" not in permission_keys:
mode = None
else:
mode = permission_dictionary['security_mode']
if "security_level" not in permission_keys:
level = None
else:
level = permission_dictionary['security_level']
mode, level = validator.validate_att_security_mode(mode, level)
self.include_definition_attribute_write_permission = att_utils.get_att_security_mode_from_mode_and_level(
mode, level)
if 'include_definition_attribute_require_authorization' in include_attributes:
require_auth = include_dictionary['include_definition_attribute_require_authorization']
if require_auth is not None:
self.include_definition_attribute_require_authorization = require_auth
return
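# Added hedged usage sketch (not part of the original BLESuite source): building an
# include entry for a hypothetical Battery Service (UUID 0x180F) and exporting it.
# The handles below are arbitrary example values.
def _example_battery_service_include():
    inc = BLEInclude(0x0002, 0x0010, 0x001f,
                     "0000180f-0000-1000-8000-00805f9b34fb")
    return inc.get_type_string(), inc.export_include_to_dictionary()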
|
towhee/trainer/models/utils/pretrained_utils.py | ThyeeZz/towhee | 365 | 11092856 |
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.utils import model_zoo
import torch
import logging
def load_pretrained_weights(
model,
model_name=None,
model_configs=None,
weights_path=None,
):
"""
Load pretrained weights from path or url.
Args:
model(nn.Module):
Model.
model_name(str):
Model name
model_configs(dict):
Model configs
weights_path(str, optional):
Path to pretrained weights file on the local disk.
"""
logging.info('model_name is %s', str(model_name))
logging.info('weights_path is %s', str(weights_path))
assert bool(model_name), 'Expected model_name'
# Load or download weights
if weights_path is None:
logging.info(model_configs.keys())
url = model_configs['url']
if url:
logging.info('Please check hub connection in case weights can not be downloaded!')
state_dict = model_zoo.load_url(url, map_location=torch.device('cpu'))
else:
raise ValueError(f'Pretrained model for {model_name} is not available.')
else:
state_dict = torch.load(weights_path)
# Load state dict
ret = model.load_state_dict(state_dict['model'], strict=False)
return ret
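# Added hedged usage sketch (not part of the original Towhee source): loading a
# local checkpoint into an existing torch model. The model name and path below are
# placeholders, not real Towhee assets.
def _example_load_local_weights(model, ckpt_path='weights/model_best.pth'):
    return load_pretrained_weights(model, model_name='example_model',
                                   weights_path=ckpt_path)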
|
isserviceup/services/pagerduty.py | EvgeshaGars/is-service-up | 182 | 11092864 |
from isserviceup.services.models.statuspage import StatusPagePlugin
class PagerDuty(StatusPagePlugin):
name = 'PagerDuty'
status_url = 'https://status.pagerduty.com/'
icon_url = '/images/icons/pagerduty.png'
|
qiskit/transpiler/passes/scheduling/padding/base_padding.py | itoko/qiskit-terra | 1,456 | 11092883 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Padding pass to fill empty timeslot."""
from typing import List, Optional, Union
from qiskit.circuit import Qubit, Clbit, Instruction
from qiskit.circuit.delay import Delay
from qiskit.dagcircuit import DAGCircuit, DAGNode
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
class BasePadding(TransformationPass):
"""The base class of padding pass.
This pass requires one of scheduling passes to be executed before itself.
Since there are multiple scheduling strategies, the selection of scheduling
pass is left in the hands of the pass manager designer.
Once a scheduling analysis pass is run, ``node_start_time`` is generated
in the :attr:`property_set`. This information is represented by a python dictionary of
the expected instruction execution times keyed on the node instances.
Entries in the dictionary are only created for non-delay nodes.
The padding pass expects all ``DAGOpNode`` in the circuit to be scheduled.
This base class doesn't define any sequence to interleave, but it manages
the location where the sequence is inserted, and provides a set of information necessary
to construct the proper sequence. Thus, a subclass of this pass just needs to implement
:meth:`_pad` method, in which the subclass constructs a circuit block to insert.
This mechanism removes lots of boilerplate logic to manage whole DAG circuits.
Note that padding pass subclasses should define interleaving sequences satisfying:
- Interleaved sequence does not change start time of other nodes
- Interleaved sequence should have total duration of the provided ``time_interval``.
Any manipulation violating these constraints may prevent this base pass from correctly
tracking the start time of each instruction,
which may result in violation of hardware alignment constraints.
"""
def run(self, dag: DAGCircuit):
"""Run the padding pass on ``dag``.
Args:
dag: DAG to be checked.
Returns:
DAGCircuit: DAG with idle time filled with instructions.
Raises:
TranspilerError: When a particular node is not scheduled, likely some transform pass
is inserted before this node is called.
"""
self._pre_runhook(dag)
node_start_time = self.property_set["node_start_time"].copy()
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
# Update start time dictionary for the new_dag.
# This information may be used for further scheduling tasks,
        # but this is immediately invalidated because the node id is updated in the new_dag.
self.property_set["node_start_time"].clear()
new_dag.name = dag.name
new_dag.metadata = dag.metadata
new_dag.unit = self.property_set["time_unit"]
new_dag.calibrations = dag.calibrations
new_dag.global_phase = dag.global_phase
idle_after = {bit: 0 for bit in dag.qubits}
# Compute fresh circuit duration from the node start time dictionary and op duration.
# Note that pre-scheduled duration may change within the alignment passes, i.e.
# if some instruction time t0 violating the hardware alignment constraint,
# the alignment pass may delay t0 and accordingly the circuit duration changes.
circuit_duration = 0
for node in dag.topological_op_nodes():
if node in node_start_time:
t0 = node_start_time[node]
t1 = t0 + node.op.duration
circuit_duration = max(circuit_duration, t1)
if isinstance(node.op, Delay):
                # The padding class treats a delay instruction as idle time
                # rather than as an instruction. The delay node is removed so
                # that we can extract non-delay predecessors.
dag.remove_op_node(node)
continue
for bit in node.qargs:
# Fill idle time with some sequence
if t0 - idle_after[bit] > 0:
# Find previous node on the wire, i.e. always the latest node on the wire
prev_node = next(new_dag.predecessors(new_dag.output_map[bit]))
self._pad(
dag=new_dag,
qubit=bit,
t_start=idle_after[bit],
t_end=t0,
next_node=node,
prev_node=prev_node,
)
idle_after[bit] = t1
self._apply_scheduled_op(new_dag, t0, node.op, node.qargs, node.cargs)
else:
raise TranspilerError(
f"Operation {repr(node)} is likely added after the circuit is scheduled. "
"Schedule the circuit again if you transformed it."
)
# Add delays until the end of circuit.
for bit in new_dag.qubits:
if circuit_duration - idle_after[bit] > 0:
node = new_dag.output_map[bit]
prev_node = next(new_dag.predecessors(node))
self._pad(
dag=new_dag,
qubit=bit,
t_start=idle_after[bit],
t_end=circuit_duration,
next_node=node,
prev_node=prev_node,
)
new_dag.duration = circuit_duration
return new_dag
def _pre_runhook(self, dag: DAGCircuit):
"""Extra routine inserted before running the padding pass.
Args:
dag: DAG circuit on which the sequence is applied.
Raises:
TranspilerError: If the whole circuit or instruction is not scheduled.
"""
if "node_start_time" not in self.property_set:
raise TranspilerError(
f"The input circuit {dag.name} is not scheduled. Call one of scheduling passes "
f"before running the {self.__class__.__name__} pass."
)
def _apply_scheduled_op(
self,
dag: DAGCircuit,
t_start: int,
oper: Instruction,
qubits: Union[Qubit, List[Qubit]],
clbits: Optional[Union[Clbit, List[Clbit]]] = None,
):
"""Add new operation to DAG with scheduled information.
        This is identical to apply_operation_back + updating the node_start_time property.
Args:
dag: DAG circuit on which the sequence is applied.
t_start: Start time of new node.
oper: New operation that is added to the DAG circuit.
qubits: The list of qubits that the operation acts on.
clbits: The list of clbits that the operation acts on.
"""
if isinstance(qubits, Qubit):
qubits = [qubits]
if isinstance(clbits, Clbit):
clbits = [clbits]
new_node = dag.apply_operation_back(oper, qargs=qubits, cargs=clbits)
self.property_set["node_start_time"][new_node] = t_start
def _pad(
self,
dag: DAGCircuit,
qubit: Qubit,
t_start: int,
t_end: int,
next_node: DAGNode,
prev_node: DAGNode,
):
"""Interleave instruction sequence in between two nodes.
.. note::
If a DAGOpNode is added here, it should update node_start_time property
in the property set so that the added node is also scheduled.
This is achieved by adding operation via :meth:`_apply_scheduled_op`.
.. note::
This method doesn't check if the total duration of new DAGOpNode added here
is identical to the interval (``t_end - t_start``).
A developer of the pass must guarantee this is satisfied.
If the duration is greater than the interval, your circuit may be
compiled down to the target code with extra duration on the backend compiler,
which is then played normally without error. However, the outcome of your circuit
might be unexpected due to erroneous scheduling.
Args:
dag: DAG circuit that sequence is applied.
qubit: The wire that the sequence is applied on.
t_start: Absolute start time of this interval.
t_end: Absolute end time of this interval.
next_node: Node that follows the sequence.
prev_node: Node ahead of the sequence.
"""
raise NotImplementedError
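# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original pass).
# A concrete padding pass only has to implement ``_pad``.  The name of the
# abstract class above (``BasePadding``) and the default Delay unit are
# assumptions here; the point is simply that filling each idle interval with a
# single Delay through ``_apply_scheduled_op`` keeps ``node_start_time``
# consistent, similar in spirit to a pad-with-delay pass.
#
# class PadWithDelay(BasePadding):          # hypothetical subclass name
#     def _pad(self, dag, qubit, t_start, t_end, next_node, prev_node):
#         duration = t_end - t_start
#         if duration > 0:
#             # records the new node's start time in the property set
#             self._apply_scheduled_op(dag, t_start, Delay(duration), qubit)
# ---------------------------------------------------------------------------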
|
Toxicity/toxic.py | UmaTaru/run | 163 | 11092895 | import re, string
import numpy as np
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
re_tok = re.compile(f'([{string.punctuation}“”¨«»®´·º½¾¿¡§£₤‘’])')
def tokenize(s):
return re_tok.sub(r' \1 ', s).split()
class NBTfidfVectorizer(TfidfVectorizer):
"""Class for generating Naive Bayes features with tf-idf priors.
Can also be used to generate tf-idf only.
"""
def __init__(self):
super().__init__(
ngram_range=(1,2), tokenizer=tokenize,
min_df=3, max_df=0.9, strip_accents='unicode', use_idf=1,
smooth_idf=1, sublinear_tf=1)
        # Naive Bayes parameter
self._r = None
def fit(self, X, y):
"""Calculate NB and tf-idf parameters """
# fit and generate TF-IDF features
X_tfidf = super().fit_transform(X)
# get NB features
p = (X_tfidf[y == 1].sum(0) + 1) / ((y == 1).sum() + 1)
q = (X_tfidf[y == 0].sum(0) + 1) / ((y == 0).sum() + 1)
self._r = np.log(p / q)
def transform(self, X):
X_tfidf = super().transform(X)
return X_tfidf.multiply(self._r)
def fit_transform(self, X, y):
self.fit(X, y)
return self.transform(X)
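def _nb_log_count_ratio_demo():
    """Added illustration (not part of the original kernel): the Naive Bayes
    weight computed in ``NBTfidfVectorizer.fit`` is a smoothed log-count ratio
    per token.  The toy counts below are made up for demonstration."""
    p = (3.0 + 1) / (10 + 1)  # smoothed rate of a token within toxic comments
    q = (1.0 + 1) / (90 + 1)  # smoothed rate of the same token within clean comments
    return np.log(p / q)      # ~2.8, i.e. strong evidence for the toxic class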
class NBLogisticRegression(LogisticRegression, NBTfidfVectorizer):
def __init__(self):
self.regressor = LogisticRegression(C=4, dual=True)
self.vectorizer = NBTfidfVectorizer()
def fit(self, X, y):
print('Fitting NBTfidf')
X_NBTfidf = self.vectorizer.fit_transform(X, y)
print('Fitting LogisticRegression')
self.regressor.fit(X_NBTfidf, y)
def predict_proba(self, X):
X_NBTfidf = self.vectorizer.transform(X)
return self.regressor.predict_proba(X_NBTfidf)[:,1]
def predict(self, X):
X_NBTfidf = self.vectorizer.transform(X)
return self.regressor.predict(X_NBTfidf)
if __name__ == '__main__':
# Code from https://www.kaggle.com/jhoward/nb-svm-strong-linear-baseline
data = pd.read_csv('../datasets/Kaggle_Toxic/data/train.csv')
data['toxic'] = data['toxic'] + data['insult'] + data['obscene'] + data['severe_toxic'] + data['identity_hate'] + data['threat']
data['toxic'][data['toxic'] != 0] = 1
train, test = train_test_split(data, test_size=0.25)
train['none'] = 1-train['toxic']
print('{} none labels out of {} comments'.format(train['none'].sum(), train.shape[0]))
print('so {} of the comments are non toxic'.format(train['none'].sum() / train.shape[0]))
COMMENT = 'comment_text'
train[COMMENT].fillna("<unk>", inplace=True)
test[COMMENT].fillna("<unk>", inplace=True)
logistic = NBLogisticRegression()
logistic.fit(train[COMMENT], train['toxic'].values)
train_preds = logistic.predict(train[COMMENT])
test_preds = logistic.predict(test[COMMENT])
print('Train accuracy is: {:.3f}'.format(accuracy_score(train['toxic'], train_preds)))
print('Train recall (True positive) is {:.3f}'.format(recall_score(train['toxic'], train_preds)))
print('Train precision is {:.3f}'.format(precision_score(train['toxic'], train_preds)))
print('Train F1 is {:3f}'.format(f1_score(train['toxic'], train_preds)))
print('*' * 20)
print('*' * 20)
print('Test accuracy is: {:.3f}'.format(accuracy_score(test['toxic'], test_preds)))
print('Test recall (True positive) is {:.3f}'.format(recall_score(test['toxic'], test_preds)))
print('Test precision is {:.3f}'.format(precision_score(test['toxic'], test_preds)))
print('Test F1 is {:3f}'.format(f1_score(test['toxic'], test_preds)))
print('#' * 20)
print('#' * 20)
print('Training model on full data')
logistic = NBLogisticRegression()
logistic.fit(data[COMMENT], data['toxic'].values)
print('Saving trained toxicity model')
with open('toxicity_model.pkl', 'wb') as f:
pickle.dump(logistic, f)
|
tools/ops/script_runner/lib/custom_commands/management/commands/estimate_concurrent_users.py | yetsun/hue | 5,079 | 11092906 | <reponame>yetsun/hue
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import logging
import heapq
import datetime
import time
import subprocess
from collections import OrderedDict
from django.core.management.base import BaseCommand, CommandError
import desktop.conf
from desktop.conf import TIME_ZONE
from search.conf import SOLR_URL, SECURITY_ENABLED as SOLR_SECURITY_ENABLED
from liboozie.conf import OOZIE_URL, SECURITY_ENABLED as OOZIE_SECURITY_ENABLED
from hadoop import conf as hdfs_conf
from hadoop import cluster
if sys.version_info[0] > 2:
from django.utils.translation import gettext_lazy as _t, gettext as _
else:
from django.utils.translation import ugettext_lazy as _t, ugettext as _
DEFAULT_LOG_DIR = 'logs'
log_dir = os.getenv("DESKTOP_LOG_DIR", DEFAULT_LOG_DIR)
class Command(BaseCommand):
"""
    Handler for estimating the number of concurrent users from Hue access logs
"""
try:
from optparse import make_option
option_list = BaseCommand.option_list + (
make_option("--today", help=_t("Estimate users for today."),
action="store_true", default=False, dest='today'),
make_option("--logdir", help=_t("Specify directory to process access logs."),
action="store", default=log_dir, dest='logdir'),
make_option("--increment", help=_t("Increments to count users, hour, min10, day"),
action="store", default="day", dest='increment'),
make_option("--date", help=_t("Estimate users for date. In form of YYYY-MM-DD"),
action="store", default=False, dest='date'),
make_option("--last10", help=_t("Process logs for last 10 minutes."),
action="store_true", default=False, dest='last10'),
make_option("--last1h", help=_t("Process logs for last hour."),
action="store_true", default=False, dest='last1h'),
make_option("--includejb", help=_t("Include Jobbrowser entries."),
action="store_true", default=False, dest='includejb'),
make_option("--verbose", help=_t("Verbose."),
action="store_true", default=False, dest='verbose'),
)
    except AttributeError as e:
baseoption_test = 'BaseCommand' in str(e) and 'option_list' in str(e)
if baseoption_test:
def add_arguments(self, parser):
parser.add_argument("--today", help=_t("Estimate users for today."),
action="store_true", default=False, dest='today'),
parser.add_argument("--logdir", help=_t("Specify directory to process access logs."),
action="store", default=log_dir, dest='logdir'),
parser.add_argument("--increment", help=_t("Increments to count users, hour, min10, day"),
action="store", default="day", dest='increment'),
parser.add_argument("--date", help=_t("Estimate users for date. In form of YYYY-MM-DD"),
action="store", default=False, dest='date'),
parser.add_argument("--last10", help=_t("Process logs for last 10 minutes."),
action="store_true", default=False, dest='last10'),
parser.add_argument("--last1h", help=_t("Process logs for last hour."),
action="store_true", default=False, dest='last1h'),
parser.add_argument("--includejb", help=_t("Include Jobbrowser entries."),
action="store_true", default=False, dest='includejb'),
parser.add_argument("--verbose", help=_t("Verbose."),
action="store_true", default=False, dest='verbose')
else:
logging.warn(str(e))
sys.exit(1)
def handle(self, *args, **options):
if options['date']:
now = datetime.datetime.strptime(options['date'], '%Y-%m-%d')
else:
now = datetime.datetime.now()
minus10 = now - datetime.timedelta(minutes=10)
minus1h = now - datetime.timedelta(minutes=60)
date = now - datetime.timedelta(days=1999)
previous_date = now - datetime.timedelta(days=2000)
totalconcurrent = 0
userlist = []
numlist = []
regex = re.compile(
# Example line
# [20/Jun/2017 04:40:07 -0700] DEBUG 172.31.112.36 -anon- - "HEAD /desktop/debug/is_alive HTTP/1.1"
r'\['
r'(?P<date>'
r'\d{2}/\w{3}/\d{4} ' # Parse Date in form of '25/Oct/2015'
r'\d{2}:\d{2}:\d{2}' # Parse Time in form of '12:34:56'
r') '
r'[-+]?\d{4}' # Ignore the timezone
r'\] '
r'(?P<level>\w+) +'
r'(?P<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) '
            r'(?P<user>[\w-]+) '  # allow user names such as "-anon-"
r'\S+ "' # Ignore unknown
r'(?P<method>\w+) '
r'(?P<url>\S+) '
)
for filename in sorted(os.listdir(options['logdir']), reverse=True):
if not filename.startswith("access"):
continue # Only process access log files
for line in open(options['logdir'] + "/" + filename).xreadlines():
if not line.startswith("["):
continue # Only process lines that start with a date
# Make sure this log entry is a user access
m = regex.match(line)
if m:
previous_date = date
date = datetime.datetime.strptime(m.group('date'), '%d/%b/%Y %H:%M:%S')
if not options['includejb']:
                        if re.match('/jobbrowser/jobs/$', m.group('url')):
continue
if options['today']:
if \
date.year != now.year or \
date.month != now.month or \
date.day != now.day:
continue
if options['last10']:
# Skip anything older than 10 mins ago
if date < minus10:
continue
if options['last1h']:
# Skip anything older than 1 hour ago
if date < minus1h:
continue
user = m.group('user')
if previous_date.day == date.day:
if not user == "-anon-":
userlist.append(user)
else:
newuserlist = list(OrderedDict.fromkeys(userlist))
userlist = []
totalconcurrent = len(newuserlist)
numlist.append(totalconcurrent)
newuserlist = list(OrderedDict.fromkeys(userlist))
totalconcurrent = len(newuserlist)
numlist.append(totalconcurrent)
        # Sort the list and remove any duplicate values
numlist = sorted(set(numlist))
# Print the top 10 most concurrent counts
logging.warn("largest: %s" % heapq.nlargest(10, numlist))
# print "newuserlist: %s" % newuserlist
# print "userlist: %s" % userlist
|
ranking/management/modules/adventofcode.py | aropan/clist | 166 | 11092921 | <gh_stars>100-1000
#!/usr/bin/env python
import html
import json
import re
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta, timezone
from urllib.parse import urljoin
import arrow
from ranking.management.modules import conf
from ranking.management.modules.common import REQ, BaseModule
class Statistic(BaseModule):
def get_standings(self, *args, **kwargs):
func = self._get_private_standings if '/private/' in self.url else self._get_global_standings
return func(*args, **kwargs)
def _get_private_standings(self, users=None, statistics=None):
REQ.add_cookie('session', conf.ADVENTOFCODE_SESSION, '.adventofcode.com')
page = REQ.get(self.url.rstrip('/') + '.json')
data = json.loads(page)
year = int(data['event'])
problems_infos = OrderedDict()
times = defaultdict(list)
def items_sort(d):
return sorted(d.items(), key=lambda i: int(i[0]))
result = {}
total_members = len(data['members'])
tz = timezone(timedelta(hours=-5))
for r in data['members'].values():
handle = r.pop('id')
row = result.setdefault(handle, OrderedDict())
row['member'] = handle
row['solving'] = r.pop('local_score')
row['name'] = r.pop('name')
row['global_score'] = r.pop('global_score')
row['stars'] = r.pop('stars')
ts = int(r.pop('last_star_ts'))
if ts:
row['last_star'] = ts
solutions = r.pop('completion_day_level')
problems = row.setdefault('problems', OrderedDict())
for day, solution in items_sort(solutions):
if not solution:
continue
day = str(day)
for star, res in items_sort(solution):
star = str(star)
k = f'{day}.{star}'
if k not in problems_infos:
problems_infos[k] = {'name': day,
'code': k,
'group': day,
'subname': '*',
'subname_class': 'first-star' if star == '1' else 'both-stars',
'url': urljoin(self.url, f'/{year}/day/{day}'),
'_order': (int(day), int(star)),
'visible': False}
times[k].append(res['get_star_ts'])
day_start_time = datetime(year=year, month=12, day=int(day), tzinfo=tz)
delta = datetime.fromtimestamp(res['get_star_ts'], tz=timezone.utc) - day_start_time
problems[k] = {
'ts': res['get_star_ts'],
'time': self.to_time(delta),
}
for v in times.values():
v.sort()
for row in result.values():
problems = row.setdefault('problems', {})
for k, p in row['problems'].items():
ts = p.pop('ts')
rank = times[k].index(ts) + 1
score = total_members - rank + 1
p['time_in_seconds'] = ts
p['result'] = score
last = None
for idx, r in enumerate(sorted(result.values(), key=lambda r: -r['solving']), start=1):
if r['solving'] != last:
last = r['solving']
rank = idx
r['place'] = rank
problems = list(sorted(problems_infos.values(), key=lambda p: p['_order']))
for p in problems:
p.pop('_order')
ret = {
'hidden_fields': {'last_star', 'stars', 'ranks'},
'result': result,
'fields_types': {'last_star': ['timestamp']},
'problems': problems,
}
now = datetime.now(tz=tz)
if now.year == year and now.month == 12:
start = now.replace(hour=0, minute=0, second=0)
delta = start - now
if delta < timedelta():
delta += timedelta(days=1, seconds=42)
if delta > timedelta(hours=23):
delta = timedelta(minutes=5)
else:
delta = min(delta, timedelta(hours=4))
ret['timing_statistic_delta'] = delta
return ret
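    # Worked example of the local-score rule used above (added for clarity):
    # with 5 members on a private board, the member who earns a given star
    # first gets 5 points for it, the second gets 4, and so on; a member's
    # "solving" value is the sum of these per-star scores.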
def _get_global_standings(self, users=None, statistics=None):
year = self.start_time.year
year = year if self.start_time.month >= 9 else year - 1
season = '%d-%d' % (year, year + 1)
ret = {}
if '/day/' not in self.url:
match = re.search(r'\bday\b\s+(?P<day>[0-9]+)', self.name, re.IGNORECASE)
contest_url = self.url.rstrip('/') + '/day/' + match['day']
else:
contest_url = self.url
page = REQ.get(contest_url)
match = re.search(r'<h2>[^<]*Day\s*[0-9]+:\s*(?P<problem_name>[^<]*)</h2>', page)
problem_name = match.group('problem_name').strip('-').strip()
if self.name.count('.') == 1 and problem_name:
ret['title'] = f'{self.name}. {problem_name}'
standings_url = self.standings_url or contest_url.replace('/day/', '/leaderboard/day/')
page = REQ.get(standings_url)
matches = re.finditer(
r'''
<div[^>]*class="leaderboard-entry"[^>]*>\s*
<span[^>]*class="leaderboard-position"[^>]*>\s*(?P<rank>[0-9]+)[^<]*</span>\s*
<span[^>]*class="leaderboard-time"[^>]*>(?P<time>[^<]*)</span>\s*
(?:<a[^>]*href="(?P<href>[^"]*)"[^>]*>\s*)?
<span[^>]*class="leaderboard-userphoto"[^>]*>(\s*<img[^>]*src="(?P<avatar>[^"]*)"[^>]*>)?[^<]*</span>\s*
(?:<span[^>]*class="leaderboard-anon"[^>]*>)?(?P<name>[^<]*)
''',
page,
re.VERBOSE
)
problems_info = OrderedDict()
result = {}
last = None
n_problems = 0
n_results = 0
for match in matches:
n_results += 1
href = match.group('href')
name = html.unescape(match.group('name')).strip()
if href:
handle = href.split('//')[-1].strip('/')
elif re.match(r'^\(anonymous user #[0-9]+\)$', name):
handle = name
else:
handle = f'{name}, {season}'
handle = handle.replace('/', '-')
rank = int(match.group('rank'))
if last is None or last >= rank:
n_problems += 1
last = rank
row = result.setdefault(handle, {'solving': 0, '_skip_for_problem_stat': True})
score = 100 - rank + 1
row['solving'] += score
row['name'] = name
row['member'] = handle
avatar = match.group('avatar')
if avatar:
row['info'] = {'avatar': avatar}
k = str(n_problems)
if k not in problems_info:
problems_info[k] = {'name': problem_name, 'code': k, 'url': contest_url, 'group': 0}
problem = row.setdefault('problems', {}).setdefault(k, {})
problem['result'] = score
time = f'''{self.start_time.year} {match.group('time')} -05:00'''
problem['time'] = self.to_time(arrow.get(time, 'YYYY MMM D HH:mm:ss ZZ') - self.start_time)
if rank == 1:
problem['first_ac'] = True
if rank <= 3:
problem['_class'] = ['gold-medal', 'silver-medal', 'bronze-medal'][rank - 1]
problems = list(reversed(problems_info.values()))
problems[0].update({'subname': '*', 'subname_class': 'first-star'})
if len(problems) > 1:
problems[1].update({'subname': '*', 'subname_class': 'both-stars'})
place = None
last = None
for rank, row in enumerate(sorted(result.values(), key=lambda r: -r['solving']), start=1):
score = row['solving']
if last != score:
place = rank
last = score
row['place'] = place
ret.update({
'contest_url': contest_url,
'result': result,
'url': standings_url,
'problems': problems,
})
if n_results < 200:
ret['timing_statistic_delta'] = timedelta(minutes=5)
return ret
|
yasql/libs/RenderColumns.py | Fanduzi/YaSQL | 443 | 11092934 | <reponame>Fanduzi/YaSQL
# -*- coding:utf-8 -*-
# edit by fuzongfei
def render_dynamic_columns(render_columns):
try:
columns = []
for x in render_columns:
col = {'title': x['value'],
'dataIndex': x['key'],
'key': x['key'],
'scopedSlots': {'customRender': x['key']}
}
if x.get('width'):
col['width'] = x['width']
if x.get('fixed'):
col['fixed'] = x['fixed']
if x.get('ellipsis'):
col['ellipsis'] = True
columns.append(col)
except IndexError as err:
columns = []
return columns
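# Illustrative usage sketch (added for clarity, not part of the original module).
# ``render_columns`` is a list of {'key', 'value', ...} dicts and the result is a
# front-end table column definition; the sample data below is made up.
#
#   render_dynamic_columns([{'key': 'host', 'value': 'Host', 'width': 120}])
#   # -> [{'title': 'Host', 'dataIndex': 'host', 'key': 'host',
#   #      'scopedSlots': {'customRender': 'host'}, 'width': 120}]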
|
tcapy/conf/celeryconfig.py | Ahrvo-Trading-Systems/tcapy | 189 | 11092940 | __author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""Has the configuration settings for celery. The main thing that needs to be changed is the broker URL settings (in
the ConstantsGen file).
"""
from tcapy.conf.constants import Constants
constants = Constants()
broker_url = constants.celery_broker_url
result_backend = constants.celery_result_backend
# from kombu import serialization
# serialization.registry._decoders.("application/x-python-serialize")
# the below should not need to be changed by nearly all users
# result_backend = "amqp"
# result_backend = "redis://localhost:6379/2"
event_serializer = 'pickle'
accept_content = ['pickle'] #
task_serializer = 'pickle'
result_serializer = 'pickle'
worker_hijack_root_logger = False
task_store_errors_even_if_ignored = True
worker_max_tasks_per_child = 50 # Stop memory leaks, so restart workers after 50 tasks
task_acks_late = True
result_expires = 900 # Clear memory after a while of results, if not picked up
# task_always_eager = True # For debugging, to run Celery in the same process
broker_transport_options = {'socket_timeout': 900}
# broker_pool_limit = 0
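# Illustrative sketch (added for clarity, not part of the original file): Celery
# normally picks up a settings module like this via config_from_object.  The
# module path below is an assumption based on this file's location in the package.
#
#   from celery import Celery
#   app = Celery('tcapy')
#   app.config_from_object('tcapy.conf.celeryconfig')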
|
mac/pyobjc-framework-Quartz/PyObjCTest/test_ikimageeditpanel.py | albertz/music-player | 132 | 11092970 | <reponame>albertz/music-player
from PyObjCTools.TestSupport import *
from Quartz import *
class TestIKImageEditPanelHelper (NSObject):
def thumbnailWithMaximumSize_(self, sz): return None
def hasAdjustMode(self): return 1
def hasEffectsMode(self): return 1
def hasDetailsMode(self): return 1
class TestIKImageEditPanel (TestCase):
@min_os_level('10.5')
def no_testProtocols(self):
self.assertIsInstance(objc.protocolNamed('IKImageEditPanel'), objc.formal_protocol)
@min_os_level('10.5')
def testProtocolMethods(self):
self.assertArgHasType(TestIKImageEditPanelHelper.thumbnailWithMaximumSize_, 0, NSSize.__typestr__)
@min_os_level('10.6')
def testProtocolMethods10_6(self):
self.assertResultIsBOOL(TestIKImageEditPanelHelper.hasAdjustMode)
self.assertResultIsBOOL(TestIKImageEditPanelHelper.hasEffectsMode)
self.assertResultIsBOOL(TestIKImageEditPanelHelper.hasDetailsMode)
if __name__ == "__main__":
main()
|
urls.py | wuzhongdehua/SinaWeibo-Emotion-Classification | 137 | 11093002 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import time, json, base64, logging, hashlib
from datetime import datetime, tzinfo, timedelta
from transwarp.web import ctx, get, post, route, seeother, forbidden, jsonresult, Template
from transwarp import db
from weibo import APIError, APIClient
import pickle, random, jieba,jieba.analyse
import StringIO
try:
import pylibmc
except Exception,e:
pass
try:
import nltk
except Exception, e:
print e
try:
from nltk import FreqDist
import nltk.NaiveBayesClassifier
except Exception, e:
pass
#import nltk
_TD_ZERO = timedelta(0)
_TD_8 = timedelta(hours=8)
#using memcache
#mc = pylibmc.Client()
#mc.set('test','testyourmoathaf')
#mc.incr('test')
#word_features = mc.get('word_features')
#classifier = mc.get('classifier')
#if not mc.get('word_features'):
# word_features = pickle.load(open('word_features.dat','r'))
# mc.set("word_features", str(word_features) )
word_features = pickle.load(open('word_features.dat','r'))
classifier = pickle.load(open("classifierdata.dat","r"))
class UTC8(tzinfo):
def utcoffset(self, dt):
return _TD_8
def tzname(self, dt):
return "UTC+8:00"
def dst(self, dt):
return _TD_ZERO
_UTC8 = UTC8()
def _format_datetime(dt):
t = datetime.strptime(dt, '%a %b %d %H:%M:%S +0800 %Y').replace(tzinfo=_UTC8)
return time.mktime(t.timetuple())
def _format_user(u):
return dict(id=str(u.id), screen_name=u.screen_name, profile_url=u.profile_url, verified=u.verified, verified_type=u.verified_type, profile_image_url=u.profile_image_url)
def _format_weibo(st):
# user = st.user
# print "type", type(st),st['rank']
r = dict(
text = st.text,
created_at = _format_datetime(st.created_at)
)
try:
r['user'] = _format_user(st.user)
except Exception, e:
print e
r['user'] = dict(id='123456',screen_name='weibo',profile_url='',verified='0',verified_type='0',profile_image_url='')
try:
r['reposts_count'] = st.reposts_count
except Exception, e:
r['reposts_count'] = 0
try:
r['comments_count'] = st.comments_count
except Exception, e:
r['comments_count'] = 0
else:
pass
try:
r['type'] = st.rank
except Exception, e:
print e
if 'original_pic' in st:
r['original_pic'] = st.original_pic
if 'thumbnail_pic' in st:
r['thumbnail_pic'] = st.thumbnail_pic
if 'retweeted_status' in st:
r['retweeted_status'] = _format_weibo(st.retweeted_status)
return r
@get('/')
def index():
u = _check_cookie()
#return Template('./static/signin.html')
if u is None:
return Template('./static/signin.html')
return Template('static/myweibo.html', user=u)
@get('/go')
def app():
u = _check_cookie()
if u is None:
return Template('static/signin.html')
return Template('static/index.html', user=u)
@post('/update')
@jsonresult
def update():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
try:
r = client.statuses.update.post(status=ctx.request['status'])
# r = client.statuses.update.post(status=ctx.request['status'], pic = StringIO.StringIO(ctx.request['dataurl'].encode('utf8')))
if 'error' in r:
return r
return dict(result='success')
except APIError, e:
return dict(error='failed')
@route('/friends')
@jsonresult
def friends():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
try:
r = client.friendships.friends.get(uid=u.id, count=99)
return [_format_user(u) for u in r.users]
except APIError, e:
return dict(error='failed')
@route('/bifriends')
@jsonresult
def bifriends():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
# return
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
try:
r = client.friendships.friends.bilateral.get(uid=u.id, count=99)
return [_format_user(u) for u in r.users]
except APIError, e:
return dict(error='failed')
@route('/analysis')
@jsonresult
def analysis():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
month = int(ctx.request.get('month'))
weibo = getWeiboByTime(months = month)
analysis_result, weibo, keywords= weiboAnalysis(weibo)
# for w in weibo:
# print "analysis", w['rank']
weibo = [_format_weibo(wb) for wb in weibo]
remark = ''
if analysis_result[0] > analysis_result[2]:
remark = u'经检测我这段时间内的负能量过高,需要补充正能量!'
else:
remark = u'经检测我这段时间内正能量爆棚啦哇咔咔!'
return json.dumps({'total':len(weibo), 'pos' : analysis_result[2], 'neu' : analysis_result[1], 'neg' : analysis_result[0], 'weibo' : weibo, 'keywords' : keywords, 'remark' : remark})
@route('/load')
@jsonresult
def load():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
try:
r = client.statuses.home_timeline.get()
return [_format_weibo(s) for s in r.statuses]
except APIError, e:
return dict(error='failed')
@route('/myweibo')
@jsonresult
def myweibo():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
# return
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
try:
r = client.statuses.user_timeline.get()
return [_format_weibo(s) for s in r.statuses]
except APIError, e:
return dict(error='failed')
@post('/hint')
@jsonresult
def hint():
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
try:
return client.remind.unread_count.get()
except APIError, e:
return dict(error='failed')
@get('/signin')
def signin():
client = _create_client()
raise seeother(client.get_authorize_url())
@get('/signout')
def signout():
ctx.response.set_cookie(_COOKIE, 'deleted', max_age=0)
raise seeother('/')
@get('/callback')
def callback():
i = ctx.request.input(code='')
code = i.code
client = _create_client()
r = client.request_access_token(code)
logging.info('access token: %s' % json.dumps(r))
access_token, expires_in, uid = r.access_token, r.expires_in, r.uid
client.set_access_token(access_token, expires_in)
u = client.users.show.get(uid=uid)
logging.info('got user: %s' % uid)
users = db.select('select * from users where id=?', uid)
user = dict(name=u.screen_name, \
image_url=u.avatar_large or u.profile_image_url, \
statuses_count=u.statuses_count, \
friends_count=u.friends_count, \
followers_count=u.followers_count, \
verified=u.verified, \
verified_type=u.verified_type, \
auth_token=access_token, \
expired_time=expires_in)
if users:
db.update_kw('users', 'id=?', uid, **user)
else:
user['id'] = uid
db.insert('users', **user)
_make_cookie(uid, access_token, expires_in)
raise seeother('/')
_COOKIE = 'authuser'
_SALT = 'A random string'
def _make_cookie(uid, token, expires_in):
expires = str(int(expires_in))
s = '%s:%s:%s:%s' % (str(uid), str(token), expires, _SALT)
md5 = hashlib.md5(s).hexdigest()
cookie = '%s:%s:%s' % (str(uid), expires, md5)
ctx.response.set_cookie(_COOKIE, base64.b64encode(cookie).replace('=', '_'), expires=expires_in)
def _check_cookie():
try:
b64cookie = ctx.request.cookies[_COOKIE]
cookie = base64.b64decode(b64cookie.replace('_', '='))
uid, expires, md5 = cookie.split(':', 2)
if int(expires) < time.time():
return
L = db.select('select * from users where id=?', uid)
if not L:
return
u = L[0]
s = '%s:%s:%s:%s' % (uid, str(u.auth_token), expires, _SALT)
if md5 != hashlib.md5(s).hexdigest():
return
return u
except BaseException:
pass
_APP_ID = ''
_APP_SECRET = ''
_ADMIN_PASS = '<PASSWORD>'
@get('/admin')
def show_admin():
return '''<html>
<body>
<form action="/admin" method="post">
<p>Input password:</p>
<p><input type="password" name="passwd" /></p>
</form>
</body>
</html>
'''
@post('/admin')
def do_admin():
global _APP_ID, _APP_SECRET, _ADMIN_PASS
i = ctx.request.input()
if i.passwd != _ADMIN_PASS:
raise forbidden()
admin_pass = i.get('new_passwd', '')
app_id = i.get('app_id', '')
app_secret = i.get('app_secret', '')
msg = ''
if admin_pass and app_id and app_secret:
db.update('delete from settings')
db.update('insert into settings (id, value) values (?, ?)', 'app_id', app_id)
db.update('insert into settings (id, value) values (?, ?)', 'app_secret', app_secret)
db.update('insert into settings (id, value) values (?, ?)', 'admin_pass', admin_pass)
msg = 'Updated!'
_APP_ID = app_id
_APP_SECRET = app_secret
_ADMIN_PASS = admin_pass
return '''<html>
<body>
<p>%s</p>
<form action="/admin" method="post">
<p>App ID:</p>
<p><input type="text" name="app_id" value="%s" /></p>
<p>App Secret:</p>
<p><input type="text" name="app_secret" value="%s" /></p>
<p>Old Password:</p>
<p><input type="text" name="passwd" readonly="readonly" value="%s" /></p>
<p>New Password:</p>
<p><input type="text" name="new_passwd" value="%s" /></p>
<p>WARNING: click submit will update app_id, app_secret and admin password!</p>
<p><input type="submit" name="submit" value="Submit" /></p>
</form>
</body>
</html>
''' % (msg, _APP_ID, _APP_SECRET, _ADMIN_PASS, _ADMIN_PASS)
def _load_app_info():
global _APP_ID, _APP_SECRET, _ADMIN_PASS
for s in db.select('select * from settings'):
if s.id == 'app_id':
_APP_ID = s.value
if s.id == 'app_secret':
_APP_SECRET = s.value
if s.id == 'admin_pass':
_ADMIN_PASS = s.value
def _create_client():
global _APP_ID, _APP_SECRET
try:
import sae
except Exception, e:
return APIClient(_APP_ID, _APP_SECRET, 'http://127.0.0.1:8080/callback')
else:
return APIClient(_APP_ID, _APP_SECRET, 'http://tobeornottobe.sinaapp.com/callback')
def transformTime(created_time):
a = created_time.split()
month_mapping = {'Jan' : '01', 'Feb' : '02', 'Mar' : '03', 'Apr' : '04', 'May' : '05', 'Jun' : '06', 'Jul' : '07', 'Aug' : '08', 'Sep' : '09', 'Oct' : '10', 'Nov' : '11', 'Dec' : '12'}
t = a[-1] + month_mapping[a[1]] + a[2]
return t
def getWeiboByTime(assigned_time = None, months = 3):
import time
t = time.strftime('%Y%m',time.localtime(time.time() - 2592000 * (months-1))) + '01'
if assigned_time:
t = assigned_time
weibo = []
page = 1
u = _check_cookie()
if u is None:
return dict(error='failed', redirect='/signin')
client = _create_client()
client.set_access_token(u.auth_token, u.expired_time)
while True:
wbs = client.statuses.user_timeline.get( count = 100, page = page)
flag = False
for wb in wbs['statuses']:
if transformTime(wb['created_at']) >= t:
# text = wb['text']
# if 'retweeted_status' in wb:
# text += wb['retweeted_status']['text']
weibo.append(wb)
# print transformTime(wb['created_at'])
else:
flag = True
break
page += 1
if flag:
break
print "Total weibos: %d" %(len(weibo))
return weibo
def gender_features(weibo):
global word_features
a = jieba.cut(weibo)
fl = (" ".join(a)).split()
fd = {}
for word in word_features:
fd[u'contains(%s)'%word] = (word in fl)
return fd
def weiboAnalysis(weibo):
"""weibo analysis tool"""
data = [0] * 3
keywords = []
global classifier
for i in range(len(weibo)):
w = weibo[i]
text = w['text']
if 'retweeted_status' in w:
text += w['retweeted_status']['text']
keywords += jieba.analyse.extract_tags(text, topK=10)
rank = int(classifier.classify(gender_features(text)))
weibo[i]['rank'] = rank
data[rank] += 1
# print rank, '\n\n'
# for w in weibo:
# print "rank", w['rank']
print u'Total analysis: %d' %(len(weibo))
for i in range(3) :
print i, ' ', data[i]
keywords = nltk.FreqDist(keywords).keys()[:300]
# for i in keywords:
# print i
return data, weibo,keywords
_load_app_info()
|
keepercommander/importer/lastpass/attachment_reader.py | Keeper-Security/commander | 151 | 11093003 | from base64 import b64decode
from io import RawIOBase, BufferedReader, TextIOWrapper
from Cryptodome.Cipher import AES
# Chunk size must be a multiple of 256
# Two b64 decodes each requiring a multiple of four times multiple of 16 needed for AES decryption (4 * 16 * 4 = 256)
CHUNK_SIZE = 8 * 1024
def decode_aes256_base64_from_stream(stream, encryption_key, chunk_size=CHUNK_SIZE):
"""Decrypts base64 encoded AES-256 from file in chunks
CHUNK_SIZE is read in but only 9/16 of CHUNK_SIZE is yielded for every iteration due to b64 decoding
"""
first_chunk = stream.read(chunk_size)
if not first_chunk:
return
    # LastPass AES-256/CBC/base64 encrypted string starts with an "!".
# Next 24 bytes are the base64 encoded IV for the cipher.
# Then comes the "|".
# And the rest is the base64 encoded encrypted payload.
if first_chunk[0] == b'!'[0]:
iv = b64decode(first_chunk[1:25])
aes = AES.new(encryption_key, AES.MODE_CBC, iv)
chunk = b64decode(first_chunk[26:] + stream.read(26))
if not chunk:
return
else:
aes = AES.new(encryption_key, AES.MODE_ECB)
chunk = b64decode(first_chunk)
d = aes.decrypt(chunk)
chunk = b64decode(stream.read(chunk_size))
while chunk:
yield b64decode(d)
d = aes.decrypt(chunk)
chunk = b64decode(stream.read(chunk_size))
yield b64decode(d[:-d[-1]])
class LastpassAttachmentReader(RawIOBase):
"""A RawIOBase reader that decrypts and decodes the input stream of a Lastpass attachment"""
def __init__(self, attachment):
self.attachment = attachment
self.encrypted_stream = open(attachment.tmpfile, 'rb')
key = attachment.parent.attach_key
self.decryption_generator = decode_aes256_base64_from_stream(self.encrypted_stream, key)
self.leftover = None
self.size = 0
def readable(self):
return True
def readinto(self, b):
try:
buf_len = len(b)
chunk = self.leftover or next(self.decryption_generator)
output = chunk[:buf_len]
self.leftover = chunk[buf_len:]
ret_len = len(output)
b[:ret_len] = output
self.size += ret_len
return ret_len
except StopIteration:
return 0
def close(self):
self.encrypted_stream.close()
self.attachment.size = self.size
@classmethod
def get_buffered_reader(cls, attachment):
return BufferedReader(cls(attachment))
@classmethod
def get_text_reader(cls, attachment, **kwargs):
buffered_reader = cls.get_buffered_reader(attachment)
return TextIOWrapper(buffered_reader, **kwargs)
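# Illustrative usage sketch (added for clarity, not part of the original module).
# The ``attachment`` object comes from the Lastpass importer elsewhere in the
# package, so its construction is omitted here and the names are assumptions.
#
#   reader = LastpassAttachmentReader.get_text_reader(attachment, encoding='utf-8')
#   text = reader.read()      # decrypts and base64-decodes transparently
#   reader.close()            # also records the decrypted size on attachment.size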
|
tests/run_tests.py | CurrySoftware/style-elements | 493 | 11093006 | # Run tests via selenium, either locally or on saucelabs
#
# Compile elm test code
# Open up a browser
#
#
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# This is the only code you need to edit in your existing scripts.
# The command_executor tells the test to run on Sauce, while the desired_capabilities
# parameter tells us which browsers and OS to spin up.
desired_cap = {
'platform': "Mac OS X 10.9",
'browserName': "chrome",
'version': "31",
}
driver = webdriver.Remote(
command_executor='http://YOUR_SAUCE_USERNAME:[email protected]:80/wd/hub',
desired_capabilities=desired_cap)
# This is your test logic. You can add multiple tests here.
driver.implicitly_wait(10)
driver.get("http://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
elem = driver.find_element_by_name("q")
elem.send_keys("<NAME>")
elem.submit()
print driver.title
# This is where you tell Sauce Labs to stop running tests on your behalf.
# It's important so that you aren't billed after your test finishes.
driver.quit() |
tests/test_album.py | jack-debug/LyricsGenius | 692 | 11093011 | <gh_stars>100-1000
import unittest
import os
import warnings
from . import genius
from lyricsgenius.types import Album
class TestAlbum(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("\n---------------------\nSetting up Album tests...\n")
warnings.simplefilter("ignore", ResourceWarning)
cls.album_name = "The Party"
cls.artist_name = "<NAME>"
cls.num_tracks = 10
cls.album = genius.search_album(
cls.album_name,
cls.artist_name
)
def test_type(self):
self.assertIsInstance(self.album, Album)
def test_album_name(self):
self.assertEqual(self.album.name, self.album_name)
def test_album_artist(self):
self.assertEqual(self.album.artist.name, self.artist_name)
def test_tracks(self):
self.assertEqual(len(self.album.tracks), self.num_tracks)
def test_saving_json_file(self):
print('\n')
extension = 'json'
msg = "Could not save {} file.".format(extension)
expected_filename = ('Lyrics_'
+ self.album.name.replace(' ', '')
+ '.'
+ extension)
# Remove the test file if it already exists
if os.path.isfile(expected_filename):
os.remove(expected_filename)
# Test saving json file
self.album.save_lyrics(extension=extension, overwrite=True)
self.assertTrue(os.path.isfile(expected_filename), msg)
# Test overwriting json file (now that file is written)
try:
self.album.save_lyrics(extension=extension, overwrite=True)
except Exception:
self.fail("Failed {} overwrite test".format(extension))
os.remove(expected_filename)
def test_saving_txt_file(self):
print('\n')
extension = 'txt'
msg = "Could not save {} file.".format(extension)
expected_filename = ('Lyrics_'
+ self.album.name.replace(' ', '')
+ '.'
+ extension)
# Remove the test file if it already exists
if os.path.isfile(expected_filename):
os.remove(expected_filename)
# Test saving txt file
self.album.save_lyrics(extension=extension, overwrite=True)
self.assertTrue(os.path.isfile(expected_filename), msg)
# Test overwriting txt file (now that file is written)
try:
self.album.save_lyrics(
extension=extension, overwrite=True)
except Exception:
self.fail("Failed {} overwrite test".format(extension))
os.remove(expected_filename)
|
pyanomaly/networks/meta/pcn_parts/pcm.py | YuhaoCheng/PyAnomaly | 102 | 11093017 | <filename>pyanomaly/networks/meta/pcn_parts/pcm.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchsnooper
from pyanomaly.networks.parts.base.commonness import Conv2dLeakly
from pyanomaly.networks.parts.base.commonness import DoubleConv, Down, Up, OutConv, BasicConv2d
from .convolution_lstm import ConvLSTMCell
class SingleStampConvLSTM(nn.Module):
# input_channels corresponds to the first input feature map
# hidden state is a list of succeeding lstm layers.
def __init__(self, input_channels, hidden_channels, kernel_size):
super(SingleStampConvLSTM, self).__init__()
self.input_channels = [input_channels] + hidden_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
        self.num_layers = len(hidden_channels)  # equals len(hidden_channels)
self._all_layers = []
for i in range(self.num_layers):
name = 'cell{}'.format(i)
cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size)
setattr(self, name, cell)
self._all_layers.append(cell)
# for each sequence, we need to clear the internal_state
self.internal_state = list()
# @torchsnooper.snoop()
def forward(self, input, step):
x = input # the input is a single image, shape is N C H W
for i in range(self.num_layers):
if step == 0 and i==0:
                self.internal_state = list()  # clear the stored states, since we switch to the next video clip
name = 'cell{}'.format(i)
if step == 0:
# all cells are initialized in the first step
bsize, _, height, width = x.size()
(h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i], shape=(height, width))
self.internal_state.append((h, c))
# do forward
(h, c) = self.internal_state[i]
x, new_c = getattr(self, name)(x, h, c)
self.internal_state[i] = (x, new_c)
return x, new_c
class PEP(nn.Module):
def __init__(self, c_in, c_out, bilinear=False):
super(PEP, self).__init__()
self.c_in = c_in
self.c_out = c_out
self.bilinear = bilinear
self.inc = DoubleConv(self.c_in, 64)
self.down1 = Down(64, 128)
self.down2 = Down(128, 256)
self.down3 = Down(256,512)
self.up1 = Up(768, 512, 256, self.bilinear)
self.up2 = Up(384,256,128, self.bilinear)
self.up3 = Up(192,128,64, self.bilinear)
# @torchsnooper.snoop()
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x = self.up1(x4, x3)
x = self.up2(x, x2)
x = self.up3(x, x1)
return x
class PCM(nn.Module):
def __init__(self):
super(PCM, self).__init__()
self.convlstm = SingleStampConvLSTM(input_channels=64, hidden_channels=[128, 64], kernel_size=3)
self.pep = PEP(c_in=3, c_out=64, bilinear=True)
# self.fr = Conv2dLeakly(c_in=64, c_out=3,kernel_size=3, stride=1, padding=1)
self.fr = nn.Sequential(
nn.Conv2d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 16, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(16, 3, 3, 1, 1)
)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
if isinstance(m, nn.ConvTranspose2d):
m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
# @torchsnooper.snoop()
def forward(self, video_clip):
# the video_clip is [N C D H W]
len_video = video_clip.shape[2]
frames = torch.chunk(video_clip, len_video, 2)
for time_stamp in range(len_video):
# print(time_stamp)
frame = frames[time_stamp].squeeze(2)
if time_stamp == 0:
E = torch.zeros_like(frame)
else:
E = torch.sub(frame, temp)
R = self.pep(E)
x, _ = self.convlstm(R, time_stamp)
Ihat = self.fr(x)
# import ipdb; ipdb.set_trace()
# temp = Ihat.detach()
temp = Ihat
            if time_stamp == len_video-1:  # the last frame
result = Ihat
return result
class ConvLSTM(nn.Module):
# input_channels corresponds to the first input feature map
# hidden state is a list of succeeding lstm layers.
def __init__(self, input_channels, hidden_channels, kernel_size, step=1, effective_step=[1]):
super(ConvLSTM, self).__init__()
# the part to initalize the convLSTM
self.input_channels = [input_channels] + hidden_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.num_layers = len(hidden_channels)
self.step = step
self.effective_step = effective_step
self._all_layers = []
for i in range(self.num_layers):
name = 'cell{}'.format(i)
cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size)
setattr(self, name, cell)
self._all_layers.append(cell)
self.pep = PEP(c_in=3, c_out=64, bilinear=False)
def forward(self, input):
internal_state = []
outputs = []
for step in range(self.step):
x = input
for i in range(self.num_layers):
# all cells are initialized in the first step
name = 'cell{}'.format(i)
print(name)
if step == 0:
bsize, _, height, width = x.size()
(h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],
shape=(height, width))
internal_state.append((h, c))
# do forward
(h, c) = internal_state[i]
x, new_c = getattr(self, name)(x, h, c)
internal_state[i] = (x, new_c)
# only record effective steps
if step in self.effective_step:
outputs.append(x)
return outputs, (x, new_c)
if __name__ == '__main__':
data = torch.randn([8,3,4,128,192]).cuda()
label = torch.randn([8,3,128,192]).cuda()
model = PCM().cuda()
result = model(data)
loss = nn.L1Loss()
l = loss(result, label)
l.backward()
import ipdb; ipdb.set_trace() |
vincent/scales.py | ahlusar1989/vincent | 1,052 | 11093064 | <reponame>ahlusar1989/vincent
# -*- coding: utf-8 -*-
"""
Scales: Classes to define Vega scales
"""
from .core import grammar, GrammarClass
from ._compat import str_types
class DataRef(GrammarClass):
"""Definitions for how data is referenced by scales
Data can be referenced in multiple ways, and sometimes it makes sense to
reference multiple data fields at once.
"""
@grammar(str_types)
def data(value):
"""string : Name of data-set containing the domain values"""
@grammar((list,) + str_types)
def field(value):
"""string or list of strings : Reference to desired data field(s)
If multiple fields are given, then the values of all fields are
included in the domain.
"""
class Scale(GrammarClass):
"""Definitions for mapping from data space to visual space
Scales determine the way in which data is mapped from a data space (such
as numbers, time stamps, etc.) to a visual space (length of a line,
height of a bar, etc.), for both independent and dependent variables.
"""
@grammar(str_types)
def name(value):
"""string : Unique name for the scale
This is used for referencing by other components (mainly ``Mark``).
"""
@grammar(str_types)
def type(value):
"""string : Type of the scale
Valid types are as follows:
* ``'ordinal'``: ordinal scale types
* ``'time'`` or ``'utc'``: time scale types
* ``'linear'``, ``'log'``, ``'pow'``, ``'sqrt'``, ``'quantile'``,
``'quantize'``, and ``'threshold'``: quantitative scale types
For time scales, the value should be a Javascript-style numeric
value of seconds. ``'time'`` implies the value is in local time.
If unspecified, then the scale is assumed to be linear. See the d3
documentation for scale type details.
"""
@grammar((list, DataRef))
def domain(value):
"""list or DataRef : Domain of the scale
"""
@grammar(grammar_type=(float, int, DataRef), grammar_name='domainMin')
def domain_min(value):
"""float, int, or DataRef : Minimum domain value
Only used for quantitative/time scales. This takes precedence over
the minimum of the ``domain`` property.
"""
@grammar(grammar_type=(float, int, DataRef),
grammar_name='domainMax')
def domain_max(value):
"""float, int, or DataRef : Maximum domain value
Only used for quantitative/time scales. This takes precedence over
the maximum of the ``domain`` property.
"""
@grammar((list,) + str_types)
def range(value):
"""list or string : Range of the scale
For quantitative scales, the range may be specified as a two-element
list of min/max values. For ordinal scales, the range should be a
list of output values mapped to the input values.
String values may be used to automatically set a range:
- ``'width'`` - Set the range to the width of the visualization
- ``'height'`` - Set the range to the height of the visualization
- ``'shapes'`` - Equivalent to the symbol types ``['circle',
'cross', 'diamond', 'square', 'triangle-down',
'triangle-up']``
- ``'category10'`` - A pre-determined 10-color pallet
- ``'category20'`` - A pre-determined 20-color pallet
"""
@grammar(grammar_type=(float, int, DataRef), grammar_name='rangeMin')
def range_min(value):
"""float, int, or DataRef : Minimum range value
Only used for quantitative/time scales. This takes precedence over
the minimum of the ``range`` property.
"""
@grammar(grammar_type=(float, int, DataRef), grammar_name='rangeMax')
def range_max(value):
"""float, int, or DataRef : Maximum range value
Only used for quantitative/time scales. This takes precedence over
the maximum of the ``range`` property.
"""
@grammar(bool)
def reverse(value):
"""boolean : If True, flip the scale range"""
@grammar(bool)
def round(value):
"""boolean : If True, numeric output values are rounded to
integers"""
@grammar(bool)
def points(value):
"""boolean : If True, distribute ordinal values over evenly spaced
points between ``range_min`` and ``range_max``
Ignored for non-ordinal scales.
"""
@grammar(bool)
def clamp(value):
"""boolean : If True, values that exceed the domain are clamped to
within the domain
Ignored for ordinal scales.
"""
@grammar((bool,) + str_types)
def nice(value):
"""boolean or string : scale the domain to a more human-friendly set
If the scale ``type`` is ``'time'`` or ``'utc'``, then the value
should be one of ``'second'``, ``'minute'``, ``'hour'``, ``'day'``,
``'week'``, ``'month'``, or ``'year'``.
If the scale ``type`` is a quantitative scale, then the value should
be a boolean. The input values are rounded to a more human-friendly
value. The details of the rounding are in the d3 documentation.
Ignored for ordinal scales.
"""
@grammar((float, int))
def exponent(value):
"""float or int : Exponent for ``'pow'`` scale types
Ignored for all scale types other than ``'pow'``.
"""
@grammar(bool)
def zero(value):
"""boolean : If True, include zero in the domain
Only valid for quantitative scale types. This is useful if the
domain is defined as a DataRef that may not include 0 exactly.
"""
@grammar((float, int))
def padding(value):
"""string: Ordinal element padding
Only valid for ordinal scale types
"""
|
chemberta/utils/roberta_regression.py | BogdanDidenko/bert-loves-chemistry | 183 | 11093107 | import math
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import PreTrainedModel, RobertaModel
from transformers.file_utils import ModelOutput
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@dataclass
class SequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = ["position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
if labels is None:
return logits
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(
logits.view(-1, self.num_labels), labels.long().view(-1)
)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaForRegression(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = ["position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.register_buffer("norm_mean", torch.tensor(config.norm_mean))
# Replace any 0 stddev norms with 1
self.register_buffer(
"norm_std",
torch.tensor(
[label_std if label_std != 0 else 1 for label_std in config.norm_std]
),
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.regression = RobertaRegressionHead(config)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = (
outputs.last_hidden_state
) # shape = (batch, seq_len, hidden_size)
logits = self.regression(sequence_output)
if labels is None:
return self.unnormalize_logits(logits)
if labels is not None:
normalized_labels = self.normalize_logits(labels)
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), normalized_labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return RegressionOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def normalize_logits(self, tensor):
return (tensor - self.norm_mean) / self.norm_std
def unnormalize_logits(self, tensor):
return (tensor * self.norm_std) + self.norm_mean
class RobertaRegressionHead(nn.Module):
"""Head for multitask regression models."""
def __init__(self, config):
super(RobertaRegressionHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.relu(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@dataclass
class RegressionOutput(ModelOutput):
"""
Base class for outputs of regression models. Supports single and multi-task regression.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided)
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Regression scores for each task (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
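# Illustrative sketch (added for clarity, not part of the original module).
# ``RobertaForRegression`` reads ``norm_mean`` and ``norm_std`` off the config to
# normalize labels; the config values below are made-up placeholders.
#
#   from transformers import RobertaConfig
#   config = RobertaConfig(vocab_size=600, num_labels=2)
#   config.norm_mean = [0.0, 1.5]
#   config.norm_std = [1.0, 0.5]
#   model = RobertaForRegression(config)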
|
dnachisel/biotools/bowtie.py | simone-pignotti/DnaChisel | 124 | 11093122 | import subprocess
import tempfile
import os
def run_process(name, parameters):
process = subprocess.run(
parameters,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
if process.returncode:
error = process.stderr.decode()
raise OSError("%s failed:\n\n%s" % (name, error))
return process.stdout
def create_bowtie_index_from_sequences(sequences, path):
fasta_path = os.path.join(path, 'sequences.fa')
bowtie_path = os.path.join(path, 'bowtie')
with open(fasta_path, 'w') as f:
f.write("\n".join([
">%d\n%s" % (i, sequence)
for i, sequence in enumerate(sequences)
]))
run_process("build-bowtie", [
"bowtie-build", "-f", fasta_path, bowtie_path, "--quiet"
])
return bowtie_path
def find_all_bowtie_matches(
sequence, bowtie_index_path, match_length, max_mismatches=0
):
"""Return (short) matches between a sequence and a Bowtie index.
    The result is a list of the form [((start, end), n_mismatches), ...], where (start, end)
indicates the position of the match in the sequence, and n_mismatches is
the number of mismatches with the closest homology in the index.
"""
# CREATE THE PARAMETERS
parameters = ["bowtie"]
parameters += ["--best", "-k", "1"] # only return the best alignments
parameters += ["-v", str(max_mismatches)] # only allow that N mismatches
parameters += [bowtie_index_path]
parameters += ["--quiet", "--suppress", "2,3,4,5,6,7"] # small output
k = match_length
kmers = [sequence[i : i + k] for i in range(len(sequence) - k + 1)]
if k * len(kmers) < 10000:
# Input the sequences directly
tmp_fasta_path = None
parameters += ["-c", ",".join(kmers)]
else:
# Write sequences to a file if too many.
tmp_fasta_path = tempfile.mktemp(".fa")
with open(tmp_fasta_path, "w") as f:
entries = [">%d\n%s" % (i, s) for i, s in enumerate(kmers)]
f.write("\n\n".join(entries))
parameters += ["-f", tmp_fasta_path]
# RUN THE PROCESS
try:
output = run_process("BOWTIE", parameters)
except Exception as err:
raise err
finally:
if tmp_fasta_path is not None:
os.remove(tmp_fasta_path)
output_records = [
line.split("\t") for line in output.decode().split("\n") if len(line)
]
return [
((int(index), int(index) + k), edits.count(":"))
for index, edits in output_records
] |
hashtable/linear_probing.py | x899/algorithms | 472 | 11093155 | <reponame>x899/algorithms
class LinearProbing:
def __init__(self):
self.size = 10
self.arr = [None for _ in range(self.size)]
def get_hash(self, key):
hash_value = 0
for char in key:
hash_value += ord(char)
return hash_value % self.size
    def put(self, key, value):
        index = self.get_hash(key)
        # linear probing: walk forward until we find the key (update) or an
        # empty slot (insert); give up only if the table is completely full
        for _ in range(self.size):
            if self.arr[index] is None:
                # insert new value
                self.arr[index] = (key, value)
                return
            if self.arr[index][0] == key:
                # update existing value
                self.arr[index] = (key, value)
                return
            # slot occupied by another key: probe the next slot
            index = (index + 1) % self.size
        raise Exception("hash table is full")
    def get(self, key):
        index = self.get_hash(key)
        # probe until the key is found or an empty slot shows it is absent
        for _ in range(self.size):
            if self.arr[index] is None:
                break
            if self.arr[index][0] == key:
                return self.arr[index][1]
            index = (index + 1) % self.size
        # if key is not present
        return None
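# Illustrative sketch (added, not part of the original module): keys whose
# characters sum to the same hash collide on one bucket; linear probing stores
# the second key in the next free slot and both stay retrievable. "apple" and
# "elppa" are anagrams, so get_hash() maps them to the same index.
if __name__ == "__main__":
    _demo = LinearProbing()
    _demo.put("apple", 1)
    _demo.put("elppa", 2)  # collides with "apple", probes to the next slot
    print(_demo.get("apple"), _demo.get("elppa"))  # 1 2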
def main():
""" operational function """
table = LinearProbing()
table.put("apple", 10)
table.put("orange", 20)
table.put("car", 30)
table.put("table", 40)
print(table.get("orange")) # 20
print(table.get("kevin")) # None
table.put("orange", 50)
print(table.get("orange")) # 50
if __name__ == "__main__":
main()
|
google_or_tools/send_more_money_scalar_product_sat.py | tias/hakank | 279 | 11093156 | # Copyright 2021 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SEND+MORE=MONEY in 'any' base in OR-tools CP-SAT Solver.
Alphametic problem SEND+MORE=MONEY in any base.
Examples:
Base 10 has one solution:
{9, 5, 6, 7, 1, 0, 8, 2}
Base 11 has three solutions:
{10, 5, 6, 8, 1, 0, 9, 2}
{10, 6, 7, 8, 1, 0, 9, 3}
{10, 7, 8, 6, 1, 0, 9, 2}
This model was created by <NAME> (<EMAIL>)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
from cp_sat_utils import ListPrinter
import string
def main(base=10):
model = cp.CpModel()
# data
# declare variables
  x = [model.NewIntVar(0,base-1,f"x[{i}]") for i in range(8)]
s,e,n,d,m,o,r,y = x
xx = [s,e,n,d, m,o,r,e, m,o,n,e,y]
  # Positional weights in the chosen base, so the model really is
  # base-independent as the docstring promises: base**3..1 for S E N D and
  # M O R E, and -base**4..-1 for M O N E Y.
  coeffs = [base**3, base**2, base, 1,               # S E N D +
            base**3, base**2, base, 1,               # M O R E
            -base**4, -base**3, -base**2, -base, -1  # == M O N E Y
            ]
#
# constraints
#
model.AddAllDifferent(x)
model.Add(0 == cp.LinearExpr.ScalProd(xx, coeffs))
model.Add(s > 0)
model.Add(m > 0)
#
# solution and search
#
solver = cp.CpSolver()
solution_printer = ListPrinter(x)
status = solver.SearchForAllSolutions(model, solution_printer)
if status != cp.OPTIMAL:
print("No solution!")
print()
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
print()
base = 10
if __name__ == '__main__':
# for base in range(10,30):
# main(base)
if len(sys.argv) > 1:
base=int(sys.argv[1])
main(base)
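    # Illustrative sanity check (added): the classic base-10 assignment
    # S,E,N,D,M,O,R,Y = 9,5,6,7,1,0,8,2 satisfies the equation that the
    # ScalProd constraint above encodes.
    assert 9567 + 1085 == 10652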
|
nussl/separation/base/separation_base.py | JTTurner/nussl | 259 | 11093160 | import copy
import warnings
import numpy as np
from ... import AudioSignal, play_utils
class SeparationBase(object):
"""Base class for all separation algorithms in nussl.
Do not call this. It will not do anything.
Parameters:
        input_audio_signal (AudioSignal): An AudioSignal object.
This will always be a copy of the provided AudioSignal object.
"""
def __init__(self, input_audio_signal):
self.metadata = {}
self._audio_signal = None
self.audio_signal = input_audio_signal
@property
def sample_rate(self):
"""
(int): Sample rate of :attr:`audio_signal`.
Literally :attr:`audio_signal.sample_rate`.
"""
return self.audio_signal.sample_rate
@property
def stft_params(self):
"""
STFTParams object containing the STFT parameters of the copied AudioSignal.
"""
return self.audio_signal.stft_params
@property
def audio_signal(self):
"""
Copy of AudioSignal that is made on initialization.
"""
return self._audio_signal
def _preprocess_audio_signal(self):
"""
This function should be implemented by the subclass. It can do things like
take the STFT of the audio signal, or resample it to a desired sample rate,
build the input data for a deep model, etc. Here, it does nothing.
"""
pass
@audio_signal.setter
def audio_signal(self, input_audio_signal):
"""
When setting the AudioSignal object for a separation algorithm (which
        can happen on initialization or later on), it is copied on set so
        as to not alter the data within the original audio signal. If the
        AudioSignal object has data, then the function `_preprocess_audio_signal`
        is run, which is implemented by the subclass.
        Args:
            input_audio_signal (AudioSignal): The audio signal to copy and
                use for separation.
"""
if not isinstance(input_audio_signal, AudioSignal):
raise ValueError('input_audio_signal is not an AudioSignal object!')
self._audio_signal = copy.deepcopy(input_audio_signal)
if self.audio_signal is not None:
if not self.audio_signal.has_data:
warnings.warn('input_audio_signal has no data!')
# initialize to empty arrays so that we don't crash randomly
self.audio_signal.audio_data = np.array([])
self.audio_signal.stft_data = np.array([[]])
else:
self._preprocess_audio_signal()
def interact(self, add_residual=False, source='upload', label=None,
ext='.wav', separate_fn=None, outputs="html",
inline=None, inbrowser=None, share=False, debug=False, auth=None,
**kwargs):
"""
Uses gradio to create a small interactive interface
for the separation algorithm. Fair warning, there
may be some race conditions with this...
When you call this from a notebook, the interface will be displayed
below the cell. When you call this from a regular Python script, you'll see a
link print out (a localhost link and a gradio link if you
called this with sharing on). The sessions will last for the duration
of the notebook or the script.
To use this functionality, you must install gradio: `pip install gradio`.
Args:
add_residual: Whether or not to add the residual signal.
            source: Either "upload" (upload a file to separate) or "microphone" (record the audio to separate).
label (str): Label of interface.
ext (str): Extension for audio file returned.
separate_fn (function): Function that takes in a file object and then returns a matching
element for audio_out.
outputs (str): Defaults to "html", the type of output interface for Gradio to display.
inline (bool): whether to display in the interface inline on python notebooks.
inbrowser (bool): whether to automatically launch the interface in a new tab on the default browser.
share (bool): whether to create a publicly shareable link from your computer for the interface.
debug (bool): if True, and the interface was launched from Google Colab, prints the errors in the cell output.
auth (Tuple[str, str]): If provided, username and password required to access interface.
kwargs: Keyword arguments to gradio.Interface.
Example:
>>> import nussl
>>> nussl.separation.primitive.HPSS(
>>> nussl.AudioSignal()).interact()
"""
try:
import gradio
except: # pragma: no cover
raise ImportError(
"To use this functionality, you must install gradio: "
"pip install gradio.")
def _separate(file_obj): # pragma: no cover
mix = AudioSignal(file_obj.name)
self.audio_signal = mix
estimates = self()
if add_residual:
estimates.append(mix - estimates[0])
estimates = {f'Estimate {i}': s for i, s in enumerate(estimates)}
html = play_utils.multitrack(estimates, ext=ext, display=False)
return html
if label is None: label = f"Separation via {type(self).__name__}"
audio_in = gradio.inputs.Audio(source=source, type="file", label=label)
if separate_fn is None:
separate_fn = _separate
gradio.Interface(
fn=separate_fn,
inputs=audio_in,
outputs=outputs,
**kwargs
).launch(
inline=inline,
inbrowser=inbrowser,
debug=debug,
auth=auth,
share=share
)
def run(self, *args, audio_signal=None, **kwargs):
"""
Runs separation algorithm.
Raises:
NotImplementedError: Cannot call base class
"""
raise NotImplementedError('Cannot call base class.')
def make_audio_signals(self):
"""
Makes :class:`audio_signal.AudioSignal` objects after separation algorithm is run
Raises:
NotImplementedError: Cannot call base class
"""
raise NotImplementedError('Cannot call base class.')
def get_metadata(self, to_str=False, **kwargs):
"""
Returns metadata associated with this separation algorithm.
Args:
to_str (bool): Whether to return the metadata as a string.
Returns:
Formatted metadata if `to_str` is True, else metadata dict.
Raises:
NotImplementedError: Cannot call base class
"""
raise NotImplementedError('Cannot call base class.')
def __call__(self, *args, audio_signal=None, **kwargs):
if audio_signal is not None:
self.audio_signal = audio_signal
self.run(*args, **kwargs)
return self.make_audio_signals()
def __repr__(self):
return f"{self.__class__.__name__} on {str(self.audio_signal)}"
def __eq__(self, other):
for k, v in list(self.__dict__.items()):
if isinstance(v, np.ndarray):
if not np.array_equal(v, other.__dict__[k]):
return False
elif v != other.__dict__[k]:
return False
return True
def __ne__(self, other):
return not self == other
class SeparationException(Exception):
pass
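# Illustrative sketch (not part of nussl): the smallest possible subclass,
# showing which hooks SeparationBase expects. It returns the copied input
# signal as a single "estimate"; a real algorithm would do its work in run()
# and build its estimates in make_audio_signals().
class _PassthroughSeparation(SeparationBase):
    def run(self, *args, **kwargs):
        # nothing to compute for a passthrough
        return self

    def make_audio_signals(self):
        return [copy.deepcopy(self.audio_signal)]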
|
homeassistant/components/youless/const.py | MrDelik/core | 30,023 | 11093195 | """Constants for the youless integration."""
DOMAIN = "youless"
|
tests/e2e/scale/noobaa/test_scale_obc_creation_repsin_noobaa_pods.py | annagitel/ocs-ci | 130 | 11093203 | import logging
import pytest
from ocs_ci.ocs import constants, scale_noobaa_lib
from ocs_ci.framework.testlib import scale, E2ETest
from ocs_ci.ocs.resources.objectconfigfile import ObjectConfFile
from ocs_ci.framework.pytest_customization.marks import on_prem_platform_required
log = logging.getLogger(__name__)
@pytest.fixture(autouse=True)
def teardown(request):
def finalizer():
scale_noobaa_lib.cleanup(constants.OPENSHIFT_STORAGE_NAMESPACE)
request.addfinalizer(finalizer)
@scale
class TestScaleOCBCreation(E2ETest):
"""
OBC scale creation, creating up to max support number of OBCs.
OBCs are created in the Multicloud Object Gateway and
Ceph Object Gateway (RGW)
"""
namespace = constants.OPENSHIFT_STORAGE_NAMESPACE
scale_obc_count = 1000
# Will increase number of obc with i/o when issue is fixed
# BZ https://bugzilla.redhat.com/show_bug.cgi?id=2010560
scale_obc_count_io = 5
num_obc_batch = 50
@pytest.mark.parametrize(
argnames=["pod_name", "sc_name"],
argvalues=[
pytest.param(
*["noobaa-core", constants.NOOBAA_SC],
marks=[
pytest.mark.polarion_id("OCS-2645"),
],
),
pytest.param(
*["noobaa-db", constants.NOOBAA_SC],
marks=[
pytest.mark.polarion_id("OCS-2646"),
],
),
pytest.param(
*["noobaa-core", constants.DEFAULT_STORAGECLASS_RGW],
marks=[
on_prem_platform_required,
pytest.mark.polarion_id("OCS-2647"),
],
),
pytest.param(
*["noobaa-db", constants.DEFAULT_STORAGECLASS_RGW],
marks=[
on_prem_platform_required,
pytest.mark.polarion_id("OCS-2648"),
],
),
],
)
def test_scale_obc_creation_noobaa_pod_respin(
self, tmp_path, pod_name, sc_name, mcg_job_factory
):
"""
        OBC creation at scale using MCG and RGW storage classes, followed by a
        restart of the node hosting the selected noobaa pod. The RGW-backed
        parameters only run on on-premises (e.g. vSphere) deployments.
"""
# Create OBCs with FIO running using mcg_job_factory()
        io_jobs = [mcg_job_factory() for _ in range(self.scale_obc_count_io)]  # keep references to the FIO jobs
log.info(
f"Start creating {self.scale_obc_count} "
f"OBC in a batch of {self.num_obc_batch}"
)
for i in range(int(self.scale_obc_count / self.num_obc_batch)):
obc_dict_list = (
scale_noobaa_lib.construct_obc_creation_yaml_bulk_for_kube_job(
no_of_obc=self.num_obc_batch,
sc_name=sc_name,
namespace=self.namespace,
)
)
# Create job profile
job_file = ObjectConfFile(
name="job_profile",
obj_dict_list=obc_dict_list,
project=self.namespace,
tmp_path=tmp_path,
)
# Create kube_job
job_file.create(namespace=self.namespace)
# Check all the OBCs reached Bound state
obc_bound_list = (
scale_noobaa_lib.check_all_obc_reached_bound_state_in_kube_job(
kube_job_obj=job_file,
namespace=self.namespace,
no_of_obc=self.num_obc_batch,
)
)
log.info(f"Number of OBCs in Bound state: {len(obc_bound_list)}")
# Reset node which noobaa pods is running on
# And validate noobaa pods are re-spinned and in running state
scale_noobaa_lib.noobaa_running_node_restart(pod_name=pod_name)
# Verify all OBCs are in Bound state after node restart
log.info("Verify all OBCs are in Bound state after node restart.....")
obc_status_list = scale_noobaa_lib.check_all_obcs_status(
namespace=self.namespace
)
log.info(
f"Number of OBCs in Bound state after node reset: "
f"{len(obc_status_list)}"
)
assert (
len(obc_status_list) == self.scale_obc_count
), "Not all OBCs in Bound state"
|
tests/test_api/test_paste.py | cipherboy/modern-paste | 271 | 11093217 | <filename>tests/test_api/test_paste.py
# coding=utf-8
import json
import random
import time
import mock
from sqlalchemy.exc import SQLAlchemyError
import config
import constants.api
import database.attachment
import database.paste
import database.user
import util.cryptography
import util.testing
from uri.authentication import *
from uri.main import *
from uri.paste import *
class TestPaste(util.testing.DatabaseTestCase):
def test_submit_paste_invalid(self):
# Invalid input
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.INCOMPLETE_PARAMS_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.INCOMPLETE_PARAMS_FAILURE)
def test_submit_paste_login_required(self):
# Config requires authentication to post paste
config.REQUIRE_LOGIN_TO_PASTE = True
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'paste',
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNAUTHENTICATED_PASTES_DISABLED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNAUTHENTICATED_PASTES_DISABLED_FAILURE, json.loads(resp.data))
user = util.testing.UserFactory.generate()
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'paste',
'api_key': user.api_key,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
def test_submit_paste_no_auth(self):
# Successful paste without authentication
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
resp_data = json.loads(resp.data)
self.assertIsNotNone(resp_data['post_time'])
self.assertIsNotNone(resp_data['paste_id_repr'])
self.assertTrue(resp_data['is_active'])
self.assertEquals('contents', resp_data['contents'])
self.assertIsNotNone(resp_data['deactivation_token'])
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'user_id': 1,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
resp_data = json.loads(resp.data)
self.assertIsNotNone(resp_data['post_time'])
self.assertIsNotNone(resp_data['paste_id_repr'])
self.assertTrue(resp_data['is_active'])
self.assertEquals('contents', resp_data['contents'])
self.assertIsNone(database.paste.get_paste_by_id(1).user_id)
def test_submit_paste_logged_in(self):
# Paste should automatically be associated with user who is logged in
user = util.testing.UserFactory.generate(username='username', password='password')
resp = self.client.post(
LoginUserURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEquals(resp.status_code, constants.api.SUCCESS_CODE)
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
resp_data = json.loads(resp.data)
self.assertIsNotNone(resp_data['post_time'])
self.assertIsNotNone(resp_data['paste_id_repr'])
self.assertTrue(resp_data['is_active'])
self.assertEquals('contents', resp_data['contents'])
self.assertIsNotNone(resp_data['deactivation_token'])
self.assertEqual(user.user_id, database.paste.get_paste_by_id(util.cryptography.get_decid(resp_data['paste_id_repr'])).user_id)
def test_submit_paste_api_post(self):
# Ensure that the is_api_post flag is appropriately set
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
paste_id = util.cryptography.get_decid(json.loads(resp.data)['paste_id_repr'], force=True)
self.assertTrue(database.paste.get_paste_by_id(paste_id).is_api_post)
def test_submit_paste_non_api_post(self):
for referrer in [PastePostInterfaceURI.full_uri(), HomeURI.full_uri(), PastePostInterfaceURI.full_uri() + '/?extra=stuff']:
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
}),
content_type='application/json',
headers={
'referer': referrer, # TIL "referer" is a deliberate misspelling of "referrer"
},
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
paste_id = util.cryptography.get_decid(json.loads(resp.data)['paste_id_repr'], force=True)
self.assertFalse(database.paste.get_paste_by_id(paste_id).is_api_post)
def test_submit_paste_non_ascii(self):
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': '어머',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': json.loads(resp.data)['paste_id_repr'],
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
self.assertEqual(json.loads(resp.data)['details']['contents'], unicode('어머', 'utf8'))
def test_submit_paste_attachments_disabled(self):
config.ENABLE_PASTE_ATTACHMENTS = False
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
'size': 12345,
'mime_type': 'image/png',
'data': 'binary data',
}
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.PASTE_ATTACHMENTS_DISABLED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.PASTE_ATTACHMENTS_DISABLED_FAILURE, json.loads(resp.data))
def test_submit_paste_with_attachments(self):
with mock.patch.object(database.attachment, '_store_attachment_file') as mock_store_attachment_file:
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
'size': 12345,
'mime_type': 'image/png',
'data': 'binary data',
},
{
'name': 'file name 2',
'size': 12345,
'mime_type': 'image/png',
'data': 'binary data 2',
}
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(2, mock_store_attachment_file.call_count)
resp_data = json.loads(resp.data)
self.assertEqual('file_name', resp_data['attachments'][0]['name'])
self.assertEqual(12345, resp_data['attachments'][0]['size'])
self.assertEqual('image/png', resp_data['attachments'][0]['mime_type'])
self.assertIsNotNone(database.attachment.get_attachment_by_name(
util.cryptography.get_decid(resp_data['paste_id_repr']),
'file_name')
)
self.assertEqual('file_name_2', resp_data['attachments'][1]['name'])
self.assertEqual(12345, resp_data['attachments'][1]['size'])
self.assertEqual('image/png', resp_data['attachments'][1]['mime_type'])
self.assertIsNotNone(database.attachment.get_attachment_by_name(
util.cryptography.get_decid(resp_data['paste_id_repr']),
'file_name_2')
)
def test_submit_paste_invalid_attachments(self):
with mock.patch.object(database.attachment, '_store_attachment_file') as mock_store_attachment_file:
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
}
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(1, mock_store_attachment_file.call_count)
def test_submit_paste_too_large(self):
config.MAX_ATTACHMENT_SIZE = 10.0 / (1000 * 1000) # 10 B
with mock.patch.object(database.attachment, '_store_attachment_file'):
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
'size': 12345,
'mime_type': 'image/png',
'data': util.testing.random_alphanumeric_string(length=20),
},
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.PASTE_ATTACHMENT_TOO_LARGE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.PASTE_ATTACHMENT_TOO_LARGE_FAILURE, json.loads(resp.data))
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
'size': 12345,
'mime_type': 'image/png',
'data': util.testing.random_alphanumeric_string(length=5),
},
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
def test_submit_paste_base64_size_threshold(self):
config.MAX_ATTACHMENT_SIZE = 3.0 / (1000 * 1000) # 3 B
with mock.patch.object(database.attachment, '_store_attachment_file'):
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
'size': 12345,
'mime_type': 'image/png',
'data': util.testing.random_alphanumeric_string(length=5),
},
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.PASTE_ATTACHMENT_TOO_LARGE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.PASTE_ATTACHMENT_TOO_LARGE_FAILURE, json.loads(resp.data))
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
'attachments': [
{
'name': 'file name',
'size': 12345,
'mime_type': 'image/png',
'data': util.testing.random_alphanumeric_string(length=4),
},
]
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
def test_submit_paste_server_error(self):
with mock.patch.object(database.paste, 'create_new_paste', side_effect=SQLAlchemyError):
resp = self.client.post(
PasteSubmitURI.uri(),
data=json.dumps({
'contents': 'contents',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.UNDEFINED_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.UNDEFINED_FAILURE)
def test_deactivate_paste_invalid(self):
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.INCOMPLETE_PARAMS_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.INCOMPLETE_PARAMS_FAILURE)
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': -1,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.NONEXISTENT_PASTE_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.NONEXISTENT_PASTE_FAILURE)
def test_deactivate_paste_auth(self):
# Deactivate paste by being authenticated and owning the paste
user = util.testing.UserFactory.generate(username='username', password='password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id)
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.AUTH_FAILURE_CODE)
resp = self.client.post(
LoginUserURI.uri(),
data=json.dumps({
'username': 'username',
'password': 'password',
}),
content_type='application/json',
)
self.assertEquals(resp.status_code, constants.api.SUCCESS_CODE)
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
self.assertFalse(database.paste.get_paste_by_id(paste.paste_id).is_active)
def test_deactivate_paste_api_key(self):
# Deactivate paste by authentication via an API key
user = util.testing.UserFactory.generate()
paste = util.testing.PasteFactory.generate(user_id=user.user_id)
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'api_key': user.api_key,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertFalse(database.paste.get_paste_by_id(paste.paste_id).is_active)
def test_deactivate_paste_token(self):
# Deactivate paste using deactivation token
paste = util.testing.PasteFactory.generate()
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'deactivation_token': 'invalid',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.AUTH_FAILURE_CODE)
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'deactivation_token': paste.deactivation_token,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
self.assertFalse(database.paste.get_paste_by_id(paste.paste_id).is_active)
def test_deactivate_paste_already_deactivated(self):
# Deactivate paste using deactivation token
paste = util.testing.PasteFactory.generate()
database.paste.deactivate_paste(paste.paste_id)
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'deactivation_token': paste.deactivation_token,
}),
content_type='application/json',
)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE, json.loads(resp.data))
def test_deactivate_paste_server_error(self):
with mock.patch.object(database.paste, 'deactivate_paste', side_effect=SQLAlchemyError):
paste = util.testing.PasteFactory.generate()
resp = self.client.post(
PasteDeactivateURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'deactivation_token': paste.deactivation_token,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.UNDEFINED_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.UNDEFINED_FAILURE)
def test_set_paste_password(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id)
old_password_hash = paste.password_hash
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertNotEqual(database.paste.get_paste_by_id(paste.paste_id).password_hash, old_password_hash)
def test_set_paste_password_unauth(self):
# Modifying your own paste without authorization
user = util.testing.UserFactory.generate(username='username', password='password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id)
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.AUTH_FAILURE_CODE, resp.status_code)
self.assertEqual('auth_failure', json.loads(resp.data)[constants.api.FAILURE])
def test_set_paste_password_invalid_auth(self):
# Modifying someone else's paste
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id + 1)
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.AUTH_FAILURE_CODE, resp.status_code)
self.assertEqual('auth_failure', json.loads(resp.data)[constants.api.FAILURE])
def test_set_paste_password_nonexistent(self):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': -1,
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE, json.loads(resp.data))
def test_add_paste_password(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id, password=None)
self.assertIsNone(paste.password_hash)
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertIsNotNone(database.paste.get_paste_by_id(paste.paste_id).password_hash)
def test_remove_paste_password(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id, password='password')
self.assertIsNotNone(paste.password_hash)
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
                'password': None,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertIsNone(database.paste.get_paste_by_id(paste.paste_id).password_hash)
def test_set_paste_password_server_error(self):
with mock.patch.object(database.paste, 'set_paste_password', side_effect=SQLAlchemyError):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
paste = util.testing.PasteFactory.generate(user_id=user.user_id)
resp = self.client.post(
PasteSetPasswordURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_paste_details_invalid(self):
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.INCOMPLETE_PARAMS_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.INCOMPLETE_PARAMS_FAILURE)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': -1,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.NONEXISTENT_PASTE_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.NONEXISTENT_PASTE_FAILURE)
def test_paste_details_no_password(self):
user = util.testing.UserFactory.generate(username='username')
paste = util.testing.PasteFactory.generate(password=None, user_id=user.user_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
paste_details = database.paste.get_paste_by_id(paste.paste_id).as_dict()
paste_details['poster_username'] = 'username'
paste_details['attachments'] = []
self.assertEqual(paste_details, json.loads(resp.data)['details'])
def test_paste_details_password(self):
user = util.testing.UserFactory.generate(username='username')
paste = util.testing.PasteFactory.generate(password='<PASSWORD>', user_id=user.user_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.AUTH_FAILURE_CODE)
paste = util.testing.PasteFactory.generate(password='password', user_id=user.user_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.AUTH_FAILURE_CODE)
paste = util.testing.PasteFactory.generate(password='password', user_id=user.user_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': '<PASSWORD>',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.AUTH_FAILURE_CODE)
paste = util.testing.PasteFactory.generate(password='password', user_id=user.user_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
'password': 'password',
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.SUCCESS_CODE)
paste_details = database.paste.get_paste_by_id(paste.paste_id).as_dict()
paste_details['poster_username'] = 'username'
paste_details['attachments'] = []
self.assertEqual(paste_details, json.loads(resp.data)['details'])
def test_paste_details_anonymous(self):
        paste = util.testing.PasteFactory.generate(password=None, user_id=None)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual('Anonymous', json.loads(resp.data)['details']['poster_username'])
user = util.testing.UserFactory.generate(username='username')
        paste = util.testing.PasteFactory.generate(password=None, user_id=user.user_id)
database.user.deactivate_user(user.user_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE, json.loads(resp.data))
def test_paste_details_nonexistent(self):
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(1),
}),
content_type='application/json',
)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE, json.loads(resp.data))
def test_paste_details_inactive(self):
        paste = util.testing.PasteFactory.generate(password=None, user_id=None)
database.paste.deactivate_paste(paste.paste_id)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE, json.loads(resp.data))
def test_paste_details_expired(self):
        paste = util.testing.PasteFactory.generate(password=None, user_id=None, expiry_time=int(time.time()) - 1000)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.NONEXISTENT_PASTE_FAILURE, json.loads(resp.data))
def test_paste_details_with_attachments(self):
        paste = util.testing.PasteFactory.generate(password=None, user_id=None)
attachments = [
util.testing.AttachmentFactory.generate(paste_id=paste.paste_id).as_dict()
for _ in range(5)
]
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(5, len(json.loads(resp.data)['details']['attachments']))
for attachment in attachments:
self.assertIn(attachment, json.loads(resp.data)['details']['attachments'])
def test_paste_details_server_error(self):
with mock.patch.object(database.paste, 'get_paste_by_id', side_effect=SQLAlchemyError):
paste = util.testing.PasteFactory.generate(password=None)
resp = self.client.post(
PasteDetailsURI.uri(),
data=json.dumps({
'paste_id': util.cryptography.get_id_repr(paste.paste_id),
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.UNDEFINED_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.UNDEFINED_FAILURE)
def test_pastes_for_user_unauthorized(self):
resp = self.client.post(
PastesForUserURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.AUTH_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.AUTH_FAILURE, json.loads(resp.data))
def test_pastes_for_user_empty(self):
util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
resp = self.client.post(
PastesForUserURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual([], json.loads(resp.data)['pastes'])
def test_pastes_for_user_no_inactive(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
pastes = [util.testing.PasteFactory.generate(user_id=user.user_id).as_dict() for i in range(10)]
[database.paste.deactivate_paste(util.cryptography.get_decid(paste['paste_id_repr'], force=True)) for paste in pastes]
resp = self.client.post(
PastesForUserURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(0, len(json.loads(resp.data)['pastes']))
def test_pastes_for_user_valid(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
pastes = [util.testing.PasteFactory.generate(user_id=user.user_id).as_dict() for i in range(10)]
resp = self.client.post(
PastesForUserURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(len(pastes), len(json.loads(resp.data)['pastes']))
for paste in json.loads(resp.data)['pastes']:
self.assertIn(paste, pastes)
def test_pastes_for_user_server_error(self):
user = util.testing.UserFactory.generate(username='username', password='password')
self.api_login_user('username', 'password')
for i in range(3):
util.testing.PasteFactory.generate(user_id=user.user_id)
with mock.patch.object(database.paste, 'get_all_pastes_for_user', side_effect=SQLAlchemyError):
resp = self.client.post(
PastesForUserURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.UNDEFINED_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.UNDEFINED_FAILURE, json.loads(resp.data))
def test_recent_pastes_invalid(self):
resp = self.client.post(
RecentPastesURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
resp = self.client.post(
RecentPastesURI.uri(),
data=json.dumps({
'page_num': 0,
}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
def test_recent_pastes_no_results(self):
resp = self.client.post(
RecentPastesURI.uri(),
data=json.dumps({
'page_num': 0,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual([], json.loads(resp.data)['pastes'])
resp = self.client.post(
RecentPastesURI.uri(),
data=json.dumps({
'page_num': 3,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual([], json.loads(resp.data)['pastes'])
def test_recent_pastes_results(self):
pastes = []
for i in range(15):
with mock.patch.object(time, 'time', return_value=time.time() + random.randint(-10000, 10000)):
pastes.append(util.testing.PasteFactory.generate(expiry_time=None))
recent_pastes_sorted = map(
lambda paste: paste.as_dict(),
sorted(pastes, key=lambda paste: paste.post_time, reverse=True),
)
resp = self.client.post(
RecentPastesURI.uri(),
data=json.dumps({
'page_num': 0,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual(recent_pastes_sorted[0:5], json.loads(resp.data)['pastes'])
def test_top_pastes_invalid(self):
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({
'page_num': 0,
}),
content_type='application/json',
)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE_CODE, resp.status_code)
self.assertEqual(constants.api.INCOMPLETE_PARAMS_FAILURE, json.loads(resp.data))
def test_top_pastes_no_results(self):
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({
'page_num': 0,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual([], json.loads(resp.data)['pastes'])
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({
'page_num': 3,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(constants.api.SUCCESS_CODE, resp.status_code)
self.assertEqual([], json.loads(resp.data)['pastes'])
def test_recent_pastes_server_error(self):
with mock.patch.object(database.paste, 'get_recent_pastes', side_effect=SQLAlchemyError):
resp = self.client.post(
RecentPastesURI.uri(),
data=json.dumps({
'page_num': 0,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.UNDEFINED_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.UNDEFINED_FAILURE)
def test_top_pastes_results(self):
pastes = [util.testing.PasteFactory.generate() for i in range(15)]
for paste in pastes:
for i in range(random.randint(0, 50)):
database.paste.increment_paste_views(paste.paste_id)
for page_num in range(3):
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({
'page_num': page_num,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(5, len(json.loads(resp.data)['pastes']))
for i in range(4):
self.assertGreaterEqual(
json.loads(resp.data)['pastes'][i]['views'],
json.loads(resp.data)['pastes'][i + 1]['views']
)
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({
'page_num': 3,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual([], json.loads(resp.data)['pastes'])
def test_top_pastes_server_error(self):
with mock.patch.object(database.paste, 'get_top_pastes', side_effect=SQLAlchemyError):
resp = self.client.post(
TopPastesURI.uri(),
data=json.dumps({
'page_num': 0,
'num_per_page': 5,
}),
content_type='application/json',
)
self.assertEqual(resp.status_code, constants.api.UNDEFINED_FAILURE_CODE)
self.assertEqual(json.loads(resp.data), constants.api.UNDEFINED_FAILURE)
|
src/lime_python/__init__.py | mirlarof/lime-python | 400 | 11093227 | <reponame>mirlarof/lime-python
from .protocol import *
|
apex/pyprof/prof/base.py | oyj0594/apex | 6,523 | 11093254 | <reponame>oyj0594/apex<filename>apex/pyprof/prof/base.py
from abc import ABC, abstractmethod
class OperatorLayerBase(ABC):
"""
Base class for all layers and operators.
Every derived class should have the following functions.
"""
@abstractmethod
def tc(self):
"""
Tensor core usage by the kernel.
Return "1" (yes), "0" (no, but possible), "-" (not applicable)
"""
pass
@abstractmethod
def params(self):
"""
Kernel parameters to be printed.
"""
pass
@abstractmethod
def flops(self):
"""
Note that 1 FMA = 2 flops.
"""
pass
@abstractmethod
def bytes(self):
pass
@abstractmethod
def mod(self):
"""
Name of the module/class e.g. torch.nn.functional.
"""
pass
@abstractmethod
def op(self):
"""
Name of the operator e.g. sigmoid.
"""
pass
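# Illustrative sketch (not part of apex): a minimal concrete subclass showing
# how the abstract hooks are meant to be filled in. The numbers below are
# placeholders for a hypothetical elementwise sigmoid over 2**20 fp16 values.
class _ExampleSigmoid(OperatorLayerBase):
    def tc(self):
        return "-"                 # tensor cores not applicable

    def params(self):
        return "T=(1048576,) type=fp16"

    def flops(self):
        return 1048576             # roughly one flop per element

    def bytes(self):
        return 1048576 * 2 * 2     # read + write of 2-byte elements

    def mod(self):
        return "torch"

    def op(self):
        return "sigmoid"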
|
docx/enum/table.py | thebongy/MakeMyOutputs | 169 | 11093287 | # encoding: utf-8
"""
Enumerations related to tables in WordprocessingML files
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from .base import (
Enumeration, EnumMember, XmlEnumeration, XmlMappedEnumMember
)
class WD_TABLE_ALIGNMENT(XmlEnumeration):
"""
Specifies table justification type.
Example::
from docx.enum.table import WD_TABLE_ALIGNMENT
table = document.add_table(3, 3)
table.alignment = WD_TABLE_ALIGNMENT.CENTER
"""
__ms_name__ = 'WdRowAlignment'
__url__ = ' http://office.microsoft.com/en-us/word-help/HV080607259.aspx'
__members__ = (
XmlMappedEnumMember(
'LEFT', 0, 'left', 'Left-aligned'
),
XmlMappedEnumMember(
'CENTER', 1, 'center', 'Center-aligned.'
),
XmlMappedEnumMember(
'RIGHT', 2, 'right', 'Right-aligned.'
),
)
class WD_TABLE_DIRECTION(Enumeration):
"""
Specifies the direction in which an application orders cells in the
specified table or row.
Example::
from docx.enum.table import WD_TABLE_DIRECTION
table = document.add_table(3, 3)
table.direction = WD_TABLE_DIRECTION.RTL
"""
__ms_name__ = 'WdTableDirection'
__url__ = ' http://msdn.microsoft.com/en-us/library/ff835141.aspx'
__members__ = (
EnumMember(
'LTR', 0, 'The table or row is arranged with the first column '
'in the leftmost position.'
),
EnumMember(
'RTL', 1, 'The table or row is arranged with the first column '
'in the rightmost position.'
),
)
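# Added note: WD_TABLE_ALIGNMENT members are XmlMappedEnumMember instances, so
# assigning, e.g., WD_TABLE_ALIGNMENT.CENTER to a table's alignment is written
# out as the mapped WordprocessingML value "center"; WD_TABLE_DIRECTION members
# are plain EnumMember values with no XML mapping of their own.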
|
Python/interview-questions/rotateImage.py | saneravi/ML_Stuff | 209 | 11093303 | def rotateImage(a):
"""
>>> rotateImage([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
[[7, 4, 1], [8, 5, 2], [9, 6, 3]]
"""
return transpose(flip(a))
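# Added note: flip() reverses the row order (a vertical flip) and transpose()
# then mirrors across the main diagonal; composed, they give a 90-degree
# clockwise rotation. transpose() swaps entries in place and assumes a square
# matrix, so rotateImage() only supports n x n inputs.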
def flip(a):
"""
>>> flip([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
[[7, 8, 9], [4, 5, 6], [1, 2, 3]]
"""
# n = len(a)
# for x in range(n // 2):
# for y in range(n):
# a[n-x-1][y], a[x][y] = a[x][y], a[n-x-1][y]
return a[::-1]
def transpose(a):
"""
>>> transpose([[7, 8, 9], [4, 5, 6], [1, 2, 3]])
[[7, 4, 1], [8, 5, 2], [9, 6, 3]]
"""
n = len(a)
for x in range(n - 1):
for y in range(x + 1, n):
a[y][x], a[x][y] = a[x][y], a[y][x]
return a
if __name__ == "__main__":
import doctest
doctest.testmod()
|
senseact_mod/test/test_rtrl_base_env.py | homayoonfarrahi/cycle-time-study | 188 | 11093309 | <gh_stars>100-1000
# Copyright (c) 2018, The SenseAct Authors.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import numpy as np
import unittest
import time
import psutil
from multiprocessing import Process, Value, Array
from senseact import utils
from senseact.rtrl_base_env import RTRLBaseEnv
from senseact.communicator import Communicator
class MockCommunicator(Communicator):
"""
Basic barebone test communicator that can crash on demand.
"""
def __init__(self):
self._dt = 0.008
# shared variable that all processes will see
self.crash_flag = Value('i', 0)
sensor_args = {'array_len': 1, 'array_type': 'd', 'np_array_type': 'd'}
actuator_args = {'array_len': 1, 'array_type': 'd', 'np_array_type': 'd'}
super().__init__(use_sensor=True, use_actuator=True, sensor_args=sensor_args, actuator_args=actuator_args)
def _sensor_handler(self):
if self.crash_flag.value == 1:
raise Exception("Random sensor exception encountering")
self.sensor_buffer.write(0)
time.sleep(self._dt)
def _actuator_handler(self):
if self.crash_flag.value == 2:
raise Exception("Random actuator exception encountering")
if self.actuator_buffer.updated():
actuation, _, _ = self.actuator_buffer.read_update()
time.sleep(self._dt)
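# Added note: crash_flag is a shared multiprocessing.Value, so the test process
# can flip it and the communicator/polling subprocesses observe the change.
# In MockCommunicator, 1 makes the sensor handler raise and 2 makes the
# actuator handler raise; MockEnv below uses its own flag, where 1 crashes
# _compute_sensation_, 2 crashes _compute_actuation_ and 3 crashes _write_action.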
class MockEnv(RTRLBaseEnv):
"""
Basic barebone test environment that can crash on demand.
"""
def __init__(self, action_dim, observation_dim, **kwargs):
# shared variable that all processes will see
self.crash_flag = Value('i', 0)
self.reset_call_flag = Value('i', 0)
# Communicator Parameters
communicator_setups = {'generic1': {'Communicator': MockCommunicator,
'kwargs': {}},
'generic2': {'Communicator': MockCommunicator,
'kwargs': {}}
}
self._uniform_array_ = np.frombuffer(Array('d', 3).get_obj(), dtype=np.float64)
super().__init__(communicator_setups=communicator_setups,
action_dim=action_dim,
observation_dim=observation_dim,
**kwargs)
def _write_action(self, action):
if self.crash_flag.value == 3:
raise Exception("Write action crash triggered.")
super(MockEnv, self)._write_action(action)
def _compute_sensation_(self, name, sensor_window, timestamp_window, index_window):
if self.crash_flag.value == 1:
raise Exception("Compute sensation crash triggered.")
return [3,2,1]
def _compute_actuation_(self, action, timestamp, index):
if self.crash_flag.value == 2:
raise Exception("Compute actuation crash triggered.")
self._actuation_packet_['generic1'] = action
self._actuation_packet_['generic2'] = action
values = self._rand_obj_.uniform(-1, +1, 3)
rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(
self._rand_obj_.get_state()
)
np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))
np.copyto(self._uniform_array_, values)
def _reset_(self):
self.reset_call_flag.value = 1
class TestRTRLBaseEnv(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def testInit(self):
env = RTRLBaseEnv({}, 2, 3)
self.assertFalse(env._running)
self.assertEqual(env._action_buffer.array_len, 2)
self.assertEqual(env._sensation_buffer.array_len, 5)
def testInitWithCommunicator(self):
env = RTRLBaseEnv({'generic': {'Communicator': MockCommunicator, 'kwargs': {}}}, 2, 3)
self.assertFalse(env._running)
self.assertEqual(len(env._all_comms), 1)
self.assertEqual(env._action_buffer.array_len, 2)
self.assertEqual(env._sensation_buffer.array_len, 5)
def testStartSingalthread(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='singlethread')
env.start()
self.assertTrue(env._running)
env.close()
self.assertFalse(env._running)
def testStartMultithread(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='multithread')
env.start()
self.assertTrue(env._running)
time.sleep(0.5)
self.assertTrue(env._polling_loop.is_alive())
env.close()
self.assertFalse(env._running)
self.assertFalse(env._polling_loop.is_alive())
def testStartMultiprocess(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='multiprocess')
env.start()
self.assertTrue(env._running)
time.sleep(0.5)
self.assertTrue(env._polling_loop.is_alive())
env.close()
self.assertFalse(env._running)
self.assertFalse(env._polling_loop.is_alive())
def testNotImplementedError(self):
env = RTRLBaseEnv({}, 2, 3, run_mode='singlethread')
env.start()
with self.assertRaises(NotImplementedError):
env.step(0)
env.close()
def testStartWithCommunicator(self):
env = RTRLBaseEnv({'generic': {'Communicator': MockCommunicator, 'kwargs': {}}}, 2, 3, run_mode='singlethread')
env.start()
time.sleep(0.5)
self.assertTrue(env._running)
self.assertTrue(env._all_comms['generic'].is_alive())
env.close()
self.assertFalse(env._running)
self.assertFalse(env._all_comms['generic'].is_alive())
def testStepWithSinglethread(self):
env = MockEnv(1, 1, run_mode='singlethread')
env.start()
time.sleep(0.5)
obs, reward, done, info = env.step(0)
self.assertEqual(obs, [3])
self.assertEqual(reward, 2)
self.assertEqual(done, 1)
env.close()
def testStepWithMultithread(self):
env = MockEnv(1, 1, run_mode='multithread')
env.start()
time.sleep(0.5)
obs, reward, done, info = env.step(0)
self.assertEqual(obs, [3])
self.assertEqual(reward, 2)
self.assertEqual(done, 1)
env.close()
def testStepWithMultiprocess(self):
env = MockEnv(1, 1, run_mode='multiprocess')
env.start()
time.sleep(0.5)
obs, reward, done, info = env.step(0)
self.assertEqual(obs, [3])
self.assertEqual(reward, 2)
self.assertEqual(done, 1)
env.close()
def testResetSinglethread(self):
env = MockEnv(1, 1, run_mode='singlethread')
env.start()
time.sleep(0.5)
obs = env.reset()
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultithreadBlocking(self):
env = MockEnv(1, 1, run_mode='multithread')
env.start()
time.sleep(0.5)
obs = env.reset()
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultithreadNonblocking(self):
env = MockEnv(1, 1, run_mode='multithread')
env.start()
time.sleep(0.5)
obs = env.reset(blocking=False)
time.sleep(0.5)
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultiprocessBlocking(self):
env = MockEnv(1, 1, run_mode='multiprocess')
env.start()
time.sleep(0.5)
obs = env.reset()
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testResetMultiprocessNonblocking(self):
env = MockEnv(1, 1, run_mode='multiprocess')
env.start()
time.sleep(0.5)
obs = env.reset(blocking=False)
time.sleep(0.5)
self.assertEqual(obs, [3])
self.assertEqual(env.reset_call_flag.value, 1)
env.close()
def testSinglethreadCommNotAlive(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='singlethread')
self._env.start()
self._env.step(0)
# set the communicator flag to 1, wait a few time steps and check that step will crash
self._env._all_comms['generic1'].crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators has been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
self._env.close()
def testMultithreadCommNotAlive(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
self._env.start()
self._env.step(0)
# set the communicator flag to 1, wait a few time steps and check that step will crash
self._env._all_comms['generic1'].crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators has been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testMultiprocessCommNotAlive(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
self._env.start()
self._env.step(0)
# set the communicator flag to 1, wait a few time steps and check that step will crash
self._env._all_comms['generic1'].crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators has been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testMultithreadPollingDead(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
self._env.start()
self._env.step(0)
self._env.crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
# check all communicators has been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testMultiprocessPollingDead(self):
self._env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
self._env.start()
self._env.step(0)
self._env.crash_flag.value = 1
time.sleep(1.5)
with self.assertRaises(Exception):
self._env.step(0)
# give some time for process to completely close
time.sleep(1.5)
        # check all communicators have been closed
for comm in self._env._all_comms.values():
self.assertFalse(comm.is_alive())
# check the polling thread has been closed
self.assertFalse(self._env._polling_loop.is_alive())
self._env.close()
def testSinglethreadMainProcessDead(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='singlethread')
env.start()
while True:
env.step(0)
curr_process_util = psutil.Process()
main_process = Process(target=spawn_main_process)
main_process.start()
        # give some time to make sure everything is running
time.sleep(1.0)
child_processes = curr_process_util.children(recursive=True)
self.assertEqual(len(child_processes), 3)
main_process.terminate()
main_process.join()
time.sleep(2.0)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultithreadMainProcessDead(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
env.start()
while True:
env.step(0)
curr_process_util = psutil.Process()
main_process = Process(target=spawn_main_process)
main_process.start()
        # give some time to make sure everything is running
time.sleep(1.0)
child_processes = curr_process_util.children(recursive=True)
self.assertEqual(len(child_processes), 3)
main_process.terminate()
main_process.join()
time.sleep(2.0)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultiprocessMainProcessDead(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
env.start()
while True:
env.step(0)
curr_process_util = psutil.Process()
main_process = Process(target=spawn_main_process)
main_process.start()
        # give some time to make sure everything is running
time.sleep(1.0)
child_processes = curr_process_util.children(recursive=True)
self.assertEqual(len(child_processes), 4)
main_process.terminate()
main_process.join()
time.sleep(2.0)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testSinglethreadMainProcessException(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='singlethread')
env.start()
env.crash_flag.value = 3
env.step(0)
main_process = Process(target=spawn_main_process)
main_process.start()
time.sleep(1.0)
main_process.join()
curr_process_util = psutil.Process()
child_processes = curr_process_util.children(recursive=True)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultithreadMainProcessException(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multithread')
env.start()
env.crash_flag.value = 3
env.step(0)
main_process = Process(target=spawn_main_process)
main_process.start()
time.sleep(1.0)
main_process.join()
curr_process_util = psutil.Process()
child_processes = curr_process_util.children(recursive=True)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testMultiprocessMainProcessException(self):
def spawn_main_process():
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
env.start()
env.crash_flag.value = 3
env.step(0)
main_process = Process(target=spawn_main_process)
main_process.start()
time.sleep(1.0)
main_process.join()
curr_process_util = psutil.Process()
child_processes = curr_process_util.children(recursive=True)
self.assertFalse(any([c.is_running() for c in child_processes]))
def testSharedRandomState(self):
env = MockEnv(action_dim=1, observation_dim=1, run_mode='multiprocess')
initial_rand_obj = copy.deepcopy(env._rand_obj_)
initial_values = initial_rand_obj.uniform(-1, +1, 3)
env.start()
for _ in range(3):
env.step(0)
updated_rand_obj = np.random.RandomState()
updated_rand_obj.set_state(utils.get_random_state_from_array(env._shared_rstate_array_))
safe_final_values = updated_rand_obj.uniform(-1, +1, 3)
unsafe_final_values = env._rand_obj_.uniform(-1, +1, 3)
env.step(0)
env.close()
assert np.all(initial_values == unsafe_final_values)
assert np.all(initial_values != safe_final_values)
assert np.all(safe_final_values == env._uniform_array_)
if __name__ == '__main__':
unittest.main(buffer=True)
|
Validation/HGCalValidation/python/simhitValidation_cff.py | ckamtsikis/cmssw | 852 | 11093371 | import FWCore.ParameterSet.Config as cms
from Validation.HGCalValidation.hgcalSimHitValidationEE_cfi import *
from Configuration.ProcessModifiers.dd4hep_cff import dd4hep
dd4hep.toModify( hgcalSimHitValidationEE, fromDDD = False )
hgcalSimHitValidationHEF = hgcalSimHitValidationEE.clone(
DetectorName = cms.string("HGCalHESiliconSensitive"),
CaloHitSource = cms.string("HGCHitsHEfront"))
hgcalSimHitValidationHEB = hgcalSimHitValidationEE.clone(
DetectorName = cms.string("HGCalHEScintillatorSensitive"),
CaloHitSource = cms.string("HGCHitsHEback"),
)
|
deepscm/datasets/morphomnist/__init__.py | mobarakol/deepscm | 183 | 11093375 | import os
from typing import Tuple
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from deepscm.morphomnist import io
def _get_paths(root_dir, train):
prefix = "train" if train else "t10k"
images_filename = prefix + "-images-idx3-ubyte.gz"
labels_filename = prefix + "-labels-idx1-ubyte.gz"
metrics_filename = prefix + "-morpho.csv"
images_path = os.path.join(root_dir, images_filename)
labels_path = os.path.join(root_dir, labels_filename)
metrics_path = os.path.join(root_dir, metrics_filename)
return images_path, labels_path, metrics_path
def load_morphomnist_like(root_dir, train: bool = True, columns=None) \
-> Tuple[np.ndarray, np.ndarray, pd.DataFrame]:
"""
Args:
root_dir: path to data directory
train: whether to load the training subset (``True``, ``'train-*'`` files) or the test
subset (``False``, ``'t10k-*'`` files)
columns: list of morphometrics to load; by default (``None``) loads the image index and
all available metrics: area, length, thickness, slant, width, and height
Returns:
images, labels, metrics
"""
images_path, labels_path, metrics_path = _get_paths(root_dir, train)
images = io.load_idx(images_path)
labels = io.load_idx(labels_path)
if columns is not None and 'index' not in columns:
usecols = ['index'] + list(columns)
else:
usecols = columns
metrics = pd.read_csv(metrics_path, usecols=usecols, index_col='index')
return images, labels, metrics
def save_morphomnist_like(images: np.ndarray, labels: np.ndarray, metrics: pd.DataFrame,
root_dir, train: bool):
"""
Args:
images: array of MNIST-like images
labels: array of class labels
metrics: data frame of morphometrics
root_dir: path to the target data directory
train: whether to save as the training subset (``True``, ``'train-*'`` files) or the test
subset (``False``, ``'t10k-*'`` files)
"""
assert len(images) == len(labels)
assert len(images) == len(metrics)
images_path, labels_path, metrics_path = _get_paths(root_dir, train)
os.makedirs(root_dir, exist_ok=True)
io.save_idx(images, images_path)
io.save_idx(labels, labels_path)
metrics.to_csv(metrics_path, index_label='index')
class MorphoMNISTLike(Dataset):
def __init__(self, root_dir, train: bool = True, columns=None):
"""
Args:
root_dir: path to data directory
train: whether to load the training subset (``True``, ``'train-*'`` files) or the test
subset (``False``, ``'t10k-*'`` files)
columns: list of morphometrics to load; by default (``None``) loads the image index and
all available metrics: area, length, thickness, slant, width, and height
"""
self.root_dir = root_dir
self.train = train
images, labels, metrics_df = load_morphomnist_like(root_dir, train, columns)
self.images = torch.as_tensor(images)
self.labels = torch.as_tensor(labels)
if columns is None:
columns = metrics_df.columns
self.metrics = {col: torch.as_tensor(metrics_df[col]) for col in columns}
self.columns = columns
assert len(self.images) == len(self.labels) and len(self.images) == len(metrics_df)
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
item = {col: values[idx] for col, values in self.metrics.items()}
item['image'] = self.images[idx]
item['label'] = self.labels[idx]
return item
if __name__ == '__main__':
from torch.utils.data import DataLoader
# Example usage
dataset = MorphoMNISTLike(root_dir="/vol/biomedic/users/dc315/mnist/original",
columns=['slant', 'thickness'], train=False)
print(dataset.columns)
data_loader = DataLoader(dataset, batch_size=10, shuffle=True)
for batch in data_loader:
print(batch)
break
|
dbReports/iondb/product_integration/tests/ThermoFisherCloudAccountTests.py | konradotto/TS | 125 | 11093380 | <filename>dbReports/iondb/product_integration/tests/ThermoFisherCloudAccountTests.py
# Copyright (C) 2017 Ion Torrent Systems, Inc. All Rights Reserved
from django.test import TestCase
from iondb.product_integration.models import ThermoFisherCloudAccount
class ThermoFisherCloudAccountTest(TestCase):
"""This will test the thermo fisher cloud account model"""
def test_init(self):
"""Tests the constructor"""
ThermoFisherCloudAccount("MyUserName")
|
tf_agents/utils/xla.py | Francis777/agents | 3,175 | 11093413 | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLA utilities for TF-Agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
# Dictionary mapping a device name to a python bool.
_IS_XLA_AVAILABLE = {}
def is_xla_available():
"""Is XLA compilation available for the current device context?"""
global _IS_XLA_AVAILABLE
# There's unfortunately no cleaner way to get the device other than creating a
# new op and querying it.
with tf.name_scope("is_xla_available"):
device = tf.constant(0.0).device
if device not in _IS_XLA_AVAILABLE:
try:
# Take ourselves outside of any tf.function calls.
with tf.init_scope():
# Create temporary xla subgraph
with tf.compat.v1.Graph().as_default():
# We'll use a session so we can be compatible with both TF1 and TF2
with tf.compat.v1.Session() as sess:
# Check for XLA on the given device.
with tf.device(device):
sess.run(tf.xla.experimental.compile(lambda: tf.constant(0.0)))
except (ValueError, tf.errors.InvalidArgumentError):
_IS_XLA_AVAILABLE[device] = False
else:
_IS_XLA_AVAILABLE[device] = True
return _IS_XLA_AVAILABLE[device]
def compile_in_graph_mode(fn):
"""Decorator for XLA compilation iff in graph mode and XLA is available.
Example:
```python
@compile_in_graph_mode
def fn(x, y, z):
return {'a': x + y, 'b': y * z}
@common.function
def calls_fn(inputs):
return fn(inputs.x, inputs.y, inputs.z)
  # Call calls_fn().
  ```
Args:
fn: A callable that accepts a list of possibly nested tensor arguments.
kwargs and inputs taking the value `None` are not supported. Non-tensor
arguments are treated as nest objects, and leaves are converted to
tensors.
Returns:
A function that, when called, checks if XLA is compiled in and enabled
for the current device, and that it's being built in graph mode, and
returns an XLA-compiled version of `fn`. If in eager mode, or XLA
is not available, then `fn` is called directly.
  """
@functools.wraps(fn)
def _compiled(*args, **kwargs):
"""Helper function for optionally XLA compiling `fn`."""
if kwargs:
raise ValueError(
"kwargs are not supported for functions that are XLA-compiled, "
"but saw kwargs: {}".format(kwargs))
args = tf.nest.map_structure(tf.convert_to_tensor, args)
if tf.compat.v1.executing_eagerly() or not is_xla_available():
return fn(*args)
else:
# The flattening/unpacking is necessary because xla compile only allows
# flat inputs and outputs: no substructures. But we provide support for
# nested inputs and outputs.
outputs_for_structure = [None]
flat_args = tf.nest.flatten(args)
def _fn(*flattened_args):
unflattened_args = tf.nest.pack_sequence_as(args, flattened_args)
fn_outputs = fn(*unflattened_args)
outputs_for_structure[0] = fn_outputs
return tf.nest.flatten(fn_outputs)
outputs = tf.xla.experimental.compile(_fn, flat_args)
return tf.nest.pack_sequence_as(outputs_for_structure[0], outputs)
return _compiled
|
alipay/aop/api/response/AlipayDataDataexchangeSfasdfResponse.py | snowxmas/alipay-sdk-python-all | 213 | 11093435 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AlipayItemGoodsList import AlipayItemGoodsList
class AlipayDataDataexchangeSfasdfResponse(AlipayResponse):
def __init__(self):
super(AlipayDataDataexchangeSfasdfResponse, self).__init__()
self._azxfghd = None
self._gfdhsdasafg = None
self._gfhjfdsa = None
self._ghjfdsafgh = None
self._sdvsdv = None
self._wrty = None
@property
def azxfghd(self):
return self._azxfghd
@azxfghd.setter
def azxfghd(self, value):
if isinstance(value, list):
self._azxfghd = list()
for i in value:
self._azxfghd.append(i)
@property
def gfdhsdasafg(self):
return self._gfdhsdasafg
@gfdhsdasafg.setter
def gfdhsdasafg(self, value):
if isinstance(value, list):
self._gfdhsdasafg = list()
for i in value:
self._gfdhsdasafg.append(i)
@property
def gfhjfdsa(self):
return self._gfhjfdsa
@gfhjfdsa.setter
def gfhjfdsa(self, value):
if isinstance(value, list):
self._gfhjfdsa = list()
for i in value:
self._gfhjfdsa.append(i)
@property
def ghjfdsafgh(self):
return self._ghjfdsafgh
@ghjfdsafgh.setter
def ghjfdsafgh(self, value):
if isinstance(value, list):
self._ghjfdsafgh = list()
for i in value:
self._ghjfdsafgh.append(i)
@property
def sdvsdv(self):
return self._sdvsdv
@sdvsdv.setter
def sdvsdv(self, value):
self._sdvsdv = value
@property
def wrty(self):
return self._wrty
@wrty.setter
def wrty(self, value):
if isinstance(value, list):
self._wrty = list()
for i in value:
if isinstance(i, AlipayItemGoodsList):
self._wrty.append(i)
else:
self._wrty.append(AlipayItemGoodsList.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayDataDataexchangeSfasdfResponse, self).parse_response_content(response_content)
if 'azxfghd' in response:
self.azxfghd = response['azxfghd']
if 'gfdhsdasafg' in response:
self.gfdhsdasafg = response['gfdhsdasafg']
if 'gfhjfdsa' in response:
self.gfhjfdsa = response['gfhjfdsa']
if 'ghjfdsafgh' in response:
self.ghjfdsafgh = response['ghjfdsafgh']
if 'sdvsdv' in response:
self.sdvsdv = response['sdvsdv']
if 'wrty' in response:
self.wrty = response['wrty']
|
exadata/exastat.py | nenadnoveljic/tpt-oracle | 483 | 11093502 | #!/usr/bin/env python
################################################################################
##
## File name: exastat.py (v1.0)
## Purpose: Show cumulative exadata metrics from CELLCLI and their deltas
## in multicolumn format
##
## Author: <NAME> ( <EMAIL> | @tanelpoder | blog.tanelpoder.com )
## Copyright: <NAME>. All Rights Reserved.
##
## Usage: Save LIST METRICHISTORY into a file or pipe directly to exastat
##
## Example: cellcli -e "LIST METRICHISTORY WHERE name LIKE 'FL_.*' AND collectionTime > '"`date --date \
## '1 day ago' "+%Y-%m-%dT%H:%M:%S%:z"`"'" | ./exastat FL_DISK_FIRST FL_FLASH_FIRST
##
## The above example lists you two metrics FL_DISK_FIRST and FL_FLASH_FIRST in columnar format
## You can list any number of metrics (you're not restricted to only two)
##
##
################################################################################
import fileinput, re, datetime, time, sys
DEBUG=False
rawmetrics = {} # main metric array
errors = [] # unparsable lines
timestamps = []
cell_pattern = re.compile(r"^\s*(?P<name>\w+)\s+(?P<obj>\w+)\s+(?P<value>[\w,]+)\s(?P<unit>.*)\s+(?P<timestamp>.{25})$")
def extract_metric_value(s, pattern):
match = pattern.match(s)
if match:
name = match.group("name").strip()
obj = match.group("obj").strip()
value = int(match.group("value").strip().replace(',',''))
unit = match.group("unit").strip()
timestamp = datetime.datetime.fromtimestamp(time.mktime(time.strptime(match.group("timestamp").strip()[:-6], "%Y-%m-%dT%H:%M:%S")))
return {"METRIC_NAME":name, "METRIC_OBJECT":obj, "METRIC_VALUE":value, "METRIC_UNIT":unit, "TIMESTAMP":timestamp}
def get_timestamps(m):
t = []
for i in (key for key in sorted(m.keys(), key=lambda x: x[1])):
if not t.__contains__(i[1]):
t.append( i[1] )
return t
def get_ordered_metric_values(m, metric_name):
r = []
for i in (key for key in sorted(m.keys(), key=lambda x: x[1]) if key[0]==metric_name):
if DEBUG: print "key = %s value = %s" % (i, m[i])
r.append({ "METRIC_NAME":i[0], "TIMESTAMP":i[1], "METRIC_OBJECT":i[2], "METRIC_VALUE":m[i]["METRIC_VALUE"], "METRIC_UNIT":m[i]["METRIC_UNIT"] })
return r
def get_delta_metric_values(m, metric_name):
r = {}
prev_metric_value = None
# requires ordered input
for i in (key for key in (get_ordered_metric_values(m, metric_name))):
if prev_metric_value:
if DEBUG: print "%s delta %s = %s (%s - %s)" % ( i["TIMESTAMP"], i["METRIC_NAME"], i["METRIC_VALUE"] - prev_metric_value, i["METRIC_VALUE"], prev_metric_value )
r[i["TIMESTAMP"]] = ( i["TIMESTAMP"], i["METRIC_NAME"], i["METRIC_VALUE"] - prev_metric_value, i["METRIC_VALUE"], prev_metric_value )
prev_metric_value = i["METRIC_VALUE"]
else:
prev_metric_value = i["METRIC_VALUE"]
return r
# main()
metric_list = sys.argv[1:]
for line in sys.stdin.readlines():
e = extract_metric_value(line, cell_pattern)
if e:
if e["METRIC_NAME"] in metric_list:
rawmetrics[e["METRIC_NAME"], e["TIMESTAMP"], e["METRIC_OBJECT"]] = { "METRIC_VALUE":e["METRIC_VALUE"], "METRIC_UNIT":e["METRIC_UNIT"] }
else:
errors.append(line)
if DEBUG: print "len(rawmetrics) = %s len(errors) = %s" % (len(rawmetrics), len(errors))
m = {}
for mn in metric_list:
m[mn] = get_delta_metric_values(rawmetrics, mn)
timestamps = get_timestamps(rawmetrics)
if DEBUG: print timestamps.pop(0) # 0-th sample doesn't have delta
output_header = ("%-26s %10s" % ("TIMESTAMP", "SECONDS"))
output_separator = "%-26s %10s" % ("-" * 26, "-" * 10)
for x in metric_list:
output_header += ("%" + str(len(x)+1) +"s") % x
output_separator += ' ' + '-' * len(x)
print ""
print output_header
print output_separator
prev_ts = None
for ts in iter(timestamps):
if prev_ts:
out = "%-26s %10s" % (ts, (ts - prev_ts).seconds)
prev_ts = ts
else:
out = "%-26s %10s" % (ts, "")
prev_ts = ts
for mn in metric_list:
if ts in m[mn]:
v = m[mn][ts][2]
else:
v = 0
out += (" %"+str(len(mn)) +"d") % v
print out
print ""
|
mistral/tests/unit/engine/test_task_pause_resume.py | soda-research/mistral | 205 | 11093504 | # Copyright 2015 - StackStorm, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit.engine import base
from mistral.workflow import states
from mistral_lib import actions as ml_actions
class TaskPauseResumeTest(base.EngineTestCase):
def test_pause_resume_action_ex(self):
workflow = """
version: '2.0'
wf:
tasks:
task1:
action: std.async_noop
on-success:
- task2
task2:
action: std.noop
"""
wf_service.create_workflows(workflow)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_state(wf_ex.id, states.RUNNING)
with db_api.transaction():
wf_execs = db_api.get_workflow_executions()
wf_ex = self._assert_single_item(wf_execs, name='wf')
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.RUNNING, task_1_ex.state)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.RUNNING, task_1_action_exs[0].state)
# Pause the action execution of task 1.
self.engine.on_action_update(task_1_action_exs[0].id, states.PAUSED)
self.await_task_paused(task_1_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
self.assertEqual(states.PAUSED, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.PAUSED, task_1_ex.state)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.PAUSED, task_1_action_exs[0].state)
# Resume the action execution of task 1.
self.engine.on_action_update(task_1_action_exs[0].id, states.RUNNING)
self.await_task_running(task_1_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.RUNNING, task_1_ex.state)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.RUNNING, task_1_action_exs[0].state)
# Complete action execution of task 1.
self.engine.on_action_complete(
task_1_action_exs[0].id,
ml_actions.Result(data={'result': 'foobar'})
)
# Wait for the workflow execution to complete.
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(task_execs, name='task1')
task_1_action_exs = db_api.get_action_executions(
task_execution_id=task_1_ex.id
)
task_2_ex = self._assert_single_item(task_execs, name='task2')
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(2, len(task_execs))
self.assertEqual(states.SUCCESS, task_1_ex.state)
self.assertEqual(1, len(task_1_action_exs))
self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_2_ex.state)
def test_pause_resume_action_ex_with_items_task(self):
workflow = """
version: '2.0'
wf:
tasks:
task1:
with-items: i in <% range(3) %>
action: std.async_noop
on-success:
- task2
task2:
action: std.noop
"""
wf_service.create_workflows(workflow)
wf_ex = self.engine.start_workflow('wf')
self.await_workflow_state(wf_ex.id, states.RUNNING)
with db_api.transaction():
wf_execs = db_api.get_workflow_executions()
wf_ex = self._assert_single_item(wf_execs, name='wf')
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = sorted(
db_api.get_action_executions(task_execution_id=task_1_ex.id),
key=lambda x: x['runtime_context']['index']
)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.RUNNING, task_1_ex.state)
self.assertEqual(3, len(task_1_action_exs))
self.assertEqual(states.RUNNING, task_1_action_exs[0].state)
self.assertEqual(states.RUNNING, task_1_action_exs[1].state)
self.assertEqual(states.RUNNING, task_1_action_exs[2].state)
# Pause the 1st action execution of task 1.
self.engine.on_action_update(task_1_action_exs[0].id, states.PAUSED)
self.await_task_paused(task_1_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = sorted(
db_api.get_action_executions(task_execution_id=task_1_ex.id),
key=lambda x: x['runtime_context']['index']
)
self.assertEqual(states.PAUSED, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.PAUSED, task_1_ex.state)
self.assertEqual(3, len(task_1_action_exs))
self.assertEqual(states.PAUSED, task_1_action_exs[0].state)
self.assertEqual(states.RUNNING, task_1_action_exs[1].state)
self.assertEqual(states.RUNNING, task_1_action_exs[2].state)
# Complete 2nd and 3rd action executions of task 1.
self.engine.on_action_complete(
task_1_action_exs[1].id,
ml_actions.Result(data={'result': 'two'})
)
self.engine.on_action_complete(
task_1_action_exs[2].id,
ml_actions.Result(data={'result': 'three'})
)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = sorted(
db_api.get_action_executions(task_execution_id=task_1_ex.id),
key=lambda x: x['runtime_context']['index']
)
self.assertEqual(states.PAUSED, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.PAUSED, task_1_ex.state)
self.assertEqual(3, len(task_1_action_exs))
self.assertEqual(states.PAUSED, task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[1].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[2].state)
# Resume the 1st action execution of task 1.
self.engine.on_action_update(task_1_action_exs[0].id, states.RUNNING)
self.await_task_running(task_1_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_1_ex = self._assert_single_item(
wf_ex.task_executions,
name='task1'
)
task_1_action_exs = sorted(
db_api.get_action_executions(task_execution_id=task_1_ex.id),
key=lambda x: x['runtime_context']['index']
)
self.assertEqual(states.RUNNING, wf_ex.state)
self.assertEqual(1, len(task_execs))
self.assertEqual(states.RUNNING, task_1_ex.state)
self.assertEqual(3, len(task_1_action_exs))
self.assertEqual(states.RUNNING, task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[1].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[2].state)
# Complete the 1st action execution of task 1.
self.engine.on_action_complete(
task_1_action_exs[0].id,
ml_actions.Result(data={'result': 'foobar'})
)
# Wait for the workflow execution to complete.
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
task_execs = wf_ex.task_executions
task_1_ex = self._assert_single_item(task_execs, name='task1')
task_1_action_exs = sorted(
db_api.get_action_executions(task_execution_id=task_1_ex.id),
key=lambda x: x['runtime_context']['index']
)
task_2_ex = self._assert_single_item(task_execs, name='task2')
self.assertEqual(states.SUCCESS, wf_ex.state)
self.assertEqual(2, len(task_execs))
self.assertEqual(states.SUCCESS, task_1_ex.state)
self.assertEqual(3, len(task_1_action_exs))
self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[1].state)
self.assertEqual(states.SUCCESS, task_1_action_exs[2].state)
self.assertEqual(states.SUCCESS, task_2_ex.state)
|
sigal/plugins/encrypt/__init__.py | Lucas-C/sigal | 648 | 11093559 | """
Plugin to protect gallery by encrypting image files using a password.
Options::
encrypt_options = {
'password': 'password',
'ask_password': False,
'gcm_tag': 'randomly_generated',
'kdf_salt': 'randomly_generated',
'kdf_iters': 10000,
}
- ``password``: The password used to encrypt the images on gallery build,
and decrypt them when viewers access the gallery. No default value. You must
specify a password.
- ``ask_password``: Whether or not viewers are asked for the password to view
the gallery. If set to ``False``, the password will be present in the HTML
files so the images are decrypted automatically. Defaults to ``False``.
- ``gcm_tag``, ``kdf_salt``, ``kdf_iters``: Cryptographic parameters used when
encrypting the files. ``gcm_tag``, ``kdf_salt`` are meant to be randomly
generated, ``kdf_iters`` defaults to 10000. Do not specify them in the config
file unless you have good reasons to do so.
Note: The plugin caches the cryptographic parameters (but not the password)
after the first build, so that incremental builds can share the same
credentials. DO NOT CHANGE THE PASSWORD OR OTHER CRYPTOGRAPHIC PARAMETERS ONCE
A GALLERY IS BUILT, or there will be inconsistency in encrypted files and
viewers will not be able to see some of the images any more.
.. _compatibility-with-encrypt:
Compatibility with other plugins:
- ``zip_gallery``: if you enable both this plugin and the ``zip_gallery``
plugin, the generated zip archives will contain encrypted images, which is
generally meaningless since viewers cannot easily decrypt them outside
a browser.
"""
from .encrypt import register # noqa
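# Assumed configuration sketch (not part of the original file): enabling this
# plugin from sigal.conf.py typically looks like the lines below; the option
# names themselves are the ones documented in the module docstring above.
#   plugins = ['sigal.plugins.encrypt']
#   encrypt_options = {'password': 'change-me', 'ask_password': True}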
|
gunnery/event/tests/test_models.py | timgates42/gunnery | 314 | 11093569 | <filename>gunnery/event/tests/test_models.py
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from account.tests.fixtures import UserFactory
from core.tests.fixtures import ApplicationFactory, DepartmentFactory
from event.models import NotificationPreferences
class TestNotificationPreferences(TestCase):
def setUp(self):
self.department = DepartmentFactory()
self.user = UserFactory()
self.user.groups.add(self.department.groups.get(system_name='user'))
self.user.save()
self.application = ApplicationFactory(department=self.department)
def test_on_save_application(self):
content_type = ContentType.objects.get_for_model(type(self.application))
notifications = NotificationPreferences.objects.filter(user=self.user,
content_type=content_type,
object_id=self.application.id,
event_type='ExecutionFinish',
is_active=True)
self.assertEqual(len(notifications), 1)
|
Game24/modules/endInterface.py | dwlcreat/dsf | 4,013 | 11093570 | '''
Function:
    Game over interface
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import sys
import pygame
'''Game over interface'''
def endInterface(screen, cfg):
font_size_big = 60
font_size_small = 30
font_color = (255, 255, 255)
font_big = pygame.font.Font(cfg.FONTPATH, font_size_big)
font_small = pygame.font.Font(cfg.FONTPATH, font_size_small)
surface = screen.convert_alpha()
surface.fill((0, 0, 0, 5))
text = font_big.render('Game Over!', True, font_color)
text_rect = text.get_rect()
text_rect.centerx, text_rect.centery = cfg.SCREENSIZE[0]/2, cfg.SCREENSIZE[1]/2-50
surface.blit(text, text_rect)
button_width, button_height = 100, 40
button_start_x_left = cfg.SCREENSIZE[0] / 2 - button_width - 20
button_start_x_right = cfg.SCREENSIZE[0] / 2 + 20
button_start_y = cfg.SCREENSIZE[1] / 2 - button_height / 2 + 20
pygame.draw.rect(surface, (128, 128, 128), (button_start_x_left, button_start_y, button_width, button_height))
text_restart = font_small.render('Restart', True, font_color)
text_restart_rect = text_restart.get_rect()
text_restart_rect.centerx, text_restart_rect.centery = button_start_x_left + button_width / 2, button_start_y + button_height / 2
surface.blit(text_restart, text_restart_rect)
pygame.draw.rect(surface, (128, 128, 128), (button_start_x_right, button_start_y, button_width, button_height))
text_quit = font_small.render('Quit', True, font_color)
text_quit_rect = text_quit.get_rect()
text_quit_rect.centerx, text_quit_rect.centery = button_start_x_right + button_width / 2, button_start_y + button_height / 2
surface.blit(text_quit, text_quit_rect)
while True:
screen.blit(surface, (0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN and event.button:
if text_quit_rect.collidepoint(pygame.mouse.get_pos()):
return False
if text_restart_rect.collidepoint(pygame.mouse.get_pos()):
return True
pygame.display.update() |
src/openfermion/linalg/wave_fitting_test.py | Emieeel/OpenFermion | 1,291 | 11093582 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for wave_fitting.py"""
import numpy
from .wave_fitting import prony, fit_known_frequencies
def test_prony_zeros():
signal = numpy.zeros(10)
amplitudes, phases = prony(signal)
assert (len(amplitudes) == 5)
assert (len(phases) == 5)
for j in range(5):
numpy.testing.assert_allclose(amplitudes[j], 0)
numpy.testing.assert_allclose(phases[j], 0)
def test_prony_signal():
x_vec = numpy.linspace(0, 1, 11)
y_vec = (0.5 * numpy.exp(1j * x_vec * 3) + 0.3 * numpy.exp(1j * x_vec * 5) +
0.15 * numpy.exp(1j * x_vec * 1.5) +
0.1 * numpy.exp(1j * x_vec * 4) +
0.05 * numpy.exp(1j * x_vec * 1.2))
print(y_vec)
amplitudes, phases = prony(y_vec)
assert (len(amplitudes) == 5)
assert (len(phases) == 5)
for a, p in zip(amplitudes, phases):
print(a, numpy.angle(p))
numpy.testing.assert_allclose(numpy.abs(amplitudes[0]), 0.5, atol=1e-4)
numpy.testing.assert_allclose(numpy.abs(amplitudes[1]), 0.3, atol=1e-4)
numpy.testing.assert_allclose(numpy.abs(amplitudes[2]), 0.15, atol=1e-4)
numpy.testing.assert_allclose(numpy.abs(amplitudes[3]), 0.1, atol=1e-4)
numpy.testing.assert_allclose(numpy.abs(amplitudes[4]), 0.05, atol=1e-4)
numpy.testing.assert_allclose(numpy.angle(phases[0]), 0.3, atol=1e-4)
numpy.testing.assert_allclose(numpy.angle(phases[1]), 0.5, atol=1e-4)
numpy.testing.assert_allclose(numpy.angle(phases[2]), 0.15, atol=1e-4)
numpy.testing.assert_allclose(numpy.angle(phases[3]), 0.4, atol=1e-4)
numpy.testing.assert_allclose(numpy.angle(phases[4]), 0.12, atol=1e-4)
def test_fitting_signal():
frequencies = numpy.array([0.4, 0.5, 0.8])
amplitudes = numpy.array([0.2, 0.4, 0.4])
times = numpy.linspace(0, 10, 21)
signal = numpy.array([
numpy.sum([
amp * numpy.exp(1j * time * freq)
for freq, amp in zip(frequencies, amplitudes)
])
for time in times
])
amplitudes_guess = fit_known_frequencies(signal, times, frequencies)
    assert len(amplitudes_guess) == 3
for index in range(3):
assert numpy.isclose(amplitudes_guess[index], amplitudes[index])
|
anipose/label_videos_3d.py | SjoerdBruijn/anipose | 230 | 11093586 | #!/usr/bin/env python3
from mayavi import mlab
mlab.options.offscreen = True
import numpy as np
from glob import glob
import pandas as pd
import os.path
import cv2
import sys
import skvideo.io
from tqdm import tqdm, trange
import sys
from collections import defaultdict
from matplotlib.pyplot import get_cmap
from .common import make_process_fun, get_nframes, get_video_name, get_video_params, get_data_length, natural_keys
def connect(points, bps, bp_dict, color):
ixs = [bp_dict[bp] for bp in bps]
return mlab.plot3d(points[ixs, 0], points[ixs, 1], points[ixs, 2],
np.ones(len(ixs)), reset_zoom=False,
color=color, tube_radius=None, line_width=10)
def connect_all(points, scheme, bp_dict, cmap):
lines = []
for i, bps in enumerate(scheme):
line = connect(points, bps, bp_dict, color=cmap(i)[:3])
lines.append(line)
return lines
def update_line(line, points, bps, bp_dict):
ixs = [bp_dict[bp] for bp in bps]
# ixs = [bodyparts.index(bp) for bp in bps]
new = np.vstack([points[ixs, 0], points[ixs, 1], points[ixs, 2]]).T
line.mlab_source.points = new
def update_all_lines(lines, points, scheme, bp_dict):
for line, bps in zip(lines, scheme):
update_line(line, points, bps, bp_dict)
def visualize_labels(config, labels_fname, outname, fps=300):
try:
scheme = config['labeling']['scheme']
except KeyError:
scheme = []
data = pd.read_csv(labels_fname)
cols = [x for x in data.columns if '_error' in x]
if len(scheme) == 0:
bodyparts = [c.replace('_error', '') for c in cols]
else:
bodyparts = sorted(set([x for dx in scheme for x in dx]))
bp_dict = dict(zip(bodyparts, range(len(bodyparts))))
all_points = np.array([np.array(data.loc[:, (bp+'_x', bp+'_y', bp+'_z')])
for bp in bodyparts], dtype='float64')
all_errors = np.array([np.array(data.loc[:, bp+'_error'])
for bp in bodyparts], dtype='float64')
all_scores = np.array([np.array(data.loc[:, bp+'_score'])
for bp in bodyparts], dtype='float64')
if config['triangulation']['optim']:
all_errors[np.isnan(all_errors)] = 0
else:
all_errors[np.isnan(all_errors)] = 10000
good = (all_errors < 100)
all_points[~good] = np.nan
all_points_flat = all_points.reshape(-1, 3)
check = ~np.isnan(all_points_flat[:, 0])
if np.sum(check) < 10:
print('too few points to plot, skipping...')
return
low, high = np.percentile(all_points_flat[check], [5, 95], axis=0)
nparts = len(bodyparts)
framedict = dict(zip(data['fnum'], data.index))
writer = skvideo.io.FFmpegWriter(outname, inputdict={
# '-hwaccel': 'auto',
'-framerate': str(fps),
}, outputdict={
'-vcodec': 'h264', '-qp': '28', '-pix_fmt': 'yuv420p'
})
cmap = get_cmap('tab10')
points = np.copy(all_points[:, 20])
points[0] = low
points[1] = high
s = np.arange(points.shape[0])
good = ~np.isnan(points[:, 0])
fig = mlab.figure(bgcolor=(1,1,1), size=(500,500))
fig.scene.anti_aliasing_frames = 2
low, high = np.percentile(points[good, 0], [10,90])
scale_factor = (high - low) / 12.0
mlab.clf()
pts = mlab.points3d(points[:, 0], points[:, 1], points[:, 2], s,
color=(0.8, 0.8, 0.8),
scale_mode='none', scale_factor=scale_factor)
lines = connect_all(points, scheme, bp_dict, cmap)
mlab.orientation_axes()
view = list(mlab.view())
mlab.view(focalpoint='auto', distance='auto')
for framenum in trange(data.shape[0], ncols=70):
fig.scene.disable_render = True
if framenum in framedict:
points = all_points[:, framenum]
else:
points = np.ones((nparts, 3))*np.nan
s = np.arange(points.shape[0])
good = ~np.isnan(points[:, 0])
new = np.vstack([points[:, 0], points[:, 1], points[:, 2]]).T
pts.mlab_source.points = new
update_all_lines(lines, points, scheme, bp_dict)
fig.scene.disable_render = False
img = mlab.screenshot()
mlab.view(*view, reset_roll=False)
writer.writeFrame(img)
mlab.close(all=True)
writer.close()
def process_session(config, session_path, filtered=False):
pipeline_videos_raw = config['pipeline']['videos_raw']
if filtered:
pipeline_videos_labeled_3d = config['pipeline']['videos_labeled_3d_filter']
pipeline_3d = config['pipeline']['pose_3d_filter']
else:
pipeline_videos_labeled_3d = config['pipeline']['videos_labeled_3d']
pipeline_3d = config['pipeline']['pose_3d']
video_ext = config['video_extension']
vid_fnames = glob(os.path.join(session_path,
pipeline_videos_raw, "*."+video_ext))
orig_fnames = defaultdict(list)
for vid in vid_fnames:
vidname = get_video_name(config, vid)
orig_fnames[vidname].append(vid)
labels_fnames = glob(os.path.join(session_path,
pipeline_3d, '*.csv'))
labels_fnames = sorted(labels_fnames, key=natural_keys)
outdir = os.path.join(session_path, pipeline_videos_labeled_3d)
if len(labels_fnames) > 0:
os.makedirs(outdir, exist_ok=True)
for fname in labels_fnames:
basename = os.path.basename(fname)
basename = os.path.splitext(basename)[0]
out_fname = os.path.join(outdir, basename+'.mp4')
if os.path.exists(out_fname) and \
abs(get_nframes(out_fname) - get_data_length(fname)) < 100:
continue
print(out_fname)
some_vid = orig_fnames[basename][0]
params = get_video_params(some_vid)
visualize_labels(config, fname, out_fname, params['fps'])
label_videos_3d_all = make_process_fun(process_session, filtered=False)
label_videos_3d_filtered_all = make_process_fun(process_session, filtered=True)
|
docs/introduction/codeexamples/twistdPlugin.py | tristanlatr/klein | 643 | 11093621 | <filename>docs/introduction/codeexamples/twistdPlugin.py
from klein import Klein
app = Klein()
@app.route("/")
def hello(request):
return "Hello, world!"
resource = app.resource
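# Assumed invocation for this doc example (hedged note, not verified here):
#   twistd -n web --class=twistdPlugin.resource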
|
doc/example8.py | ajschumacher/plac | 233 | 11093665 | <filename>doc/example8.py
# example8.py
def main(command: ("SQL query", 'option', 'c'), dsn):
if command:
print('executing %s on %s' % (command, dsn))
# ...
if __name__ == '__main__':
import plac; plac.call(main)
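# Illustrative invocation (assumed shell session; the output line simply mirrors
# the print() in main above, and "dsn1" is a made-up DSN):
#   $ python example8.py -c "select * from table" dsn1
#   executing select * from table on dsn1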
|
autocolorize/__init__.py | gustavla/autocolorizer | 234 | 11093677 | from __future__ import division, print_function, absolute_import
from .extraction import (extract_sparse, extract, load_classifier,
load_default_classifier, colorize)
from .color import match_lightness
from . import image
from .datasets import load_image_list
from .checker import checker_main
VERSION = (0, 2, 2)
ISRELEASED = False
__version__ = '{0}.{1}.{2}'.format(*VERSION)
if not ISRELEASED:
__version__ += '.git'
__all__ = ['extract_sparse',
'extract',
'load_classifier',
'load_default_classifier',
'colorize',
'match_lightness',
'image',
'load_image_list',
'checker_main']
|
flatdata-generator/flatdata/generator/app.py | gferon/flatdata | 140 | 11093692 | <reponame>gferon/flatdata
'''
Copyright (c) 2017 HERE Europe B.V.
See the LICENSE file in the root of this project for license details.
'''
import argparse
import logging
import os.path
import sys
# check that requirements are installed here
try:
# pylint: disable=unused-import
import pyparsing
import jinja2
except ModuleNotFoundError as exc:
print("Cannot import `%s`, you probably need to install it. See `generator/requirements.txt` or `README.md`." % exc.name, file=sys.stderr)
sys.exit(2)
from flatdata.generator.engine import Engine
from flatdata.generator.tree.errors import FlatdataSyntaxError
def _parse_command_line():
parser = argparse.ArgumentParser(
description="Generates code for a given flatdata schema file.")
parser.add_argument("-s", "--schema", type=str, required=True,
help="Path to the flatdata schema file")
parser.add_argument("-g", "--gen", type=str, required=True,
help="Language to generate bindings for. Supported values: %s" %
(', '.join(Engine.available_generators())))
parser.add_argument("-O", "--output-file", type=str, required=True,
default=None,
help="Destination file. Forces all output to be stored in one file")
parser.add_argument("-v", "--verbose", action="store_true",
help="Enable verbose mode")
parser.add_argument("--debug", action="store_true",
help="Enable debug output")
return parser.parse_args()
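# Hedged CLI sketch based only on the options defined above (the entry-point name
# is assumed, and "cpp" is an illustrative generator; see Engine.available_generators()):
#   flatdata-generator -s my_schema.flatdata -g cpp -O my_schema.hpp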
def _setup_logging(args):
level = logging.WARNING
if args.debug:
level = logging.DEBUG
elif args.verbose:
level = logging.INFO
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s",
datefmt="%H:%M:%S",
level=level)
def _check_args(args):
if not os.path.isfile(args.schema):
logging.fatal("Cannot find schema file at %s", args.schema)
sys.exit(1)
def _run(args):
_setup_logging(args)
_check_args(args)
with open(args.schema, 'r') as input_file:
schema = input_file.read()
try:
engine = Engine(schema)
logging.debug("Tree: %s", engine.tree)
except FlatdataSyntaxError as ex:
logging.fatal("Error reading schema: %s ", ex)
sys.exit(1)
try:
logging.info("Generating %s...", args.gen)
output_content = engine.render(args.gen)
except ValueError as ex:
logging.fatal("%s", ex)
sys.exit(1)
dirname = os.path.dirname(os.path.abspath(args.output_file))
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(args.output_file, "w") as output:
output.write(output_content)
logging.info("Code for %s is written to %s", args.gen, args.output_file)
def main():
"""Entrypoint"""
_run(_parse_command_line()) |
vstruct/defs/pe.py | rnui2k/vivisect | 716 | 11093717 | <reponame>rnui2k/vivisect
import vstruct
from vstruct.primitives import *
class IMAGE_BASE_RELOCATION(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_uint32()
self.SizeOfBlock = v_uint32()
class IMAGE_DATA_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VirtualAddress = v_uint32()
self.Size = v_uint32()
class IMAGE_DOS_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.e_magic = v_uint16()
self.e_cblp = v_uint16()
self.e_cp = v_uint16()
self.e_crlc = v_uint16()
self.e_cparhdr = v_uint16()
self.e_minalloc = v_uint16()
self.e_maxalloc = v_uint16()
self.e_ss = v_uint16()
self.e_sp = v_uint16()
self.e_csum = v_uint16()
self.e_ip = v_uint16()
self.e_cs = v_uint16()
self.e_lfarlc = v_uint16()
self.e_ovno = v_uint16()
self.e_res = vstruct.VArray([v_uint16() for i in range(4)])
self.e_oemid = v_uint16()
self.e_oeminfo = v_uint16()
self.e_res2 = vstruct.VArray([v_uint16() for i in range(10)])
self.e_lfanew = v_uint32()
class IMAGE_EXPORT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.Name = v_uint32()
self.Base = v_uint32()
self.NumberOfFunctions = v_uint32()
self.NumberOfNames = v_uint32()
self.AddressOfFunctions = v_uint32()
self.AddressOfNames = v_uint32()
self.AddressOfOrdinals = v_uint32()
class IMAGE_DEBUG_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.Type = v_uint32()
self.SizeOfData = v_uint32()
self.AddressOfRawData= v_uint32()
self.PointerToRawData= v_uint32()
class CV_INFO_PDB70(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CvSignature = v_uint32()
self.Signature = GUID()
self.Age = v_uint32()
self.PdbFileName = v_str(260)
def vsParse(self, bytez, offset=0):
bsize = len(bytez) - offset
self.vsGetField('PdbFileName').vsSetLength( bsize - 24 )
return vstruct.VStruct.vsParse(self, bytez, offset=offset)
class IMAGE_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Machine = v_uint16()
self.NumberOfSections = v_uint16()
self.TimeDateStamp = v_uint32()
self.PointerToSymbolTable = v_uint32()
self.NumberOfSymbols = v_uint32()
self.SizeOfOptionalHeader = v_uint16()
self.Characteristics = v_uint16()
class IMAGE_IMPORT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OriginalFirstThunk = v_uint32()
self.TimeDateStamp = v_uint32()
self.ForwarderChain = v_uint32()
self.Name = v_uint32()
self.FirstThunk = v_uint32()
# https://docs.microsoft.com/en-us/cpp/build/reference/structure-and-constant-definitions?view=vs-2019
class IMAGE_DELAY_IMPORT_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.grAttrs = v_uint32()
self.rvaDLLName = v_uint32()
self.rvaHmod = v_uint32()
self.rvaIAT = v_uint32()
self.rvaINT = v_uint32()
self.rvaBoundIAT = v_uint32()
self.rvaUnloadIAT = v_uint32()
self.dwTimeStamp = v_uint32()
class IMAGE_IMPORT_BY_NAME(vstruct.VStruct):
def __init__(self, namelen=128):
vstruct.VStruct.__init__(self)
self.Hint = v_uint16()
self.Name = v_str(size=namelen)
class IMAGE_LOAD_CONFIG_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Size = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.GlobalFlagsClear = v_uint32()
self.GlobalFlagsSet = v_uint32()
self.CriticalSectionDefaultTimeout = v_uint32()
self.DeCommitFreeBlockThreshold = v_uint32()
self.DeCommitTotalFreeThreshold = v_uint32()
self.LockPrefixTable = v_uint32()
self.MaximumAllocationSize = v_uint32()
self.VirtualMemoryThreshold = v_uint32()
self.ProcessHeapFlags = v_uint32()
self.ProcessAffinityMask = v_uint32()
self.CSDVersion = v_uint16()
self.Reserved1 = v_uint16()
self.EditList = v_uint32()
self.SecurityCookie = v_uint32()
self.SEHandlerTable = v_uint32()
self.SEHandlerCount = v_uint32()
class IMAGE_NT_HEADERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_bytes(4)
self.FileHeader = IMAGE_FILE_HEADER()
self.OptionalHeader = IMAGE_OPTIONAL_HEADER()
class IMAGE_NT_HEADERS64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_bytes(4)
self.FileHeader = IMAGE_FILE_HEADER()
self.OptionalHeader = IMAGE_OPTIONAL_HEADER64()
class IMAGE_OPTIONAL_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_bytes(2)
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.BaseOfData = v_uint32()
self.ImageBase = v_uint32()
self.SectionAlignment = v_uint32()
self.FileAlignment = v_uint32()
self.MajorOperatingSystemVersion = v_uint16()
self.MinorOperatingSystemVersion = v_uint16()
self.MajorImageVersion = v_uint16()
self.MinorImageVersion = v_uint16()
self.MajorSubsystemVersion = v_uint16()
self.MinorSubsystemVersion = v_uint16()
self.Win32VersionValue = v_uint32()
self.SizeOfImage = v_uint32()
self.SizeOfHeaders = v_uint32()
self.CheckSum = v_uint32()
self.Subsystem = v_uint16()
self.DllCharacteristics = v_uint16()
self.SizeOfStackReserve = v_uint32()
self.SizeOfStackCommit = v_uint32()
self.SizeOfHeapReserve = v_uint32()
self.SizeOfHeapCommit = v_uint32()
self.LoaderFlags = v_uint32()
self.NumberOfRvaAndSizes = v_uint32()
self.DataDirectory = vstruct.VArray([IMAGE_DATA_DIRECTORY() for i in range(16)])
class IMAGE_OPTIONAL_HEADER64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Magic = v_bytes(2)
self.MajorLinkerVersion = v_uint8()
self.MinorLinkerVersion = v_uint8()
self.SizeOfCode = v_uint32()
self.SizeOfInitializedData = v_uint32()
self.SizeOfUninitializedData = v_uint32()
self.AddressOfEntryPoint = v_uint32()
self.BaseOfCode = v_uint32()
self.ImageBase = v_uint64()
self.SectionAlignment = v_uint32()
self.FileAlignment = v_uint32()
self.MajorOperatingSystemVersion = v_uint16()
self.MinorOperatingSystemVersion = v_uint16()
self.MajorImageVersion = v_uint16()
self.MinorImageVersion = v_uint16()
self.MajorSubsystemVersion = v_uint16()
self.MinorSubsystemVersion = v_uint16()
self.Win32VersionValue = v_uint32()
self.SizeOfImage = v_uint32()
self.SizeOfHeaders = v_uint32()
self.CheckSum = v_uint32()
self.Subsystem = v_uint16()
self.DllCharacteristics = v_uint16()
self.SizeOfStackReserve = v_uint64()
self.SizeOfStackCommit = v_uint64()
self.SizeOfHeapReserve = v_uint64()
self.SizeOfHeapCommit = v_uint64()
self.LoaderFlags = v_uint32()
self.NumberOfRvaAndSizes = v_uint32()
self.DataDirectory = vstruct.VArray([IMAGE_DATA_DIRECTORY() for i in range(16)])
class IMAGE_RESOURCE_DIRECTORY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Characteristics = v_uint32()
self.TimeDateStamp = v_uint32()
self.MajorVersion = v_uint16()
self.MinorVersion = v_uint16()
self.NumberOfNamedEntries = v_uint16()
self.NumberOfIdEntries = v_uint16()
class IMAGE_RESOURCE_DIRECTORY_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = v_uint32()
self.OffsetToData = v_uint32()
class IMAGE_RESOURCE_DATA_ENTRY(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.OffsetToData = v_uint32()
self.Size = v_uint32()
self.CodePage = v_uint32()
self.Reserved = v_uint32()
class VS_FIXEDFILEINFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Signature = v_uint32()
self.StrucVersion = v_uint32()
self.FileVersionMS = v_uint32()
self.FileVersionLS = v_uint32()
self.ProductVersionMS = v_uint32()
self.ProductVersionLS = v_uint32()
self.FileFlagsMask = v_uint32()
self.FileFlags = v_uint32()
self.FileOS = v_uint32()
self.FileType = v_uint32()
self.FileSubtype = v_uint32()
self.FileDateMS = v_uint32()
self.FileDateLS = v_uint32()
class IMAGE_SECTION_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Name = v_str(8)
self.VirtualSize = v_uint32()
self.VirtualAddress = v_uint32()
self.SizeOfRawData = v_uint32()
self.PointerToRawData = v_uint32()
self.PointerToRelocations = v_uint32()
self.PointerToLineNumbers = v_uint32()
self.NumberOfRelocations = v_uint16()
self.NumberOfLineNumbers = v_uint16()
self.Characteristics = v_uint32()
class IMAGE_RUNTIME_FUNCTION_ENTRY(vstruct.VStruct):
"""
    Used in the .pdata section of a PE32+ for all non-leaf functions.
"""
def __init__(self):
vstruct.VStruct.__init__(self)
self.BeginAddress = v_uint32()
self.EndAddress = v_uint32()
self.UnwindInfoAddress = v_uint32()
class UNWIND_INFO(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.VerFlags = v_uint8()
self.SizeOfProlog = v_uint8()
self.CountOfCodes = v_uint8()
self.FrameRegister = v_uint8()
class SignatureEntry(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.size = v_int32(bigend=False)
# should always be 0x00020200
self.magic = v_bytes(size=4)
self.pkcs7 = v_bytes()
def pcb_size(self):
size = self.size
self.vsGetField('pkcs7').vsSetLength(size)
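# Minimal usage sketch (assumption, not part of the original module): these
# structures are filled from raw bytes via the vsParse() API used above, e.g.
#   dos = IMAGE_DOS_HEADER()
#   dos.vsParse(pe_bytes)
#   nt = IMAGE_NT_HEADERS()
#   nt.vsParse(pe_bytes, offset=dos.e_lfanew)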
|
tests/backend/topology/conftest.py | goncalogteixeira/pyswarns | 959 | 11093725 | <reponame>goncalogteixeira/pyswarns<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Fixtures for tests"""
# Import modules
import numpy as np
import pytest
# Import from pyswarms
# Import from package
from pyswarms.backend.swarms import Swarm
@pytest.fixture(scope="module")
def swarm():
"""A contrived instance of the Swarm class at a certain timestep"""
# fmt: off
attrs_at_t = {
"position": np.array([[40.95838686e-01, 5.87433429e-04, 6.49113772e-03],
[1.00559609e+00, 3.96477697e-02, 7.67205397e-01],
[8.87990950e-01, -3.64932609e-02, 1.89750725e-02],
[1.11646877e+00, 3.12037361e-03, 1.97885369e-01],
[8.96117216e-01, -9.79602053e-03, -1.66139336e-01],
[9.90423669e-01, 1.99307974e-03, -1.23386797e-02],
[2.06800701e-01, -1.67869387e-02, 1.14268810e-01],
[4.21786494e-01, 2.58755510e-02, 6.62254843e-01],
[9.90350831e-01, 3.81575154e-03, 8.80833545e-01],
[9.94353749e-01, -4.85086205e-02, 9.85313500e-03]]),
"velocity": np.array([[2.09076818e-02, 2.04936403e-03, 1.06761248e-02],
[1.64940497e-03, 5.67924469e-03, 9.74902301e-02],
[1.50445516e-01, 9.11699158e-03, 1.51474794e-02],
[2.94238740e-01, 5.71545680e-04, 1.54122294e-02],
[4.10430034e-02, 6.51847479e-04, 6.25109226e-02],
[6.71076116e-06, 1.89615516e-04, 4.65023770e-03],
[4.76081378e-02, 4.24416089e-03, 7.11856172e-02],
[1.33832808e-01, 1.81818698e-02, 1.16947941e-01],
[1.22849955e-03, 1.55685312e-03, 1.67819003e-02],
[5.60617396e-03, 4.31819608e-02, 2.52217220e-02]]),
"current_cost": np.array([1.07818462, 5.5647911, 19.6046078, 14.05300016, 3.72597614, 1.01169386,
16.51846203, 32.72262829, 3.80274901, 1.05237138]),
"pbest_cost": np.array([1.00362006, 2.39151041, 2.55208424, 5.00176207, 1.04510827, 1.00025284,
6.31216654, 2.53873121, 2.00530884, 1.05237138]),
"pbest_pos": np.array([[9.98033031e-01, 4.97392619e-03, 3.07726256e-03],
[1.00665809e+00, 4.22504014e-02, 9.84334657e-01],
[1.12159389e-02, 1.11429739e-01, 2.86388193e-02],
[1.64059236e-01, 6.85791237e-03, -2.32137604e-02],
[9.93740665e-01, -6.16501403e-03, -1.46096578e-02],
[9.90438476e-01, 2.50379538e-03, 1.87405987e-05],
[1.12301876e-01, 1.77099784e-03, 1.45382457e-01],
[4.41204876e-02, 4.84059652e-02, 1.05454822e+00],
[9.89348409e-01, -1.31692358e-03, 9.88291764e-01],
[9.99959923e-01, -5.32665972e-03, -1.53685870e-02]]),
"best_cost": 1.0002528364353296,
"best_pos": np.array([9.90438476e-01, 2.50379538e-03, 1.87405987e-05]),
"options": {'c1': 0.5, 'c2': 0.3, 'w': 0.9},
}
# fmt: on
return Swarm(**attrs_at_t)
|
tests/gdata_tests/apps/organization/data_test.py | gauravuniverse/gdata-python-client | 483 | 11093727 | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model tests for the Organization Unit Provisioning API."""
__author__ = '<NAME> <<EMAIL>>'
import unittest
import atom.core
from gdata import test_data
import gdata.apps.organization.data
import gdata.test_config as conf
class CustomerIdEntryTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.ORGANIZATION_UNIT_CUSTOMER_ID_ENTRY,
gdata.apps.organization.data.CustomerIdEntry)
def testCustomerIdEntryFromString(self):
self.assert_(isinstance(self.entry,
gdata.apps.organization.data.CustomerIdEntry))
self.assertEquals(self.entry.customer_id, 'C123A456B')
self.assertEquals(self.entry.customer_org_unit_name, 'example.com')
self.assertEquals(self.entry.customer_org_unit_description, 'example.com')
self.assertEquals(self.entry.org_unit_name, 'example.com')
self.assertEquals(self.entry.org_unit_description, 'tempdescription')
class OrgUnitEntryTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.ORGANIZATION_UNIT_ORGUNIT_ENTRY,
gdata.apps.organization.data.OrgUnitEntry)
self.feed = atom.core.parse(test_data.ORGANIZATION_UNIT_ORGUNIT_FEED,
gdata.apps.organization.data.OrgUnitFeed)
def testOrgUnitEntryFromString(self):
self.assert_(isinstance(self.entry,
gdata.apps.organization.data.OrgUnitEntry))
self.assertEquals(self.entry.org_unit_description, 'New Test Org')
self.assertEquals(self.entry.org_unit_name, 'Test Organization')
self.assertEquals(self.entry.org_unit_path, 'Test/Test+Organization')
self.assertEquals(self.entry.parent_org_unit_path, 'Test')
self.assertEquals(self.entry.org_unit_block_inheritance, 'false')
def testOrgUnitFeedFromString(self):
self.assertEquals(len(self.feed.entry), 2)
self.assert_(isinstance(self.feed,
gdata.apps.organization.data.OrgUnitFeed))
self.assert_(isinstance(self.feed.entry[0],
gdata.apps.organization.data.OrgUnitEntry))
self.assert_(isinstance(self.feed.entry[1],
gdata.apps.organization.data.OrgUnitEntry))
self.assertEquals(
self.feed.entry[0].find_edit_link(),
('https://apps-apis.google.com/a/feeds/orgunit/2.0/'
'C123A456B/testOrgUnit92'))
self.assertEquals(self.feed.entry[0].org_unit_description, 'test92')
self.assertEquals(self.feed.entry[0].org_unit_name, 'testOrgUnit92')
self.assertEquals(self.feed.entry[0].org_unit_path, 'Test/testOrgUnit92')
self.assertEquals(self.feed.entry[0].parent_org_unit_path, 'Test')
self.assertEquals(self.feed.entry[0].org_unit_block_inheritance, 'false')
self.assertEquals(
self.feed.entry[1].find_edit_link(),
('https://apps-apis.google.com/a/feeds/orgunit/2.0/'
'C123A456B/testOrgUnit93'))
self.assertEquals(self.feed.entry[1].org_unit_description, 'test93')
self.assertEquals(self.feed.entry[1].org_unit_name, 'testOrgUnit93')
self.assertEquals(self.feed.entry[1].org_unit_path, 'Test/testOrgUnit93')
self.assertEquals(self.feed.entry[1].parent_org_unit_path, 'Test')
self.assertEquals(self.feed.entry[1].org_unit_block_inheritance, 'false')
class OrgUserEntryTest(unittest.TestCase):
def setUp(self):
self.entry = atom.core.parse(test_data.ORGANIZATION_UNIT_ORGUSER_ENTRY,
gdata.apps.organization.data.OrgUserEntry)
self.feed = atom.core.parse(test_data.ORGANIZATION_UNIT_ORGUSER_FEED,
gdata.apps.organization.data.OrgUserFeed)
def testOrgUserEntryFromString(self):
self.assert_(isinstance(self.entry,
gdata.apps.organization.data.OrgUserEntry))
self.assertEquals(self.entry.user_email, '<EMAIL>')
self.assertEquals(self.entry.org_unit_path, 'Test')
def testOrgUserFeedFromString(self):
self.assertEquals(len(self.feed.entry), 2)
self.assert_(isinstance(self.feed,
gdata.apps.organization.data.OrgUserFeed))
self.assert_(isinstance(self.feed.entry[0],
gdata.apps.organization.data.OrgUserEntry))
self.assert_(isinstance(self.feed.entry[1],
gdata.apps.organization.data.OrgUserEntry))
self.assertEquals(
self.feed.entry[0].find_edit_link(),
('https://apps-apis.google.com/a/feeds/orguser/2.0/'
'C123A456B/user720430%40example.com'))
self.assertEquals(self.feed.entry[0].user_email, '<EMAIL>')
self.assertEquals(self.feed.entry[0].org_unit_path, 'Test')
self.assertEquals(
self.feed.entry[1].find_edit_link(),
('https://apps-apis.google.com/a/feeds/orguser/2.0/'
'C123A456B/user832648%40example.com'))
self.assertEquals(self.feed.entry[1].user_email, '<EMAIL>')
self.assertEquals(self.feed.entry[1].org_unit_path, 'Test')
def suite():
return conf.build_suite([OrgUnitEntryTest, OrgUserEntryTest])
if __name__ == '__main__':
unittest.main()
|
OpenAttack/attackers/pso/__init__.py | e-tornike/OpenAttack | 444 | 11093737 | from typing import List, Optional
import numpy as np
import copy
from ..classification import ClassificationAttacker, Classifier, ClassifierGoal
from ...text_process.tokenizer import Tokenizer, get_default_tokenizer
from ...attack_assist.substitute.word import WordSubstitute, get_default_substitute
from ...utils import get_language, check_language, language_by_name
from ...exceptions import WordNotInDictionaryException
from ...tags import Tag
from ...attack_assist.filter_words import get_default_filter_words
class PSOAttacker(ClassificationAttacker):
@property
def TAGS(self):
return { self.__lang_tag, Tag("get_pred", "victim"), Tag("get_prob", "victim")}
def __init__(self,
pop_size : int = 20,
max_iters : int = 20,
tokenizer : Optional[Tokenizer] = None,
substitute : Optional[WordSubstitute] = None,
filter_words : List[str] = None,
lang = None
):
"""
Word-level Textual Adversarial Attacking as Combinatorial Optimization. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. ACL 2020.
`[pdf] <https://www.aclweb.org/anthology/2020.acl-main.540.pdf>`__
`[code] <https://github.com/thunlp/SememePSO-Attack>`__
Args:
            pop_size: Particle swarm population size. **Default:** 20
            max_iters: Maximum number of iterations of the PSO algorithm. **Default:** 20
tokenizer: A tokenizer that will be used during the attack procedure. Must be an instance of :py:class:`.Tokenizer`
substitute: A substitute that will be used during the attack procedure. Must be an instance of :py:class:`.WordSubstitute`
lang: The language used in attacker. If is `None` then `attacker` will intelligently select the language based on other parameters.
            filter_words: A list of words that will be preserved in the attack procedure.
:Classifier Capacity:
* get_pred
* get_prob
"""
lst = []
if tokenizer is not None:
lst.append(tokenizer)
if substitute is not None:
lst.append(substitute)
if len(lst) > 0:
self.__lang_tag = get_language(lst)
else:
self.__lang_tag = language_by_name(lang)
if self.__lang_tag is None:
raise ValueError("Unknown language `%s`" % lang)
if substitute is None:
substitute = get_default_substitute(self.__lang_tag)
self.substitute = substitute
if tokenizer is None:
tokenizer = get_default_tokenizer(self.__lang_tag)
self.tokenizer = tokenizer
self.pop_size = pop_size
self.max_iters = max_iters
if filter_words is None:
filter_words = get_default_filter_words(self.__lang_tag)
self.filter_words = set(filter_words)
check_language([self.tokenizer, self.substitute], self.__lang_tag)
def attack(self, victim: Classifier, sentence, goal: ClassifierGoal):
self.invoke_dict = {}
x_orig = sentence.lower()
x_orig = self.tokenizer.tokenize(x_orig)
x_pos = list(map(lambda x: x[1], x_orig))
x_orig = list(map(lambda x: x[0], x_orig))
x_len = len(x_orig)
neighbours_nums = [
min(self.get_neighbour_num(word, pos),10) if word not in self.filter_words else 0
for word, pos in zip(x_orig, x_pos)
]
neighbours = [
self.get_neighbours(word, pos)
if word not in self.filter_words
else []
for word, pos in zip(x_orig, x_pos)
]
if np.sum(neighbours_nums) == 0:
return None
w_select_probs = neighbours_nums / np.sum(neighbours_nums)
pop = self.generate_population(x_orig, neighbours, w_select_probs, x_len)
part_elites = pop
if goal.targeted:
all_elite_score = 100
part_elites_scores = [100 for _ in range(self.pop_size)]
else:
all_elite_score = -1
part_elites_scores = [-1 for _ in range(self.pop_size)]
all_elite = pop[0]
Omega_1 = 0.8
Omega_2 = 0.2
C1_origin = 0.8
C2_origin = 0.2
V = [np.random.uniform(-3, 3) for _ in range(self.pop_size)]
V_P = [[V[t] for _ in range(x_len)] for t in range(self.pop_size)]
for i in range(self.max_iters):
pop_preds = self.predict_batch(victim, pop)
pop_scores = pop_preds[:, goal.target]
if goal.targeted:
pop_ranks = np.argsort(pop_scores)[::-1]
top_attack = pop_ranks[0]
if np.max(pop_scores) > all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.max(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] > part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) == goal.target:
return self.tokenizer.detokenize(pop[top_attack])
else:
pop_ranks = np.argsort(pop_scores)
top_attack = pop_ranks[0]
if np.min(pop_scores) < all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.min(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] < part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) != goal.target:
return self.tokenizer.detokenize(pop[top_attack])
Omega = (Omega_1 - Omega_2) * (self.max_iters - i) / self.max_iters + Omega_2
C1 = C1_origin - i / self.max_iters * (C1_origin - C2_origin)
C2 = C2_origin + i / self.max_iters * (C1_origin - C2_origin)
for id in range(self.pop_size):
for dim in range(x_len):
V_P[id][dim] = Omega * V_P[id][dim] + (1 - Omega) * (
self.equal(pop[id][dim], part_elites[id][dim]) + self.equal(pop[id][dim],
all_elite[dim]))
turn_prob = [self.sigmod(V_P[id][d]) for d in range(x_len)]
P1 = C1
P2 = C2
if np.random.uniform() < P1:
pop[id] = self.turn(part_elites[id], pop[id], turn_prob, x_len)
if np.random.uniform() < P2:
pop[id] = self.turn(all_elite, pop[id], turn_prob, x_len)
pop_preds = self.predict_batch(victim, pop)
pop_scores = pop_preds[:, goal.target]
if goal.targeted:
pop_ranks = np.argsort(pop_scores)[::-1]
top_attack = pop_ranks[0]
if np.max(pop_scores) > all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.max(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] > part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) == goal.target:
return self.tokenizer.detokenize( pop[top_attack] )
else:
pop_ranks = np.argsort(pop_scores)
top_attack = pop_ranks[0]
if np.min(pop_scores) < all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.min(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] < part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) != goal.target:
return self.tokenizer.detokenize( pop[top_attack] )
new_pop = []
for x in pop:
change_ratio = self.count_change_ratio(x, x_orig, x_len)
p_change = 1 - 2 * change_ratio
if np.random.uniform() < p_change:
tem = self.mutate( x, x_orig, neighbours, w_select_probs)
new_pop.append(tem)
else:
new_pop.append(x)
pop = new_pop
return None #Failed
def predict_batch(self, victim, sentences):
return np.array([self.predict(victim, s) for s in sentences])
def predict(self, victim, sentence):
if tuple(sentence) in self.invoke_dict:
return self.invoke_dict[tuple(sentence)]
tem = victim.get_prob(self.make_batch([sentence]))[0]
self.invoke_dict[tuple(sentence)] = tem
return tem
def do_replace(self, x_cur, pos, new_word):
x_new = x_cur.copy()
x_new[pos] = new_word
return x_new
def generate_population(self, x_orig, neighbours_list, w_select_probs, x_len):
pop = []
x_len = w_select_probs.shape[0]
for i in range(self.pop_size):
r = np.random.choice(x_len, 1, p=w_select_probs)[0]
replace_list = neighbours_list[r]
sub = np.random.choice(replace_list, 1)[0]
tem = self.do_replace(x_orig, r, sub)
pop.append(tem)
return pop
def turn(self, x1, x2, prob, x_len):
x_new = copy.deepcopy(x2)
for i in range(x_len):
if np.random.uniform() < prob[i]:
x_new[i] = x1[i]
return x_new
def mutate(self, x, x_orig, neigbhours_list, w_select_probs):
x_len = w_select_probs.shape[0]
rand_idx = np.random.choice(x_len, 1,p=w_select_probs)[0]
while x[rand_idx] != x_orig[rand_idx] and self.sum_diff(x_orig,x) < np.sum(np.sign(w_select_probs)):
rand_idx = np.random.choice(x_len, 1,p=w_select_probs)[0]
replace_list = neigbhours_list[rand_idx]
sub_idx= np.random.choice(len(replace_list), 1)[0]
new_x=copy.deepcopy(x)
new_x[rand_idx]=replace_list[sub_idx]
return new_x
def sum_diff(self, x_orig, x_cur):
ret = 0
for wa, wb in zip(x_orig, x_cur):
if wa != wb:
ret += 1
return ret
def norm(self, n):
tn = []
for i in n:
if i <= 0:
tn.append(0)
else:
tn.append(i)
s = np.sum(tn)
if s == 0:
for i in range(len(tn)):
tn[i] = 1
return [t / len(tn) for t in tn]
new_n = [t / s for t in tn]
return new_n
def get_neighbour_num(self, word, pos):
try:
return len(self.substitute(word, pos))
except WordNotInDictionaryException:
return 0
def get_neighbours(self, word, pos):
try:
return list(
map(
lambda x: x[0],
self.substitute(word, pos),
)
)
except WordNotInDictionaryException:
return []
def make_batch(self, sents):
return [self.tokenizer.detokenize(sent) for sent in sents]
def equal(self, a, b):
if a == b:
return -3
else:
return 3
def sigmod(self, n):
return 1 / (1 + np.exp(-n))
def count_change_ratio(self, x, x_orig, x_len):
change_ratio = float(np.sum(np.array(x) != np.array(x_orig))) / float(x_len)
return change_ratio
|
pyroomacoustics/transform/tests/test_stft_timing.py | Womac/pyroomacoustics | 915 | 11093779 | <reponame>Womac/pyroomacoustics<filename>pyroomacoustics/transform/tests/test_stft_timing.py
from __future__ import division, print_function
import numpy as np
import pyroomacoustics as pra
from pyroomacoustics.transform import STFT
import time
import warnings
# test signal
np.random.seed(0)
num_mic = 25
signals = np.random.randn(100000, num_mic).astype(np.float32)
fs = 16000
# STFT parameters
block_size = 512
hop = block_size // 2
win = pra.hann(block_size)
x_r = np.zeros(signals.shape)
num_times = 50
print()
"""
One frame at a time
"""
print(
"Averaging computation time over %d cases of %d channels of %d samples (%0.1f s at %0.1f kHz)."
% (num_times, num_mic, len(signals), (len(signals) / fs), fs / 1000)
)
print()
print("----- SINGLE FRAME AT A TIME -----")
print("With STFT object (not fixed) : ", end="")
stft = STFT(block_size, hop=hop, channels=num_mic, streaming=True, analysis_window=win)
start = time.time()
for k in range(num_times):
x_r = np.zeros(signals.shape)
n = 0
while signals.shape[0] - n > hop:
stft.analysis(
signals[
n : n + hop,
]
)
x_r[
n : n + hop,
] = stft.synthesis()
n += hop
avg_time = (time.time() - start) / num_times
print("%0.3f sec" % avg_time)
err_dB = 20 * np.log10(
np.max(
np.abs(
signals[
: n - hop,
]
- x_r[
hop:n,
]
)
)
)
print("Error [dB] : %0.3f" % err_dB)
print("With STFT object (fixed) : ", end="")
stft = STFT(
block_size,
hop=hop,
channels=num_mic,
num_frames=1,
streaming=True,
analysis_window=win,
)
start = time.time()
for k in range(num_times):
x_r = np.zeros(signals.shape)
n = 0
while signals.shape[0] - n > hop:
stft.analysis(
signals[
n : n + hop,
]
)
x_r[
n : n + hop,
] = stft.synthesis()
n += hop
avg_time = (time.time() - start) / num_times
print("%0.3f sec" % avg_time)
err_dB = 20 * np.log10(
np.max(
np.abs(
signals[
: n - hop,
]
- x_r[
hop:n,
]
)
)
)
print("Error [dB] : %0.3f" % err_dB)
"""
Multiple frame at a time (non-streaming)
"""
print()
print("----- MULTIPLE FRAMES AT A TIME -----")
warnings.filterwarnings("ignore") # to avoid warning of appending zeros to be printed
print("With STFT object (not fixed) : ", end="")
stft = STFT(block_size, hop=hop, channels=num_mic, analysis_window=win, streaming=False)
start = time.time()
for k in range(num_times):
stft.analysis(signals)
x_r = stft.synthesis()
avg_time = (time.time() - start) / num_times
print("%0.3f sec" % avg_time)
err_dB = 20 * np.log10(
np.max(np.abs(signals[hop : len(x_r) - hop] - x_r[hop : len(x_r) - hop]))
)
print("Error [dB] : %0.3f" % err_dB)
warnings.filterwarnings("default")
print("With STFT object (fixed) : ", end="")
num_frames = (len(signals) - block_size) // hop + 1
stft = STFT(
block_size,
hop=hop,
channels=num_mic,
num_frames=num_frames,
analysis_window=win,
streaming=False,
)
start = time.time()
for k in range(num_times):
stft.analysis(signals[: (num_frames - 1) * hop + block_size, :])
x_r = stft.synthesis()
avg_time = (time.time() - start) / num_times
print("%0.3f sec" % avg_time)
err_dB = 20 * np.log10(
np.max(np.abs(signals[hop : len(x_r) - hop] - x_r[hop : len(x_r) - hop]))
)
print("Error [dB] : %0.3f" % err_dB)
|
nncf/version.py | MaximProshin/nncf | 136 | 11093785 | __version__ = '2.1.0'
BKC_TORCH_VERSION = '1.9.1'
BKC_TORCHVISION_VERSION = '0.10.1'
BKC_TF_VERSION = '2.5.*'
|
backend/services/tools/push_notify.py | sleepingAnt/viewfinder | 645 | 11093796 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Manual push notification facility.
"""
__author__ = '<EMAIL> (<NAME>)'
from tornado import options
from viewfinder.backend.services.apns import APNS
from viewfinder.backend.services.push_notification import PushNotification
from viewfinder.backend.www import www_main
options.define('token', default=None, help='push notification token (e.g. "apns-prod:(.*)")')
options.define('badge', default=1, help='badge value (integer)')
def _Start(callback):
"""Allows manual push notifications."""
assert options.options.token, 'must specify a push notification token'
assert options.options.badge is not None, 'must specify a badge value'
PushNotification.Push(options.options.token, badge=int(options.options.badge))
if __name__ == '__main__':
www_main.InitAndRun(_Start)
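# Example invocation (the device token below is a hypothetical placeholder and
# the command assumes a configured Viewfinder backend environment):
#
#   python push_notify.py --token='apns-prod:<device-token>' --badge=2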
|
tests/test_soap_bridge.py | rexyeah/jira-cli | 125 | 11093847 | <filename>tests/test_soap_bridge.py
"""
"""
import os
import tempfile
import unittest
import jiracli
from jiracli.bridge import JiraSoapBridge
from jiracli.utils import Config
from .common_bridge_cases import BridgeTests, jiravcr
class SoapBridgeTests(unittest.TestCase, BridgeTests):
def setUp(self):
tmp_config = tempfile.mktemp()
self.config = Config(tmp_config)
jiracli.utils.CONFIG_FILE = tmp_config
self.cache_dir = tempfile.mkdtemp()
jiracli.cache.CACHE_DIR = self.cache_dir
self.config.username = "testuser"
self.config.password = "<PASSWORD>"
self.vcr_directory = "fixtures/soap"
with jiravcr.use_cassette(os.path.join(self.vcr_directory, "login.yaml")):
self.bridge = JiraSoapBridge("https://indydevs.atlassian.net",
self.config)
self.bridge.login(self.config.username, self.config.password)
|
grr/client/grr_response_client/unprivileged/windows/sandbox.py | khanhgithead/grr | 4,238 | 11093851 | <reponame>khanhgithead/grr
#!/usr/bin/env python
"""Interface to Windows Sandboxing for the `process` module.
Adds support for global sandbox initialization and handles checks for whether
sandboxing is supported by the current platform.
"""
import logging
import platform
from typing import Iterable, Optional
_sandbox_name: Optional[str] = None
class Error(Exception):
pass
def InitSandbox(name: str, paths_read_only: Iterable[str]) -> None:
"""Initializes a global sandbox.
Args:
name: The unique name of the Sandbox. Windows will create unique state
      (directory tree, registry tree and a SID) based on the name.
    paths_read_only: List of paths which will be shared in read-only and
execute mode with the Sandbox SID.
Raises:
Error: if the global sandbox has been already initialized.
"""
if int(platform.release()) < 8:
logging.info(
"Skipping sandbox initialization. Unsupported platform release: %s.",
platform.release())
return
global _sandbox_name
if _sandbox_name is not None:
raise Error(
f"Sandbox has been already initialized with name {_sandbox_name}.")
logging.info("Initializing sandbox. Name: %s. Read-only paths: %s.", name,
paths_read_only)
# pylint:disable=g-import-not-at-top
from grr_response_client.unprivileged.windows import sandbox_lib
# pylint:enable=g-import-not-at-top
sandbox_lib.InitSandbox(name, paths_read_only)
_sandbox_name = name
class Sandbox:
"""Represents an optional, app container based sandbox.
Provides necessary data to be used for the win32 API `CreateProcesss` for
running a process in the context of a sandbox.
"""
@property
def sid_string(self) -> Optional[str]:
"""App container SID represented as string.
`None` if an App container is not available.
"""
return None
@property
def desktop_name(self) -> Optional[str]:
"""Full alternate desktop name of the App container.
`None` if an App container is not available.
"""
return None
def __enter__(self) -> "Sandbox":
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
pass
class NullSandbox(Sandbox):
"""A Sandbox implementation performing no sandboxing."""
class AppContainerSandbox(Sandbox):
"""A `Sandbox` implementation performing AppContainer based sandboxing.
See
https://docs.microsoft.com/en-us/windows/win32/secauthz/appcontainer-for-legacy-applications-
for details on AppContainers.
"""
def __init__(self, name: str) -> None:
"""Constructor.
Args:
name: Name of the app container.
"""
# pylint:disable=g-import-not-at-top
from grr_response_client.unprivileged.windows import sandbox_lib
# pylint:enable=g-import-not-at-top
self._sandbox = sandbox_lib.Sandbox(name)
@property
def sid_string(self) -> Optional[str]:
return self._sandbox.sid_string
@property
def desktop_name(self) -> Optional[str]:
return self._sandbox.desktop_name
def __enter__(self) -> "AppContainerSandbox":
self._sandbox.Open()
logging.info("Entering sandbox. SID: %s. Desktop: %s.", self.sid_string,
self.desktop_name)
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self._sandbox.Close()
def CreateSandbox() -> Sandbox:
"""Creates an app container based sandbox.
Returns:
A null `Sandbox` implementation in case of sandboxing is not available
or if `InitSandbox` has not been called.
"""
global _sandbox_name
if _sandbox_name is None:
return NullSandbox()
else:
return AppContainerSandbox(_sandbox_name)
def IsSandboxInitialized() -> bool:
"""Returns `True` if a global sandbox has been successfully initialized."""
return _sandbox_name is not None
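# A minimal usage sketch, not part of the original module: the sandbox name and
# read-only path below are hypothetical, and on Windows this additionally needs
# the GRR client dependencies plus sufficient privileges. On other platforms
# CreateSandbox() simply falls back to the NullSandbox.
if __name__ == "__main__":
  if platform.system() == "Windows":
    InitSandbox("grr-demo-sandbox", [r"C:\Program Files\DemoTool"])
  with CreateSandbox() as sandbox:
    print(type(sandbox).__name__, sandbox.sid_string, sandbox.desktop_name)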
|
glue/plugins/dendro_viewer/state.py | HPLegion/glue | 550 | 11093881 | # -*- coding: utf-8 -*-
from glue.core import BaseData
from glue.viewers.matplotlib.state import (MatplotlibDataViewerState,
MatplotlibLayerState,
DeferredDrawCallbackProperty as DDCProperty,
DeferredDrawSelectionCallbackProperty as DDSCProperty)
from glue.core.data_combo_helper import ComponentIDComboHelper
from echo import keep_in_sync
from .dendro_helpers import dendrogram_layout
__all__ = ['DendrogramViewerState', 'DendrogramLayerState']
class Layout(object):
def __init__(self, x, y):
self.x = x
self.y = y
@property
def xy(self):
return self.x, self.y
class DendrogramViewerState(MatplotlibDataViewerState):
"""
A state class that includes all the attributes for a dendrogram viewer.
"""
height_att = DDSCProperty()
parent_att = DDSCProperty()
order_att = DDSCProperty()
y_log = DDCProperty(False)
select_substruct = DDCProperty(True)
reference_data = DDCProperty()
_layout = DDCProperty()
def __init__(self, **kwargs):
super(DendrogramViewerState, self).__init__()
self.add_callback('layers', self._layers_changed)
self.height_att_helper = ComponentIDComboHelper(self, 'height_att')
self.parent_att_helper = ComponentIDComboHelper(self, 'parent_att')
self.order_att_helper = ComponentIDComboHelper(self, 'order_att')
self.add_callback('height_att', self._update_layout)
self.add_callback('parent_att', self._update_layout)
self.add_callback('order_att', self._update_layout)
self.add_callback('reference_data', self._on_reference_data_change)
self.update_from_dict(kwargs)
def _on_reference_data_change(self, data):
if self.reference_data is None:
return
self.height_att = self.reference_data.find_component_id('height')
self.parent_att = self.reference_data.find_component_id('parent')
self.order_att = self.height_att
def _update_layout(self, att):
if self.height_att is None or self.parent_att is None or self.order_att is None or self.reference_data is None:
self._layout = None
else:
height = self.reference_data[self.height_att].ravel()
parent = self.reference_data[self.parent_att].astype(int).ravel()
order = self.reference_data[self.order_att].ravel()
x, y = dendrogram_layout(parent, height, order)
self._layout = Layout(x, y)
def _layers_changed(self, *args):
layers_data = self.layers_data
layers_data_cache = getattr(self, '_layers_data_cache', [])
if layers_data == layers_data_cache:
return
self.height_att_helper.set_multiple_data(layers_data)
self.parent_att_helper.set_multiple_data(layers_data)
self.order_att_helper.set_multiple_data(layers_data)
for layer in layers_data:
if isinstance(layer, BaseData):
self.reference_data = layer
break
self._layers_data_cache = layers_data
class DendrogramLayerState(MatplotlibLayerState):
"""
A state class that includes all the attributes for layers in a dendrogram plot.
"""
linewidth = DDCProperty(1, docstring="The line width")
def __init__(self, viewer_state=None, **kwargs):
super(DendrogramLayerState, self).__init__(viewer_state=viewer_state, **kwargs)
self.linewidth = self.layer.style.linewidth
self._sync_linewidth = keep_in_sync(self, 'linewidth', self.layer.style, 'linewidth')
|
tests/test_fetch_cord_computer.py | TabulateJarl8/FetchCord | 286 | 11093887 | import unittest
from fetch_cord.computer.Computer import Computer
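# make_orderer() records the definition order of the decorated test methods;
# `compare` is then handed to unittest's default loader below so the tests run
# in source order instead of the loader's default alphabetical order.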
def make_orderer():
order = {}
def ordered(f):
order[f.__name__] = len(order)
return f
def compare(a, b):
return [1, -1][order[a] < order[b]]
return ordered, compare
ordered, compare = make_orderer()
unittest.defaultTestLoader.sortTestMethodsUsing = compare
class TestFetchCordComputer(unittest.TestCase):
"""Test class for Computer module"""
pc: Computer
@classmethod
def setUpClass(cls):
"""Setup the Computer class and load the neofetch info"""
cls.pc = Computer()
@classmethod
def tearDownClass(self):
"""Called once at the end"""
pass
@ordered
def test_detected_os(self):
"""Test the detected os result"""
print("Detected OS : " + self.pc.os)
@ordered
def test_detected_neofetch(self):
"""Test detected neofetch"""
print("Detected Neofetch : ", end="")
if self.pc.neofetch:
print("neofetch")
elif self.pc.neofetchwin:
print("neofetch-win")
else:
print("None")
@ordered
def test_detected_cpu(self):
"""Test detected CPU"""
if len(self.pc.cpu) == 0 or self.pc.cpu == ["N/A"]:
print("No CPU detected !")
else:
for cpu in self.pc.cpu:
print("Detected CPU : " + cpu.model)
print("Detected CPU Temp : " + str(cpu.temp) + "°c")
@ordered
def test_detected_gpu(self):
"""Test detected GPU"""
if len(self.pc.gpu) == 0 or self.pc.gpu == ["N/A"]:
print("No GPU detected !")
else:
for gpu in self.pc.gpu:
print("Detected GPU : " + gpu.model)
@ordered
def test_detected_disks(self):
"""Test detected disks"""
if len(self.pc.disks) == 0 or self.pc.disks == ["N/A"]:
print("No Disk detected !")
else:
for disk in self.pc.disks:
print("Detected Disk : " + disk)
@ordered
def test_detected_memory(self):
"""Test detected memory"""
print("Detected Memory : " + "\t".join(self.pc.memory))
@ordered
def test_detected_osinfo(self):
"""Test detected OS info"""
print("Detected OS info : " + "\t".join(self.pc.osinfo))
@ordered
def test_detected_motherboard(self):
"""Test detected motherboard"""
print("Detected Motherboard : " + "\t".join(self.pc.motherboard))
@ordered
def test_detected_host(self):
"""Test detected host"""
print("Detected host : " + "\t".join(self.pc.host))
@ordered
def test_detected_resolution(self):
"""Test detected resolution"""
print("Detected resolution : " + "\t".join(self.pc.resolution))
@ordered
def test_detected_theme(self):
"""Test detected theme"""
print("Detected theme : " + "\t".join(self.pc.theme))
@ordered
def test_detected_packages(self):
"""Test detected packages"""
print("Detected packages : " + "\t".join(self.pc.packages))
@ordered
def test_detected_shell(self):
"""Test detected shell"""
print("Detected shell : " + "\t".join(self.pc.shell))
@ordered
def test_detected_kernel(self):
"""Test detected kernel"""
print("Detected kernel : " + "\t".join(self.pc.kernel))
@ordered
def test_detected_terminal(self):
"""Test detected terminal"""
print("Detected terminal : " + "\t".join(self.pc.terminal))
@ordered
def test_detected_font(self):
"""Test detected font"""
print("Detected font : " + "\t".join(self.pc.font))
@ordered
def test_detected_de(self):
"""Test detected de"""
print("Detected de : " + "\t".join(self.pc.de))
@ordered
def test_detected_wm(self):
"""Test detected wm"""
print("Detected wm : " + "\t".join(self.pc.wm))
if __name__ == "__main__":
unittest.main()
|
api/core/agent.py | zaxtyson/anime-api | 543 | 11093901 | <reponame>zaxtyson/anime-api
from typing import Callable, Coroutine
from api.config import Config
from api.core.anime import *
from api.core.cache import CacheDB
from api.core.danmaku import *
from api.core.proxy import AnimeProxy
from api.core.scheduler import Scheduler
from api.iptv.iptv import TVSource, get_sources
from api.update.bangumi import Bangumi
class Agent:
"""
    The agent that serves the requests coming from the routes.
    It fetches, filters and caches data from the scheduler and sends it to the routes.
"""
def __init__(self):
self._scheduler = Scheduler()
self._bangumi = Bangumi()
self._config = Config()
# Memory Database for cache
self._anime_db = CacheDB()
self._danmaku_db = CacheDB()
self._proxy_db = CacheDB()
self._others_db = CacheDB()
def cache_clear(self) -> float:
"""清空缓存, 返回释放的内存(KB)"""
mem_free = 0
mem_free += self._anime_db.clear()
mem_free += self._danmaku_db.clear()
mem_free += self._proxy_db.clear()
mem_free += self._others_db.clear()
return mem_free
def get_global_config(self):
"""获取全局配置"""
return self._config.all_configs
def change_module_state(self, module: str, enable: bool):
"""设置模块启用状态"""
return self._scheduler.change_module_state(module, enable)
async def get_bangumi_updates(self):
"""获取番组表信息"""
bangumi = self._others_db.fetch("bangumi")
if not bangumi: # 缓存起来
bangumi = await self._bangumi.get_bangumi_updates()
self._others_db.store(bangumi, "bangumi")
return bangumi
def get_iptv_sources(self) -> List[TVSource]:
"""获取 IPTV 源列表"""
return get_sources()
async def get_anime_metas(
self,
keyword: str,
*,
callback: Callable[[AnimeMeta], None] = None,
co_callback: Callable[[AnimeMeta], Coroutine] = None
) -> None:
"""搜索番剧, 返回摘要信息, 过滤相似度低的数据"""
# 番剧搜索不缓存, 异步推送
return await self._scheduler.search_anime(keyword, callback=callback, co_callback=co_callback)
async def get_danmaku_metas(
self,
keyword: str,
*,
callback: Callable[[DanmakuMeta], None] = None,
co_callback: Callable[[DanmakuMeta], Coroutine] = None
) -> None:
"""搜索弹幕库, 返回摘要信息, 过滤相似度低的数据"""
# TODO: Implement data filter
# 番剧搜索结果是相似的, 对应的弹幕搜索结果相对固定, 缓存备用
if metas := self._danmaku_db.fetch(keyword):
if callback is not None:
for meta in metas:
callback(meta)
return
if co_callback is not None:
for meta in metas:
await co_callback(meta)
return
        # no cache hit, run the search once
        metas = []
        def _callback(_meta):
            metas.append(_meta)  # keep a copy for the cache
callback(_meta)
async def _co_callback(_meta):
metas.append(_meta)
await co_callback(_meta)
if callback is not None:
await self._scheduler.search_danmaku(keyword, callback=_callback)
elif co_callback is not None:
await self._scheduler.search_danmaku(keyword, co_callback=_co_callback)
if metas:
self._danmaku_db.store(metas, keyword)
async def get_anime_detail(self, token: str) -> Optional[AnimeDetail]:
"""获取番剧详情信息, 如果有缓存, 使用缓存的值"""
detail: AnimeDetail = self._anime_db.fetch(token)
if detail is not None:
logger.info(f"Using cached {detail}")
return detail
        # no cache hit, build an AnimeMeta object from the token and parse once
meta = AnimeMeta.build_from(token)
logger.debug(f"Build AnimeMeta from token: {meta.module} | {meta.detail_url}")
detail = await self._scheduler.parse_anime_detail(meta)
        if not detail or detail.is_empty():  # parsing failed or returned empty info
logger.error(f"Parse anime detail info failed")
return None
        self._anime_db.store(detail, token)  # parsed successfully, cache it
return detail
async def get_anime_real_url(self, token: str, playlist: int, episode: int) -> AnimeInfo:
"""获取资源直链, 如果存在未过期的缓存, 使用缓存的值, 否则重新解析"""
url_token = f"{token}|{playlist}|{episode}"
url: AnimeInfo = self._anime_db.fetch(url_token)
        if url and url.is_available():  # cached and not expired
logger.info(f"Using cached real url: {url}")
return url
        # no cache found or the cached direct URL has expired, parse once
detail = await self.get_anime_detail(token)
if detail is not None:
anime: Anime = detail.get_anime(int(playlist), int(episode))
if anime is not None:
url = await self._scheduler.parse_anime_real_url(anime)
if url.is_available():
self._anime_db.store(url, url_token)
return url
        # in all other cases, parsing failed
return AnimeInfo()
async def get_anime_proxy(self, token: str, playlist: int, episode: int) -> Optional[AnimeProxy]:
"""获取视频数据流代理器对象"""
proxy_token = f"{token}|{playlist}|{episode}"
proxy: AnimeProxy = self._proxy_db.fetch(proxy_token)
        if proxy and proxy.is_available():  # the cached proxy object is still usable
return proxy
url = await self.get_anime_real_url(token, int(playlist), int(episode))
if not url.is_available():
return
meta = AnimeMeta.build_from(token)
proxy_cls = self._scheduler.get_anime_proxy_class(meta)
        proxy: AnimeProxy = proxy_cls(url)  # rebuild a fresh one
self._proxy_db.store(proxy, proxy_token)
return proxy
async def get_danmaku_detail(self, token: str) -> DanmakuDetail:
"""获取弹幕库详情信息, 如果存在缓存, 使用缓存的值"""
detail: DanmakuDetail = self._danmaku_db.fetch(token)
if detail is not None:
logger.info(f"Using cached {detail}")
return detail
# 没有缓存, 通过 token 构建 AnimeMeta 对象, 解析一次
meta = DanmakuMeta.build_from(token)
logger.debug(f"Build DanmakuMeta from token: {meta.module} | {meta.play_url}")
detail = await self._scheduler.parse_danmaku_detail(meta)
        if detail.is_empty():  # parsing failed or returned empty info
            logger.error("Parse danmaku detail info failed")
return detail
        self._danmaku_db.store(detail, token)  # parsed successfully, cache it
return detail
async def get_danmaku_data(self, token: str, episode: int) -> DanmakuData:
"""获取弹幕数据, 如果有缓存, 使用缓存的值"""
danmaku_token = f"{token}|{episode}"
data_token = f"{danmaku_token}|data"
data: DanmakuData = self._danmaku_db.fetch(data_token)
if data is not None:
logger.info(f"Using cached danmaku data: {data}")
return data
detail: DanmakuDetail = await self.get_danmaku_detail(token)
if not detail.is_empty():
danmaku = detail.get_danmaku(int(episode))
if danmaku is not None:
data = await self._scheduler.parse_danmaku_data(danmaku)
                if not data.is_empty():  # cache it if we actually got data
self._danmaku_db.store(data, data_token)
return data
return DanmakuData()
|
mmdet3d/core/utils/__init__.py | Guangyun-Xu/mmdetection3d | 2,216 | 11093903 | # Copyright (c) OpenMMLab. All rights reserved.
from .gaussian import draw_heatmap_gaussian, gaussian_2d, gaussian_radius
__all__ = ['gaussian_2d', 'gaussian_radius', 'draw_heatmap_gaussian']
|
HLTrigger/Configuration/python/extend_argparse.py | ckamtsikis/cmssw | 852 | 11093907 | import argparse as _argparse
import textwrap as _textwrap
# argparse's formatters remove newlines from comand descriptions, so we define a new one
class HelpFormatterRespectNewlines(_argparse.HelpFormatter):
"""Help message formatter which retains line breaks in argument descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
lines = []
for line in text.splitlines():
line = self._whitespace_matcher.sub(' ', line).strip()
lines.extend( _textwrap.wrap(line, width) )
return lines
# argparse's formatters are not really able to discover the terminale size, so we override them
def FixedWidthFormatter(formatter, width):
"""Adaptor for argparse formatters using an explicit fixed width
"""
def f(*args, **keywords):
# add or replace the "width" parameter
keywords['width'] = width
return formatter(*args, **keywords)
return f
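# A minimal usage sketch, not part of the original module: a parser whose
# argument help keeps its explicit line breaks and is wrapped at a fixed
# 80-column width (the program and option names are illustrative only).
if __name__ == "__main__":
    parser = _argparse.ArgumentParser(
        prog="demo",
        formatter_class=FixedWidthFormatter(HelpFormatterRespectNewlines, width=80),
    )
    parser.add_argument(
        "--mode",
        help="Pick one of:\nfast - skim the input\nfull - process everything",
    )
    parser.print_help()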
|
bindings/python/cntk/ops/tests/transpose_test.py | shyamalschandra/CNTK | 17,702 | 11093944 | # ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
from cntk import transpose
def test_transpose():
"""
Test for transpose()
:return: Nothing
"""
repeat_for = 5
for repeat in range(repeat_for):
for i in range(1, 5):
permutation = np.random.permutation(i + 1)
permutation = [int(p) for p in permutation]
shape = [np.random.randint(2, 5) for _ in range(i + 1)]
entries = np.product(shape)
data = np.arange(entries)
data.shape = shape
np_transposed = np.transpose(np.copy(data), np.copy(permutation))
by_transposeCNTK = transpose(np.ascontiguousarray(data), permutation).eval()
assert np.alltrue(np_transposed == by_transposeCNTK)
if __name__ == "__main__":
test_transpose()
|
elfi/examples/bignk.py | diadochos/elfi | 166 | 11093992 | <gh_stars>100-1000
"""Implementation of the bivariate g-and-k example model."""
from functools import partial
import numpy as np
import scipy.stats as ss
import elfi
from elfi.examples.gnk import euclidean_multiss, ss_robust
def BiGNK(A1, A2, B1, B2, g1, g2, k1, k2, rho, c=.8, n_obs=150, batch_size=1, random_state=None):
"""Sample the bivariate g-and-k distribution.
References
----------
[1] <NAME>., & <NAME>. (2011).
Likelihood-free Bayesian estimation of multivariate quantile distributions.
Computational Statistics & Data Analysis, 55(9), 2541-2556.
[2] <NAME>., <NAME>., & <NAME>. (2009).
Bayesian estimation of quantile distributions.
Statistics and Computing, 19(2), 189-201.
The quantile function of g-and-k distribution is defined as follows:
    Q_{gnk} = A + B * (1 + c * ((1 - exp(-g * z(p))) / (1 + exp(-g * z(p)))))
        * (1 + z(p)^2)^k * z(p), where
z(p) is the p-th standard normal quantile.
To sample from the g-and-k distribution, draw z(p) ~ N(0, 1) and evaluate Q_{gnk}.
Parameters
----------
A1 : float or array_like
Location parameter (the 1st dimension).
A2 : float or array_like
Location parameter (the 2nd dimension).
B1 : float or array_like
Scale parameter (the 1st dimension).
B2 : float or array_like
Scale parameter (the 2nd dimension).
g1 : float or array_like
Skewness parameter (the 1st dimension).
g2 : float or array_like
Skewness parameter (the 2nd dimension).
k1 : float or array_like
Kurtosis parameter (the 1st dimension).
k2 : float or array_like
Kurtosis parameter (the 2nd dimension).
rho : float or array_like
Parameters' covariance.
c : float, optional
Overall asymmetry parameter, by default fixed to 0.8 as in Allingham et al. (2009).
n_obs : int, optional
batch_size : int, optional
random_state : np.random.RandomState, optional
Returns
-------
array_like
Yielded points.
"""
# Transforming the arrays' shape to be compatible with batching.
A1 = np.asanyarray(A1).reshape((-1, 1))
A2 = np.asanyarray(A2).reshape((-1, 1))
B1 = np.asanyarray(B1).reshape((-1, 1))
B2 = np.asanyarray(B2).reshape((-1, 1))
g1 = np.asanyarray(g1).reshape((-1, 1))
g2 = np.asanyarray(g2).reshape((-1, 1))
k1 = np.asanyarray(k1).reshape((-1, 1, 1))
k2 = np.asanyarray(k2).reshape((-1, 1, 1))
rho = np.asanyarray(rho).reshape((-1, 1))
# Merging the multi-dimensional parameters.
A = np.hstack((A1, A2))
B = np.hstack((B1, B2))
g = np.hstack((g1, g2))
k = np.hstack((k1, k2))
# Obtaining z(p) ~ N(0, 1).
z_batches = []
for i in range(batch_size):
# Initialising a separate covariance matrix for each batch.
matrix_cov = np.array([[1, rho[i]], [rho[i], 1]])
z_batch = ss.multivariate_normal.rvs(cov=matrix_cov, size=n_obs, random_state=random_state)
z_batches.append(z_batch)
z = np.array(z_batches)
# Obtaining the first bracket term of the quantile function Q_{gnk}.
gdotz = np.einsum('ik,ijk->ijk', g, z)
term_exp = (1 - np.exp(-gdotz)) / (1 + np.exp(-gdotz))
term_first = np.einsum('ik,ijk->ijk', B, (1 + c * (term_exp)))
# Obtaining the second bracket term, of the quantile function Q_{gnk}.
term_second_unraised = 1 + np.power(z, 2)
k = np.repeat(k, n_obs, axis=2)
k = np.swapaxes(k, 1, 2)
term_second = np.power(term_second_unraised, k)
# Evaluating the quantile function Q_{gnk}.
term_product = term_first * term_second * z
term_product_misaligned = np.swapaxes(term_product, 1, 0)
y_misaligned = np.add(A, term_product_misaligned)
y_obs = np.swapaxes(y_misaligned, 1, 0)
return y_obs
def get_model(n_obs=150, true_params=None, seed=None):
"""Return an initialised bivariate g-and-k model.
Parameters
----------
n_obs : int, optional
Number of the observations.
true_params : array_like, optional
Parameters defining the model.
seed : np.random.RandomState, optional
Returns
-------
elfi.ElfiModel
"""
m = elfi.new_model()
# Initialising the parameters as in Drovandi & Pettitt (2011).
if true_params is None:
true_params = [3, 4, 1, 0.5, 1, 2, .5, .4, 0.6]
# Initialising the prior settings as in Drovandi & Pettitt (2011).
priors = []
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='a1'))
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='a2'))
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='b1'))
priors.append(elfi.Prior('uniform', 0, 5, model=m, name='b2'))
priors.append(elfi.Prior('uniform', -5, 10, model=m, name='g1'))
priors.append(elfi.Prior('uniform', -5, 10, model=m, name='g2'))
priors.append(elfi.Prior('uniform', -.5, 5.5, model=m, name='k1'))
priors.append(elfi.Prior('uniform', -.5, 5.5, model=m, name='k2'))
EPS = np.finfo(float).eps
priors.append(elfi.Prior('uniform', -1 + EPS, 2 - 2 * EPS, model=m, name='rho'))
# Obtaining the observations.
y_obs = BiGNK(*true_params, n_obs=n_obs, random_state=np.random.RandomState(seed))
# Defining the simulator.
fn_simulator = partial(BiGNK, n_obs=n_obs)
elfi.Simulator(fn_simulator, *priors, observed=y_obs, name='BiGNK')
# Initialising the default summary statistics.
default_ss = elfi.Summary(ss_robust, m['BiGNK'], name='ss_robust')
    # Using the custom Euclidean distance function designed for
# the summary statistics of shape (batch_size, dim_ss, dim_ss_point).
elfi.Discrepancy(euclidean_multiss, default_ss, name='d')
return m
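# A minimal, hypothetical inference sketch (not part of the original example):
# it builds the model above and runs plain rejection ABC on the discrepancy
# node 'd'; the batch size, sample count and quantile are illustrative only.
if __name__ == "__main__":
    m = get_model(n_obs=150, seed=1)
    rej = elfi.Rejection(m['d'], batch_size=1000, seed=1)
    result = rej.sample(100, quantile=0.01)
    print(result)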
|
tests/lifecycles/test_real_invocation.py | tavaresrodrigo/kopf | 855 | 11093994 | <gh_stars>100-1000
import logging
import pytest
import kopf
from kopf._cogs.structs.bodies import Body
from kopf._cogs.structs.ephemera import Memo
from kopf._cogs.structs.patches import Patch
from kopf._core.actions.progression import State
from kopf._core.engines.indexing import OperatorIndexers
from kopf._core.intents.causes import ChangingCause, Reason
@pytest.mark.parametrize('lifecycle', [
kopf.lifecycles.all_at_once,
kopf.lifecycles.one_by_one,
kopf.lifecycles.randomized,
kopf.lifecycles.shuffled,
kopf.lifecycles.asap,
])
async def test_protocol_invocation(lifecycle, resource):
"""
    Ensure that all kwargs are accepted properly,
    especially when new kwargs are added or the invocation protocol changes.
"""
# The values are irrelevant, they can be anything.
state = State.from_scratch()
cause = ChangingCause(
logger=logging.getLogger('kopf.test.fake.logger'),
indices=OperatorIndexers().indices,
resource=resource,
patch=Patch(),
memo=Memo(),
body=Body({}),
initial=False,
reason=Reason.NOOP,
)
handlers = []
selected = lifecycle(handlers, state=state, **cause.kwargs)
assert isinstance(selected, (tuple, list))
assert len(selected) == 0
|
api/organisations/permissions/permissions.py | mevinbabuc/flagsmith | 1,259 | 11094025 | <gh_stars>1000+
from django.conf import settings
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import BasePermission
from organisations.models import Organisation
CREATE_PROJECT = "CREATE_PROJECT"
ORGANISATION_PERMISSIONS = (
(CREATE_PROJECT, "Allows the user to create projects in this organisation."),
)
class NestedOrganisationEntityPermission(BasePermission):
def has_permission(self, request, view):
organisation_pk = view.kwargs.get("organisation_pk")
if organisation_pk and request.user.is_admin(
Organisation.objects.get(pk=organisation_pk)
):
return True
raise PermissionDenied(
"User does not have sufficient privileges to perform this action"
)
def has_object_permission(self, request, view, obj):
organisation_id = view.kwargs.get("organisation_pk")
organisation = Organisation.objects.get(id=organisation_id)
return request.user.is_admin(organisation)
class OrganisationPermission(BasePermission):
def has_permission(self, request, view):
if view.action == "create" and settings.RESTRICT_ORG_CREATE_TO_SUPERUSERS:
return request.user.is_superuser
return True
def has_object_permission(self, request, view, obj):
if request.user.is_admin(obj) or (
view.action == "my_permissions" and obj in request.user.organisations.all()
):
return True
raise PermissionDenied(
"User does not have sufficient privileges to perform this action"
)
class OrganisationUsersPermission(BasePermission):
def has_permission(self, request, view):
organisation_id = view.kwargs.get("organisation_pk")
organisation = Organisation.objects.get(id=organisation_id)
if request.user.is_admin(organisation):
return True
if view.action == "list" and request.user.belongs_to(organisation.id):
return True
return False
def has_object_permission(self, request, view, obj):
organisation_id = view.kwargs.get("organisation_pk")
organisation = Organisation.objects.get(id=organisation_id)
if request.user.is_admin(organisation):
return True
return False
class UserPermissionGroupPermission(BasePermission):
def has_permission(self, request, view):
organisation_pk = view.kwargs.get("organisation_pk")
if organisation_pk and request.user.is_admin(
Organisation.objects.get(pk=organisation_pk)
):
return True
if view.action == "list" and request.user.belongs_to(int(organisation_pk)):
return True
return False
def has_object_permission(self, request, view, obj):
organisation_id = view.kwargs.get("organisation_pk")
organisation = Organisation.objects.get(id=organisation_id)
if request.user.is_admin(organisation):
return True
return False
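# A minimal, hypothetical sketch of how one of these permission classes is
# typically attached to a DRF view (the viewset name is illustrative and not
# part of this module):
#
#   class ProjectViewSet(viewsets.ModelViewSet):
#       permission_classes = [NestedOrganisationEntityPermission]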
|
03-adversarial-examples/dataset.py | PeiqinSun/tf-tutorials | 184 | 11094029 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import cv2
import numpy as np
import pickle
from common import config
class Dataset():
dataset_path = '../../cifar10-dataset/'
def __init__(self, dataset_name):
self.ds_name = dataset_name
self.minibatch_size = config.minibatch_size
self.rng = np.random
self.instances = config.nr_instances
if self.ds_name == 'train':
self.files = [self.dataset_path + 'data_batch_{}'.format(i + 1) for i in range(5)]
else:
self.files = [self.dataset_path + 'test_batch']
def load(self):
datas_list, labels_list = [], []
for file in self.files:
with open(file, 'rb') as f:
samples = pickle.load(f, encoding = 'bytes')
datas_list.extend(samples[b'data'])
labels_list.extend(samples[b'labels'])
self.samples_mat = {'X': datas_list, 'Y': labels_list}
return self
@property
def total_instances(self):
return self.instances
@property
def minibatches(self):
return self.instances // config.minibatch_size
def instance_generator(self):
for i in range(self.instances):
img_r = self.samples_mat['X'][i][:1024].reshape(config.image_shape[0], config.image_shape[1], 1)
img_g = self.samples_mat['X'][i][1024:2048].reshape(config.image_shape[0], config.image_shape[1], 1)
img_b = self.samples_mat['X'][i][2048:].reshape(config.image_shape[0], config.image_shape[1], 1)
img = np.concatenate((img_r, img_g, img_b), axis = 2)
label = self.samples_mat['Y'][i]
yield img.astype(np.float32), np.array(label, dtype=np.int32)
if __name__ == "__main__":
ds = Dataset('train')
ds = ds.load()
gen = ds.instance_generator()
imggrid = []
while True:
for i in range(25):
img, label = next(gen)
img = cv2.resize(img, (96, 96))
cv2.putText(img, str(label), (0, config.image_shape[0]), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 255, 0), 2)
imggrid.append(img)
imggrid = np.array(imggrid).reshape((5, 5, img.shape[0], img.shape[1], img.shape[2]))
imggrid = imggrid.transpose((0, 2, 1, 3, 4)).reshape((5*img.shape[0], 5*img.shape[1], 3))
cv2.imshow('', imggrid.astype('uint8'))
c = chr(cv2.waitKey(0) & 0xff)
if c == 'q':
exit()
imggrid = []
|
src/datasets/packaged_modules/text/text.py | rpatil524/datasets | 3,395 | 11094040 | <gh_stars>1000+
from dataclasses import dataclass
from io import StringIO
from typing import Optional
import pyarrow as pa
import datasets
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class TextConfig(datasets.BuilderConfig):
"""BuilderConfig for text files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
chunksize: int = 10 << 20 # 10MB
keep_linebreaks: bool = False
sample_by: str = "line"
class Text(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = TextConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
data_files = dl_manager.download_and_extract(self.config.data_files)
if isinstance(data_files, (str, list, tuple)):
files = data_files
if isinstance(files, str):
files = [files]
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": dl_manager.iter_files(files)})
]
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": dl_manager.iter_files(files)}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"text": pa.string()}))
def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
for file_idx, file in enumerate(files):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding) as f:
if self.config.sample_by == "line":
batch_idx = 0
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
batch += f.readline() # finish current line
# StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
batch = StringIO(batch).readlines()
if not self.config.keep_linebreaks:
batch = [line.rstrip("\n") for line in batch]
pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
elif self.config.sample_by == "paragraph":
batch_idx = 0
batch = ""
while True:
batch += f.read(self.config.chunksize)
if not batch:
break
batch += f.readline() # finish current line
batch = batch.split("\n\n")
pa_table = pa.Table.from_arrays(
[pa.array([example for example in batch[:-1] if example])], names=pa_table_names
)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
batch = batch[-1]
elif self.config.sample_by == "document":
text = f.read()
pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table)
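# A minimal usage sketch, not part of the original module: it writes a small
# temporary text file and loads it through this packaged "text" builder once
# per sampling mode, to show how `sample_by` changes what counts as an example.
if __name__ == "__main__":
    import os
    import tempfile
    from datasets import load_dataset
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "sample.txt")
        with open(path, "w", encoding="utf-8") as f:
            f.write("first line\nsecond line\n\nsecond paragraph\n")
        for mode in ("line", "paragraph", "document"):
            ds = load_dataset("text", data_files=path, sample_by=mode, split="train")
            print(mode, ds["text"])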
|
open/users/migrations/0006_auto_20200812_0936.py | lawrendran/open | 105 | 11094059 | # Generated by Django 2.2.13 on 2020-08-12 13:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("users", "0005_add_created_modified_to_user"),
]
operations = [
migrations.AlterModelOptions(name="user", options={"ordering": ["-id"]},),
]
|
tests/unit/test_merchant_account.py | futureironman/braintree_python | 182 | 11094064 | <reponame>futureironman/braintree_python
from tests.test_helper import *
class TestMerchantAccount(unittest.TestCase):
def test_create_new_merchant_account_with_all_params(self):
params = {
"id": "sub_merchant_account",
"status": "active",
"master_merchant_account": {
"id": "master_merchant_account",
"status": "active"
},
"individual": {
"first_name": "John",
"last_name": "Doe",
"email": "<EMAIL>",
"date_of_birth": "1970-01-01",
"phone": "3125551234",
"ssn_last_4": "6789",
"address": {
"street_address": "123 Fake St",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
}
},
"business": {
"dba_name": "<NAME>",
"tax_id": "123456789",
},
"funding": {
"account_number_last_4": "8798",
"routing_number": "071000013",
"descriptor": "Joes Bloggs MI",
}
}
merchant_account = MerchantAccount(None, params)
self.assertEqual(merchant_account.status, "active")
self.assertEqual(merchant_account.id, "sub_merchant_account")
self.assertEqual(merchant_account.master_merchant_account.id, "master_merchant_account")
self.assertEqual(merchant_account.master_merchant_account.status, "active")
self.assertEqual(merchant_account.individual_details.first_name, "John")
self.assertEqual(merchant_account.individual_details.last_name, "Doe")
self.assertEqual(merchant_account.individual_details.email, "<EMAIL>")
self.assertEqual(merchant_account.individual_details.date_of_birth, "1970-01-01")
self.assertEqual(merchant_account.individual_details.phone, "3125551234")
self.assertEqual(merchant_account.individual_details.ssn_last_4, "6789")
self.assertEqual(merchant_account.individual_details.address_details.street_address, "123 Fake St")
self.assertEqual(merchant_account.individual_details.address_details.locality, "Chicago")
self.assertEqual(merchant_account.individual_details.address_details.region, "IL")
self.assertEqual(merchant_account.individual_details.address_details.postal_code, "60622")
self.assertEqual(merchant_account.business_details.dba_name, "<NAME>")
self.assertEqual(merchant_account.business_details.tax_id, "123456789")
self.assertEqual(merchant_account.funding_details.account_number_last_4, "8798")
self.assertEqual(merchant_account.funding_details.routing_number, "071000013")
self.assertEqual(merchant_account.funding_details.descriptor, "<NAME> MI")
|
examples/plot_5_unicode_everywhere.py | andriyor/sphinx-gallery | 309 | 11094072 | # -*- coding: utf-8 -*-
"""
Using Unicode everywhere 🤗
===========================
This example demonstrates how to include non-ASCII characters, mostly emoji 🎉
to stress test the build and test environments that parse the example files.
"""
from __future__ import unicode_literals
# 🎉 👍
# Code source: <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 20
plt.rcParams["font.monospace"] = ["DejaVu Sans Mono"]
plt.rcParams["font.family"] = "monospace"
plt.figure()
x = np.random.randn(100) * 2 + 1
y = np.random.randn(100) * 6 + 3
s = np.random.rand(*x.shape) * 800 + 500
plt.scatter(x, y, s, marker=r'$\oint$')
x = np.random.randn(60) * 7 - 4
y = np.random.randn(60) * 3 - 2
s = s[:x.size]
plt.scatter(x, y, s, alpha=0.5, c='g', marker=r'$\clubsuit$')
plt.xlabel('⇒')
plt.ylabel('⇒')
plt.title('♲' * 10)
print('Std out capture 😎')
# To avoid matplotlib text output
plt.show()
# %%
# Debug fonts
print(plt.rcParams)
|
ztag/annotations/common_smtp.py | justinbastress/ztag | 107 | 11094089 | from ztag.annotation import *
class CommonSMTP(Annotation):
protocol = protocols.SMTP
subprotocol = protocols.SMTP.STARTTLS
port = None
def process(self, obj, meta):
s_banner = obj["banner"][4:]
if "Postfix" in s_banner:
meta.local_metadata.product = "Postfix"
return meta
elif "Sendmail" in s_banner:
meta.local_metadata.product = "Sendmail"
return meta
elif "Exim" in s_banner:
meta.local_metadata.product = "Exim"
return meta
elif "gsmtp" in s_banner:
meta.local_metadata.manufacturer = "Google"
meta.local_metadata.product = "SMTP"
return meta
|
src/pymap3d/vallado.py | scivision/pymap3d | 108 | 11094102 | """
converts right ascension, declination to azimuth, elevation and vice versa.
Normally do this via AstroPy.
These functions are fallbacks for those wihtout AstroPy.
Michael Hirsch implementation of algorithms from <NAME>
"""
from __future__ import annotations
from datetime import datetime
from .mathfun import sin, cos, degrees, radians, asin, atan2
from .sidereal import datetime2sidereal
__all__ = ["azel2radec", "radec2azel"]
def azel2radec(
az_deg: float,
el_deg: float,
lat_deg: float,
lon_deg: float,
time: datetime,
) -> tuple[float, float]:
"""
converts azimuth, elevation to right ascension, declination
Parameters
----------
az_deg : float
azimuth (clockwise) to point [degrees]
el_deg : float
elevation above horizon to point [degrees]
lat_deg : float
observer WGS84 latitude [degrees]
lon_deg : float
observer WGS84 longitude [degrees]
time : datetime.datetime
time of observation
Results
-------
ra_deg : float
right ascension to target [degrees]
dec_deg : float
declination of target [degrees]
from D.Vallado Fundamentals of Astrodynamics and Applications
p.258-259
"""
if abs(lat_deg) > 90:
raise ValueError("-90 <= lat <= 90")
az = radians(az_deg)
el = radians(el_deg)
lat = radians(lat_deg)
lon = radians(lon_deg)
# %% Vallado "algorithm 28" p 268
dec = asin(sin(el) * sin(lat) + cos(el) * cos(lat) * cos(az))
lha = atan2(
-(sin(az) * cos(el)) / cos(dec), (sin(el) - sin(lat) * sin(dec)) / (cos(dec) * cos(lat))
)
lst = datetime2sidereal(time, lon) # lon, ra in RADIANS
""" by definition right ascension [0, 360) degrees """
return degrees(lst - lha) % 360, degrees(dec)
def radec2azel(
ra_deg: float,
dec_deg: float,
lat_deg: float,
lon_deg: float,
time: datetime,
) -> tuple[float, float]:
"""
converts right ascension, declination to azimuth, elevation
Parameters
----------
ra_deg : float
right ascension to target [degrees]
dec_deg : float
declination to target [degrees]
lat_deg : float
observer WGS84 latitude [degrees]
lon_deg : float
observer WGS84 longitude [degrees]
time : datetime.datetime
time of observation
Results
-------
az_deg : float
azimuth clockwise from north to point [degrees]
el_deg : float
elevation above horizon to point [degrees]
from <NAME> "Fundamentals of Astrodynamics and Applications "
4th Edition Ch. 4.4 pg. 266-268
"""
if abs(lat_deg) > 90:
raise ValueError("-90 <= lat <= 90")
ra = radians(ra_deg)
dec = radians(dec_deg)
lat = radians(lat_deg)
lon = radians(lon_deg)
lst = datetime2sidereal(time, lon) # RADIANS
# %% Eq. 4-11 p. 267 LOCAL HOUR ANGLE
lha = lst - ra
# %% #Eq. 4-12 p. 267
el = asin(sin(lat) * sin(dec) + cos(lat) * cos(dec) * cos(lha))
# %% combine Eq. 4-13 and 4-14 p. 268
az = atan2(
-sin(lha) * cos(dec) / cos(el), (sin(dec) - sin(el) * sin(lat)) / (cos(el) * cos(lat))
)
return degrees(az) % 360.0, degrees(el)
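# ---------------------------------------------------------------------------
# Usage sketch (illustration only; not part of the original module). Because
# this file uses relative imports, call it through the installed package:
#
#     from datetime import datetime
#     from pymap3d.vallado import azel2radec, radec2azel
#
#     t = datetime(2014, 12, 25, 12, 0, 0)
#     ra, dec = azel2radec(180.0, 30.0, 42.0, -82.0, t)   # degrees
#     az, el = radec2azel(ra, dec, 42.0, -82.0, t)        # ~ (180.0, 30.0)
#
# The observer coordinates and time above are arbitrary example values.
# ---------------------------------------------------------------------------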
|
aliyun-python-sdk-alb/aliyunsdkalb/request/v20200616/ListLoadBalancersRequest.py | leafcoder/aliyun-openapi-python-sdk | 1,001 | 11094121 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class ListLoadBalancersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'ListLoadBalancers','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LoadBalancerNames(self): # Array
return self.get_query_params().get('LoadBalancerNames')
def set_LoadBalancerNames(self, LoadBalancerNames): # Array
for index1, value1 in enumerate(LoadBalancerNames):
self.add_query_param('LoadBalancerNames.' + str(index1 + 1) + '.LoadBalancerNames', value1)
def get_LoadBalancerIds(self): # Array
return self.get_query_params().get('LoadBalancerIds')
def set_LoadBalancerIds(self, LoadBalancerIds): # Array
for index1, value1 in enumerate(LoadBalancerIds):
self.add_query_param('LoadBalancerIds.' + str(index1 + 1) + '.LoadBalancerIds', value1)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_LoadBalancerBussinessStatus(self): # String
return self.get_query_params().get('LoadBalancerBussinessStatus')
def set_LoadBalancerBussinessStatus(self, LoadBalancerBussinessStatus): # String
self.add_query_param('LoadBalancerBussinessStatus', LoadBalancerBussinessStatus)
def get_AddressType(self): # String
return self.get_query_params().get('AddressType')
def set_AddressType(self, AddressType): # String
self.add_query_param('AddressType', AddressType)
def get_VpcIds(self): # Array
return self.get_query_params().get('VpcIds')
def set_VpcIds(self, VpcIds): # Array
for index1, value1 in enumerate(VpcIds):
self.add_query_param('VpcIds.' + str(index1 + 1) + '.VpcIds', value1)
def get_Tag(self): # Array
return self.get_query_params().get('Tag')
def set_Tag(self, Tag): # Array
for index1, value1 in enumerate(Tag):
for key2, value2 in value1.items():
self.add_query_param('Tag.' + str(index1 + 1) + '.' + key2 + '.Value', value2)
self.add_query_param('Tag.' + str(index1 + 1) + '.' + key2 + '.Key', value2)
def get_LoadBalancerStatus(self): # String
return self.get_query_params().get('LoadBalancerStatus')
def set_LoadBalancerStatus(self, LoadBalancerStatus): # String
self.add_query_param('LoadBalancerStatus', LoadBalancerStatus)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
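if __name__ == '__main__':
    # Usage sketch (illustration only, not part of the generated SDK file).
    # The parameter values below are made-up examples; actually sending the
    # request needs an authenticated aliyunsdkcore AcsClient, e.g.
    # client.do_action_with_exception(request), which is assumed and not shown.
    request = ListLoadBalancersRequest()
    request.set_MaxResults(20)
    request.set_AddressType('Internet')
    request.set_LoadBalancerStatus('Active')
    print(request.get_query_params())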
|
split_folder/__init__.py | Nivratti/split-folders | 292 | 11094127 | __version__ = "0.4.3"
from .split import *
|
src/WebApp/App_Data/jobs/continuous/DatabricksAndSimulatedDevicesSetup/simulated_devices_setup.py | Azure/AI | 110 | 11094146 | import os
import json
import random
from iot_hub_helpers import IoTHub
def create_device(iot_hub, device_id, simulation_parameters):
iot_hub.create_device(device_id)
tags = {
'simulated': True
}
tags.update(simulation_parameters)
twin_properties = {
'tags': tags
}
iot_hub.update_twin(device_id, json.dumps(twin_properties))
if __name__ == "__main__":
IOT_HUB_NAME = os.environ['IOT_HUB_NAME']
IOT_HUB_OWNER_KEY = os.environ['IOT_HUB_OWNER_KEY']
iot_hub = IoTHub(IOT_HUB_NAME, IOT_HUB_OWNER_KEY)
count = 5
for i in range(count):
device_id = 'Machine-{0:03d}'.format(i)
h1 = random.uniform(0.8, 0.95)
h2 = random.uniform(0.8, 0.95)
simulation_parameters = {
'simulator': 'devices.engines.Engine',
'h1': h1,
'h2': h2
}
create_device(iot_hub, device_id, simulation_parameters)
|
server/actor_libs/database/orm/utils.py | goodfree/ActorCloud | 173 | 11094155 | from datetime import datetime
from typing import List, Dict
from flask import g, request
from sqlalchemy import asc, desc, inspect, or_
from sqlalchemy.sql.sqltypes import Integer, String
from actor_libs.cache import Cache
from actor_libs.errors import ParameterInvalid
def dumps_query_result(query_result, **kwargs):
""" Dump a query result """
record = {}
if query_result.__class__.__name__ == 'result':
query_result_dict = mapping_result(query_result)
for key, value in query_result_dict.items():
# When query has with_entities
if hasattr(value, '__tablename__'):
schema = get_model_schema(value.__class__.__name__)
dump_dict = schema.dump(value).data
record.update(dump_dict)
elif isinstance(value, datetime):
record[key] = value.strftime("%Y-%m-%d %H:%M:%S")
else:
record[key] = value
else:
schema = get_model_schema(query_result.__class__.__name__)
record = schema.dump(query_result).data
if kwargs.get('code_list'):
record = dict_code_label(record, kwargs['code_list'])
return record
def dumps_query_results(query_results: List, **kwargs):
""" Dump multiple query results """
records = []
records_append = records.append
for query_result in query_results:
record = dumps_query_result(query_result)
if kwargs.get('code_list'):
record = dict_code_label(record, kwargs['code_list'])
records_append(record)
return records
def paginate(query, code_list=None):
""" Result display by paging of query """
page = request.args.get('_page', 1, type=int)
limit = request.args.get('_limit', 10, type=int)
limit = 1000 if limit > 1000 else limit
offset = (page - 1) * limit if (page - 1) * limit > 0 else 0
if request.args.get('paginate', type=str) == 'false':
query_results = query.limit(10000).all()
page, limit = 0, 10000
else:
query_results = query.limit(limit).offset(offset).all()
# paginate items count
if page == 1 and len(query_results) < limit:
total_count = len(query_results)
else:
total_count = query.order_by(None).count()
records = dumps_query_results(query_results, code_list=code_list)
meta = {'page': page, 'limit': limit, 'count': total_count} # build paginate schema
result = {'items': records, 'meta': meta}
return result
def sort_query(model, query):
""" sort query """
order = request.args.get('_order', 'desc', type=str)
sort_key = request.args.get('_sort', type=str)
if sort_key:
sort_key = sort_key
elif hasattr(model, 'createAt'):
sort_key = 'createAt'
elif hasattr(model, 'msgTime'):
sort_key = 'msgTime'
if hasattr(model, sort_key):
if order == 'asc':
query = query.order_by(asc(getattr(model, sort_key)))
else:
query = query.order_by(desc(getattr(model, sort_key)))
return query
def base_filter_tenant(model, query, tenant_uid):
""" Filter tenant """
if not tenant_uid:
tenant_uid = g.get('tenant_uid')
exclude_models = ['Message', 'Lwm2mObject', 'Lwm2mItem']
if model.__name__ in exclude_models or not tenant_uid:
# admin user not filter
return query
if hasattr(model, 'userIntID'):
from app.models import User
mapper = inspect(User)
        # skip the join if the User model has already been joined in this query
if mapper not in query._join_entities:
query = query.join(User, User.id == model.userIntID)
query = query.filter(User.tenantID == tenant_uid)
elif hasattr(model, 'tenantID'):
if model.__name__ == 'Role':
from app.models import Role
query = query.filter(or_(Role.tenantID == tenant_uid, Role.isShare == 1))
else:
query = query.filter(model.tenantID == tenant_uid)
return query
def filter_api(model, query):
""" Filter by application """
exclude_models = []
app_uid = g.get('app_uid')
if any([
not app_uid, model.__name__ in exclude_models
]):
return query
group_uid_attr = hasattr(model, 'groupID')
device_uid_attr = hasattr(model, 'deviceID')
device_id_attr = hasattr(model, 'deviceIntID')
if not any([group_uid_attr, device_uid_attr, device_id_attr]):
return query
from app.models import ApplicationGroup, Group, GroupDevice, Device
app_groups = Group.query \
.join(ApplicationGroup, ApplicationGroup.c.groupID == Group.groupID) \
.filter(Group.userIntID == g.user_id) \
.with_entities(Group.groupID).all()
if group_uid_attr:
query = query.filter(model.groupID.in_(app_groups))
elif device_uid_attr:
devices_uid = Device.query \
.join(GroupDevice, GroupDevice.c.deviceIntID == Device.id) \
.filter(GroupDevice.c.groupID.in_(app_groups)) \
.with_entities(Device.deviceID).all()
query = query.filter(model.deviceID.in_(devices_uid))
elif device_id_attr:
devices_id = Device.query \
.join(GroupDevice, GroupDevice.c.deviceIntID == Device.id) \
.filter(GroupDevice.c.groupID.in_(app_groups)) \
.with_entities(Device.id).all()
query = query.filter(model.deviceIntID.in_(devices_id))
return query
def filter_group(model, query):
""" Filter by group """
exclude_models = []
user_auth_type = g.get('user_auth_type')
if any([
not user_auth_type, user_auth_type == 1,
model.__name__ in exclude_models
]):
return query
group_uid_attr = hasattr(model, 'groupID')
device_uid_attr = hasattr(model, 'deviceID')
device_id_attr = hasattr(model, 'deviceIntID')
if not any([group_uid_attr, device_uid_attr, device_id_attr]):
return query
from app.models import UserGroup, GroupDevice, Device, Group
user_groups = Group.query \
.join(UserGroup, UserGroup.c.groupID == Group.groupID) \
.filter(UserGroup.c.userIntID == g.user_id) \
.with_entities(Group.groupID).all()
if group_uid_attr:
query = query.filter(model.groupID.in_(user_groups))
elif device_uid_attr:
devices_uid = Device.query \
.join(GroupDevice, GroupDevice.c.deviceIntID == Device.id) \
.filter(GroupDevice.c.groupID.in_(user_groups)) \
.with_entities(Device.deviceID).all()
query = query.filter(model.deviceID.in_(devices_uid))
elif device_id_attr:
devices_id = Device.query \
.join(GroupDevice, GroupDevice.c.deviceIntID == Device.id) \
.filter(GroupDevice.c.groupID.in_(user_groups)) \
.with_entities(Device.id).all()
query = query.filter(model.deviceIntID.in_(devices_id))
return query
def filter_request_args(model, query):
""" Query by request args """
exclude_args = [
'_page', '_limit', 'paginate', '_sort', '_order', 'startTime',
'endTime', 'createAt', 'msgTime', 'password', 'token', 'id',
'userIntID', 'tenantID'
]
for key, value in request.args.items():
if any([key in exclude_args, value == '', value is None]):
continue
elif key.endswith('_like'):
            # fuzzy match (LIKE) query
key = key.replace('_like', '')
if hasattr(model, key) and key not in exclude_args:
column = getattr(model, key)
if not check_column_type(column, value):
continue
query = query.filter(column.ilike(u'%{0}%'.format(value)))
elif key.endswith('_in'):
            # range (IN) query
key = key.replace('_in', '')
try:
in_value_list = [int(row) for row in value.split(',')]
except Exception:
raise ParameterInvalid(field=key)
if hasattr(model, key) and key not in exclude_args:
column = getattr(model, key)
if not check_column_type(column, value):
continue
query = query.filter(column.in_(in_value_list))
elif key == 'time_name' and value in ['startTime', 'endTime', 'createAt', 'msgTime']:
start_time = request.args.get('start_time')
end_time = request.args.get('end_time')
query = query.filter(getattr(model, value).between(start_time, end_time))
elif hasattr(model, key):
column = getattr(model, key)
if not check_column_type(column, value):
continue
query = query.filter(column == value)
else:
continue
return query
def mapping_result(query_result):
convert_dict = {
key: getattr(query_result, key)
for key in query_result.keys()
}
return convert_dict
def dict_code_label(record: Dict, code_list: List = None):
""" Convert dict_code """
cache = Cache()
dict_code_cache = cache.dict_code
if not dict_code_cache:
return record
for code in code_list:
if record.get(code) is None or not dict_code_cache.get(code):
continue
code_value_dict = dict_code_cache[code]
code_value = record[code]
if code_value_dict.get(code_value):
record[f'{code}Label'] = code_value_dict[code_value].get(f'{g.language}Label')
else:
record[f'{code}Label'] = None
return record
def get_model_schema(model_name):
cache = Cache()
models_schema_cache = cache.models_schema_cache
if models_schema_cache.get(model_name):
model_schema = models_schema_cache[model_name]
else:
from app import schemas
schema_name = f"{model_name}Schema"
if hasattr(schemas, schema_name):
model_schema = getattr(schemas, schema_name)()
cache.models_schema_cache[model_name] = model_schema
else:
model_schema = None
return model_schema
def check_column_type(model_column, value):
value_type = type(value)
column_type = type(model_column.type)
status = False
if value_type == int and issubclass(column_type, Integer):
status = True
elif value_type == str:
if value.isdigit() and issubclass(column_type, Integer):
status = True
elif issubclass(column_type, String):
status = True
return status
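if __name__ == '__main__':
    # Illustrative sketch only (not part of ActorCloud): check_column_type()
    # compares the Python type of a request value against the SQLAlchemy
    # column type before the value is used in a filter.
    from sqlalchemy import Column
    age_column = Column('age', Integer())
    name_column = Column('name', String(50))
    print(check_column_type(age_column, '18'))    # True: digit string vs Integer column
    print(check_column_type(age_column, 'abc'))   # False: non-digit string vs Integer column
    print(check_column_type(name_column, 'abc'))  # True: string vs String column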
|
libcity/model/traffic_speed_prediction/Seq2Seq.py | moghadas76/test_bigcity | 221 | 11094186 | import torch
import torch.nn as nn
import random
from logging import getLogger
from libcity.model import loss
from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel
class Encoder(nn.Module):
def __init__(self, device, rnn_type, input_size, hidden_size=64,
num_layers=1, dropout=0, bidirectional=False):
super().__init__()
self.device = device
self.rnn_type = rnn_type
self.layers = num_layers
self.hidden_size = hidden_size
self.dropout = dropout
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
if self.rnn_type.upper() == 'GRU':
self.rnn = nn.GRU(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout, bidirectional=bidirectional)
elif self.rnn_type.upper() == 'LSTM':
self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout, bidirectional=bidirectional)
elif self.rnn_type.upper() == 'RNN':
self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout, bidirectional=bidirectional)
else:
raise ValueError('Unknown RNN type: {}'.format(self.rnn_type))
def forward(self, x):
# x = [seq_len, batch_size, input_size]
# h_0 = [layers * num_directions, batch_size, hidden_size]
h_0 = torch.zeros(self.layers * self.num_directions, x.shape[1], self.hidden_size).to(self.device)
if self.rnn_type == 'LSTM':
c_0 = torch.zeros(self.layers * self.num_directions, x.shape[1], self.hidden_size).to(self.device)
out, (hn, cn) = self.rnn(x, (h_0, c_0))
# output = [seq_len, batch_size, hidden_size * num_directions]
# hn/cn = [layers * num_directions, batch_size, hidden_size]
else:
out, hn = self.rnn(x, h_0)
cn = torch.zeros(hn.shape)
# output = [seq_len, batch_size, hidden_size * num_directions]
# hn = [layers * num_directions, batch_size, hidden_size]
return hn, cn
class Decoder(nn.Module):
def __init__(self, device, rnn_type, input_size, hidden_size=64,
num_layers=1, dropout=0, bidirectional=False):
super().__init__()
self.device = device
self.rnn_type = rnn_type
self.layers = num_layers
self.hidden_size = hidden_size
self.dropout = dropout
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
if self.rnn_type.upper() == 'GRU':
self.rnn = nn.GRU(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout, bidirectional=bidirectional)
elif self.rnn_type.upper() == 'LSTM':
self.rnn = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout, bidirectional=bidirectional)
elif self.rnn_type.upper() == 'RNN':
self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, dropout=dropout, bidirectional=bidirectional)
else:
raise ValueError('Unknown RNN type: {}'.format(self.rnn_type))
self.fc = nn.Linear(hidden_size * self.num_directions, input_size)
def forward(self, x, hn, cn):
# x = [batch_size, input_size]
# hn, cn = [layers * num_directions, batch_size, hidden_size]
x = x.unsqueeze(0)
# x = [seq_len = 1, batch_size, input_size]
if self.rnn_type == 'LSTM':
out, (hn, cn) = self.rnn(x, (hn, cn))
else:
out, hn = self.rnn(x, hn)
cn = torch.zeros(hn.shape)
# out = [seq_len = 1, batch_size, hidden_size * num_directions]
# hn = [layers * num_directions, batch_size, hidden_size]
out = self.fc(out.squeeze(0))
# out = [batch_size, input_size]
return out, hn, cn
class Seq2Seq(AbstractTrafficStateModel):
def __init__(self, config, data_feature):
super().__init__(config, data_feature)
self._scaler = self.data_feature.get('scaler')
self.num_nodes = self.data_feature.get('num_nodes', 1)
self.feature_dim = self.data_feature.get('feature_dim', 1)
self.output_dim = self.data_feature.get('output_dim', 1)
self.input_window = config.get('input_window', 1)
self.output_window = config.get('output_window', 1)
self.device = config.get('device', torch.device('cpu'))
self._logger = getLogger()
self._scaler = self.data_feature.get('scaler')
self.rnn_type = config.get('rnn_type', 'GRU')
self.hidden_size = config.get('hidden_size', 64)
self.num_layers = config.get('num_layers', 1)
self.dropout = config.get('dropout', 0)
self.bidirectional = config.get('bidirectional', False)
self.teacher_forcing_ratio = config.get('teacher_forcing_ratio', 0)
self.encoder = Encoder(self.device, self.rnn_type, self.num_nodes * self.feature_dim,
self.hidden_size, self.num_layers, self.dropout, self.bidirectional)
self.decoder = Decoder(self.device, self.rnn_type, self.num_nodes * self.output_dim,
self.hidden_size, self.num_layers, self.dropout, self.bidirectional)
self._logger.info('You select rnn_type {} in Seq2Seq!'.format(self.rnn_type))
def forward(self, batch):
src = batch['X'] # [batch_size, input_window, num_nodes, feature_dim]
target = batch['y'] # [batch_size, output_window, num_nodes, feature_dim]
src = src.permute(1, 0, 2, 3) # [input_window, batch_size, num_nodes, feature_dim]
target = target.permute(1, 0, 2, 3) # [output_window, batch_size, num_nodes, feature_dim]
batch_size = src.shape[1]
src = src.reshape(self.input_window, batch_size, self.num_nodes * self.feature_dim)
target = target[..., :self.output_dim].contiguous().reshape(
self.output_window, batch_size, self.num_nodes * self.output_dim)
# src = [self.input_window, batch_size, self.num_nodes * self.feature_dim]
# target = [self.output_window, batch_size, self.num_nodes * self.output_dim]
encoder_hn, encoder_cn = self.encoder(src)
decoder_hn = encoder_hn
decoder_cn = encoder_cn
# encoder_hidden_state = [layers * num_directions, batch_size, hidden_size]
decoder_input = torch.randn(batch_size, self.num_nodes * self.output_dim).to(self.device)
# decoder_input = [batch_size, self.num_nodes * self.output_dim]
outputs = []
for i in range(self.output_window):
decoder_output, decoder_hn, decoder_cn = \
self.decoder(decoder_input, decoder_hn, decoder_cn)
# decoder_output = [batch_size, self.num_nodes * self.output_dim]
# decoder_hn = [layers * num_directions, batch_size, hidden_size]
outputs.append(decoder_output.reshape(batch_size, self.num_nodes, self.output_dim))
            # teacher forcing: only use the ground-truth target during training
if self.training and random.random() < self.teacher_forcing_ratio:
decoder_input = target[i]
else:
decoder_input = decoder_output
outputs = torch.stack(outputs)
# outputs = [self.output_window, batch_size, self.num_nodes, self.output_dim]
return outputs.permute(1, 0, 2, 3)
def calculate_loss(self, batch):
y_true = batch['y']
y_predicted = self.predict(batch)
y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])
return loss.masked_mae_torch(y_predicted, y_true, 0)
def predict(self, batch):
return self.forward(batch)
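if __name__ == "__main__":
    # Shape-check sketch (illustration only, not part of LibCity): push random
    # tensors through the Encoder/Decoder pair defined above to confirm the
    # expected tensor shapes. All sizes below are arbitrary example values.
    _device = torch.device("cpu")
    _enc = Encoder(_device, "GRU", input_size=8, hidden_size=16)
    _dec = Decoder(_device, "GRU", input_size=8, hidden_size=16)
    _x = torch.randn(12, 4, 8)              # [seq_len, batch_size, input_size]
    _hn, _cn = _enc(_x)                      # [num_layers, batch_size, hidden_size]
    _step = torch.randn(4, 8)                # one decoding step: [batch_size, input_size]
    _out, _hn, _cn = _dec(_step, _hn, _cn)   # _out: [batch_size, input_size]
    print(_out.shape, _hn.shape)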
|
venv/lib/python3.8/site-packages/cloudinary/cache/storage/key_value_storage.py | Joshua-Barawa/My-Photos | 199 | 11094197 | from abc import ABCMeta, abstractmethod
class KeyValueStorage:
"""
A simple key-value storage abstract base class
"""
__metaclass__ = ABCMeta
@abstractmethod
def get(self, key):
"""
Get a value identified by the given key
:param key: The unique identifier
:return: The value identified by key or None if no value was found
"""
raise NotImplementedError
@abstractmethod
def set(self, key, value):
"""
Store the value identified by the key
:param key: The unique identifier
:param value: Value to store
:return: bool True on success or False on failure
"""
raise NotImplementedError
@abstractmethod
def delete(self, key):
"""
Deletes item by key
:param key: The unique identifier
:return: bool True on success or False on failure
"""
raise NotImplementedError
@abstractmethod
def clear(self):
"""
Clears all entries
:return: bool True on success or False on failure
"""
raise NotImplementedError
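class InMemoryKeyValueStorage(KeyValueStorage):
    """
    Minimal dict-backed implementation added for illustration only (it is not
    part of the cloudinary SDK): useful as a test double or as a starting
    point for a real backend such as Redis or a database table.
    """
    def __init__(self):
        self._store = {}
    def get(self, key):
        return self._store.get(key)
    def set(self, key, value):
        self._store[key] = value
        return True
    def delete(self, key):
        return self._store.pop(key, None) is not None
    def clear(self):
        self._store.clear()
        return True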
|
examples/metapath2vec/utils/config.py | zbmain/PGL | 1,389 | 11094213 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""doc
"""
import sys
import datetime
import os
import yaml
import random
import shutil
import six
import warnings
import glob
def get_last_dir(path):
"""Get the last directory of a path.
"""
if os.path.isfile(path):
# e.g: "../checkpoints/task_name/epoch0_step300/predict.txt"
# return "epoch0_step300"
last_dir = path.split("/")[-2]
elif os.path.isdir(path):
if path[-1] == '/':
# e.g: "../checkpoints/task_name/epoch0_step300/"
last_dir = path.split('/')[-2]
else:
# e.g: "../checkpoints/task_name/epoch0_step300"
last_dir = path.split('/')[-1]
else:
# path or file is not existed
warnings.warn('%s is not a existed file or path' % path)
last_dir = ""
return last_dir
class AttrDict(dict):
def __init__(self, d={}, **kwargs):
if kwargs:
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
# Class attributes
# for k in self.__class__.__dict__.keys():
# if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
# setattr(self, k, getattr(self, k))
def __setattr__(self, name, value):
if isinstance(value, (list, tuple)):
value = [
self.__class__(x) if isinstance(x, dict) else x for x in value
]
elif isinstance(value, dict) and not isinstance(value, self.__class__):
value = self.__class__(value)
super(AttrDict, self).__setattr__(name, value)
super(AttrDict, self).__setitem__(name, value)
__setitem__ = __setattr__
def __getattr__(self, attr):
try:
value = super(AttrDict, self).__getitem__(attr)
except KeyError:
# log.warn("%s attribute is not existed, return None" % attr)
warnings.warn("%s attribute is not existed, return None" % attr)
value = None
return value
def update(self, e=None, **f):
d = e or dict()
d.update(f)
for k in d:
setattr(self, k, d[k])
def pop(self, k, d=None):
delattr(self, k)
        return super(AttrDict, self).pop(k, d)
def make_dir(path):
"""Build directory"""
if not os.path.exists(path):
os.makedirs(path)
def load_config(config_file):
"""Load config file"""
with open(config_file) as f:
if hasattr(yaml, 'FullLoader'):
config = yaml.load(f, Loader=yaml.FullLoader)
else:
config = yaml.load(f)
return config
def create_necessary_dirs(config):
"""Create some necessary directories to save some important files.
"""
config.log_dir = os.path.join(config.log_dir, config.task_name)
config.save_dir = os.path.join(config.save_dir, config.task_name)
config.output_dir = os.path.join(config.output_dir, config.task_name)
make_dir(config.log_dir)
make_dir(config.save_dir)
make_dir(config.output_dir)
def save_files(config):
"""Save config file so that we can know the config when we look back
"""
filelist = config.files2saved
targetpath = config.log_dir
if filelist is not None:
for file_or_dir in filelist:
if os.path.isdir(file_or_dir):
last_name = get_last_dir(file_or_dir)
dst = os.path.join(targetpath, last_name)
try:
copy_and_overwrite(file_or_dir, dst)
except Exception as e:
print(e)
print("backup %s to %s" % (file_or_dir, targetpath))
else:
for filename in files(files=file_or_dir):
if os.path.isfile(filename):
print("backup %s to %s" % (filename, targetpath))
shutil.copy2(filename, targetpath)
else:
print("%s is not existed." % filename)
def copy_and_overwrite(from_path, to_path):
if os.path.exists(to_path):
shutil.rmtree(to_path)
shutil.copytree(from_path, to_path)
def files(curr_dir='./', files='*.py'):
for i in glob.glob(os.path.join(curr_dir, files)):
yield i
def prepare_config(config_file, isCreate=False, isSave=False):
if os.path.isfile(config_file):
config = load_config(config_file)
config = AttrDict(config)
else:
raise TypeError("%s is not a yaml file" % config_file)
if isCreate:
create_necessary_dirs(config)
if isSave:
save_files(config)
return config
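if __name__ == "__main__":
    # Usage sketch (illustration only): AttrDict gives attribute-style access
    # to nested config dictionaries such as those loaded by load_config().
    # The keys and values below are made-up examples.
    cfg = AttrDict({"task_name": "demo", "optim": {"lr": 0.01, "type": "adam"}})
    print(cfg.task_name)         # "demo"
    print(cfg.optim.lr)          # 0.01 -- nested dicts are wrapped recursively
    cfg.update(batch_size=128)   # stored both as attribute and as dict key
    print(cfg["batch_size"])     # 128
    print(cfg.missing_key)       # None (a warning is emitted, see __getattr__)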
|
util/make_ascii.py | wenq1/duktape | 4,268 | 11094233 | #!/usr/bin/env python2
#
# Paranoia escape input file to be printable ASCII.
#
import os, sys
inp = sys.stdin.read().decode('utf-8')
for c in inp:
if (ord(c) >= 0x20 and ord(c) <= 0x7e) or (c in '\x0a'):
sys.stdout.write(c)
else:
sys.stdout.write('\\u%04x' % ord(c))
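# Example (illustration only):
#   $ echo "héllo" | python2 util/make_ascii.py
#   h\u00e9llo
# Printable ASCII and newlines pass through unchanged; everything else is
# escaped as \uXXXX.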
|
tests/python/contrib/test_ethosu/cascader/test_propagator.py | XiaoSong9905/tvm | 4,640 | 11094235 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from math import isclose
from tvm.contrib.ethosu.cascader import StripeConfig, Propagator
def test_propagator():
transform = [
[1, 0, 0, 0],
[0, 1 / 2, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1],
]
offset = [-1, 1, 2]
propagator = Propagator(
transform=transform,
offset=offset,
)
assert list(propagator.offset) == offset
for i, row in enumerate(transform):
for j, value in enumerate(row):
assert isclose(propagator.transform[i][j], value)
@pytest.mark.parametrize(
["propagator", "input_stripe_config", "output_stripe_config"],
[
(
Propagator(
transform=[
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
],
offset=[0, 0, 0, 0, 0],
),
StripeConfig(
shape=[1, 12, 14, 36],
extent=[1, 24, 18, 72],
strides=[1, 12, 14, 36],
order=[1, 2, 3, 4],
stripes=[1, 2, 2, 2],
offset=[0, 0, 0, 0],
),
StripeConfig(
shape=[1, 12, 3, 14, 16],
extent=[1, 24, 5, 18, 16],
strides=[1, 12, 2.25, 14, 0],
order=[1, 2, 4, 3, 0],
stripes=[1, 2, 2, 2, 1],
offset=[0, 0, 0, 0, 0],
),
),
(
Propagator(
transform=[
[0.5, 0, 0],
[0, 0.5, 0],
[0, 0, 1],
],
offset=[0, 0],
),
StripeConfig(
shape=[3, 5],
extent=[27, 50],
strides=[3, 5],
order=[1, 2],
stripes=[9, 10],
offset=[0, 0],
),
StripeConfig(
shape=[2, 3],
extent=[14, 25],
strides=[1.5, 2.5],
order=[1, 2],
stripes=[9, 10],
offset=[0, 0],
),
),
(
Propagator(
transform=[
[2, 0, 0, 4],
[0, 1, 0, 2],
[0, 0, 0, 8],
[0, 0, 0, 1],
],
offset=[-2, -1, 0],
),
StripeConfig(
shape=[4, 6, 32],
extent=[48, 60, 64],
strides=[4, 6, 32],
order=[1, 2, 3],
stripes=[12, 10, 2],
offset=[0, 0, 0],
),
StripeConfig(
shape=[12, 8, 8],
extent=[100, 62, 8],
strides=[8, 6, 0],
order=[1, 2, 0],
stripes=[12, 10, 1],
offset=[-2, -1, 0],
),
),
],
)
def test_propagate(propagator, input_stripe_config, output_stripe_config):
result_stripe_config = propagator.propagate(input_stripe_config)
assert result_stripe_config == output_stripe_config
if __name__ == "__main__":
pytest.main([__file__])
|
mythril/analysis/module/__init__.py | kalloc/mythril | 1,887 | 11094284 | <reponame>kalloc/mythril<filename>mythril/analysis/module/__init__.py<gh_stars>1000+
from mythril.analysis.module.base import EntryPoint, DetectionModule
from mythril.analysis.module.loader import ModuleLoader
from mythril.analysis.module.util import (
get_detection_module_hooks,
reset_callback_modules,
)
|
python/242_Valid_Anagram.py | dvlpsh/leetcode-1 | 4,416 | 11094295 | class Solution(object):
# def isAnagram(self, s, t):
# """
# :type s: str
# :type t: str
# :rtype: bool
# """
# # sort
# return sorted(s) == sorted(t)
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
# hash
# https://leetcode.com/articles/valid-anagram/
if len(s) != len(t):
return False
counter = [0] * 26
for i in range(len(s)):
counter[ord(s[i]) - ord('a')] += 1
counter[ord(t[i]) - ord('a')] -= 1
for num in counter:
if num != 0:
return False
return True |
dm_main.py | david-gpu/deep-makeover | 267 | 11094309 | import os
# Disable Tensorflow's INFO and WARNING messages
# See http://stackoverflow.com/questions/35911252
if 'TF_CPP_MIN_LOG_LEVEL' not in os.environ:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import numpy.random
import os.path
import random
import tensorflow as tf
import dm_celeba
import dm_flags
import dm_infer
import dm_input
import dm_model
import dm_show
import dm_train
import dm_utils
FLAGS = tf.app.flags.FLAGS
def _setup_tensorflow():
# Create session
config = tf.ConfigProto(log_device_placement=False) #, intra_op_parallelism_threads=1)
sess = tf.Session(config=config)
# Initialize all RNGs with a deterministic seed
with sess.graph.as_default():
tf.set_random_seed(FLAGS.random_seed)
random.seed(FLAGS.random_seed)
np.random.seed(FLAGS.random_seed)
return sess
# TBD: Move to dm_train.py?
def _prepare_train_dirs():
# Create checkpoint dir (do not delete anything)
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
# Cleanup train dir
if tf.gfile.Exists(FLAGS.train_dir):
try:
tf.gfile.DeleteRecursively(FLAGS.train_dir)
except:
pass
tf.gfile.MakeDirs(FLAGS.train_dir)
# Ensure dataset folder exists
if not tf.gfile.Exists(FLAGS.dataset) or \
not tf.gfile.IsDirectory(FLAGS.dataset):
raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))
# TBD: Move to dm_train.py?
def _get_train_data():
# Setup global tensorflow state
sess = _setup_tensorflow()
# Prepare directories
_prepare_train_dirs()
# Which type of transformation?
# Note: eyeglasses and sunglasses are filtered out because they tend to produce artifacts
if FLAGS.train_mode == 'ftm' or FLAGS.train_mode == 'f2m':
# Trans filter: from female to attractive male
# Note: removed facial hair from target images because otherwise the network becomes overly focused on rendering facial hair
source_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False}
target_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True, 'Goatee':False, 'Mustache':False, 'No_Beard':True}
elif FLAGS.train_mode == 'mtf' or FLAGS.train_mode == 'm2f':
        # Trans filter: from male to attractive female
source_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False}
target_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True}
elif FLAGS.train_mode == 'ftf' or FLAGS.train_mode == 'f2f':
# Vanity filter: from female to attractive female
source_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False}
target_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True}
elif FLAGS.train_mode == "mtm" or FLAGS.train_mode == 'm2m':
# Vanity filter: from male to attractive male
source_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False}
target_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True}
else:
raise ValueError('`train_mode` must be one of: `ftm`, `mtf`, `ftf` or `mtm`')
# Setup async input queues
selected = dm_celeba.select_samples(source_filter)
source_images = dm_input.input_data(sess, 'train', selected)
test_images = dm_input.input_data(sess, 'test', selected)
print('%8d source images selected' % (len(selected),))
selected = dm_celeba.select_samples(target_filter)
target_images = dm_input.input_data(sess, 'train', selected)
print('%8d target images selected' % (len(selected),))
print()
# Annealing temperature: starts at 1.0 and decreases exponentially over time
annealing = tf.Variable(initial_value=1.0, trainable=False, name='annealing')
halve_annealing = tf.assign(annealing, 0.5*annealing)
# Create and initialize training and testing models
train_model = dm_model.create_model(sess, source_images, target_images, annealing, verbose=True)
print("Building testing model...")
test_model = dm_model.create_model(sess, test_images, None, annealing)
print("Done.")
# Forget this line and TF will deadlock at the beginning of training
tf.train.start_queue_runners(sess=sess)
# Pack all for convenience
train_data = dm_utils.Container(locals())
return train_data
# TBD: Move to dm_infer.py?
def _get_inference_data():
# Setup global tensorflow state
sess = _setup_tensorflow()
# Load single image to use for inference
if FLAGS.infile is None:
raise ValueError('Must specify inference input file through `--infile <filename>` command line argument')
if not tf.gfile.Exists(FLAGS.infile) or tf.gfile.IsDirectory(FLAGS.infile):
raise FileNotFoundError('File `%s` does not exist or is a directory' % (FLAGS.infile,))
filenames = [FLAGS.infile]
infer_images = dm_input.input_data(sess, 'inference', filenames)
print('Loading model...')
# Create inference model
infer_model = dm_model.create_model(sess, infer_images)
# Load model parameters from checkpoint
checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
try:
saver = tf.train.Saver()
saver.restore(sess, checkpoint.model_checkpoint_path)
del saver
del checkpoint
except:
raise RuntimeError('Unable to read checkpoint from `%s`' % (FLAGS.checkpoint_dir,))
print('Done.')
# Pack all for convenience
infer_data = dm_utils.Container(locals())
return infer_data
def main(argv=None):
if FLAGS.run == 'train':
train_data = _get_train_data()
dm_train.train_model(train_data)
elif FLAGS.run == 'inference':
infer_data = _get_inference_data()
dm_infer.inference(infer_data)
else:
print("Operation `%s` not supported" % (FLAGS.run,))
if __name__ == '__main__':
dm_flags.define_flags()
tf.app.run()
|
train.py | kanchen-usc/QRC-Net | 117 | 11094314 | <filename>train.py
import tensorflow as tf
import os, sys
import numpy as np
import time
from dataprovider import dataprovider
from model import ground_model
from util.iou import calc_iou
from util.iou import calc_iou_by_reg_feat
from util.nms import nms
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--model_name", type=str, default='qrc')
parser.add_argument("-r", "--reward_con", type=float, default=0.2)
parser.add_argument("-g", "--gpu", type=str, default='0')
parser.add_argument("--restore_id", type=int, default=0)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
class Config(object):
batch_size = 40
img_feat_dir = './feature'
sen_dir = './annotation'
train_file_list = 'flickr30k_train_val.lst'
test_file_list = 'flickr30k_test.lst'
log_file = './log/ground_supervised'
save_path = './model/ground_supervised'
vocab_size = 17869
num_epoch = 3
max_step = 40000
optim='adam'
dropout = 0.5
lr = 0.0001
weight_decay=0.0
lstm_dim = 500
def update_feed_dict(dataprovider, model, is_train):
img_feat, sen_feat, gt_reg, bbx_label, reward_batch, pos_all, pos_reg_all = dataprovider.get_next_batch_reg()
feed_dict = {
model.sen_data: sen_feat,
model.vis_data: img_feat,
model.bbx_label: bbx_label,
model.gt_reg: gt_reg,
model.reward: reward_batch,
model.is_train: is_train}
if dataprovider.multi_reg:
feed_dict[model.pos_all] = pos_all
feed_dict[model.pos_reg_all] = pos_reg_all
feed_dict[model.num_reg] = float(pos_all.shape[0])
return feed_dict
def eval_cur_batch(gt_label, cur_logits,
is_train=True, num_sample=0, pos_or_reg=None,
bbx_loc=None, gt_loc_all=None, ht = 1.0, wt = 1.0):
accu = 0.0
if is_train:
res_prob = cur_logits[:, :, 0]
res_label = np.argmax(res_prob, axis=1)
accu = float(np.sum(res_label == gt_label)) / float(len(gt_label))
else:
num_bbx = len(bbx_loc)
res_prob = cur_logits[:, :num_bbx, 0]
res_label = np.argmax(res_prob, axis=1)
for gt_id in range(len(pos_or_reg)):
cur_gt_pos = gt_label[gt_id]
success = False
cur_gt = gt_loc_all[gt_id]
if np.any(cur_gt):
cur_bbx = bbx_loc[res_label[gt_id]]
cur_reg = cur_logits[gt_id, res_label[gt_id], 1:]
#print 'IOU Stats: ', cur_gt, cur_bbx, cur_reg
iou, _ = calc_iou_by_reg_feat(cur_gt, cur_bbx, cur_reg, ht, wt)
if iou > 0.5:
success = True
if success:
accu += 1.0
accu /= float(num_sample)
return accu
def load_img_id_list(file_list):
img_list = []
with open(file_list) as fin:
for img_id in fin.readlines():
img_list.append(int(img_id.strip()))
img_list = np.array(img_list).astype('int')
return img_list
def run_eval(sess, dataprovider, model, eval_op, feed_dict):
num_test = 0.0
num_corr_all = 0.0
num_cnt_all = 0.0
for img_ind, img_id in enumerate(dataprovider.test_list):
img_feat_raw, sen_feat_batch, bbx_gt_batch, gt_loc_all, \
bbx_loc, num_sample_all, pos_or_reg, ht, wt = dataprovider.get_test_feat_reg(img_id)
if num_sample_all > 0:
num_test += 1.0
num_corr = 0
num_sample = len(bbx_gt_batch)
img_feat = feed_dict[model.vis_data]
for i in range(num_sample):
img_feat[i] = img_feat_raw
sen_feat = feed_dict[model.sen_data]
sen_feat[:num_sample] = sen_feat_batch
eval_feed_dict = {
model.sen_data: sen_feat,
model.vis_data: img_feat,
model.is_train: False}
cur_att_logits = sess.run(eval_op, feed_dict=eval_feed_dict)
cur_att_logits = cur_att_logits[:num_sample]
# print cur_att_logits
cur_accuracy = eval_cur_batch(bbx_gt_batch, cur_att_logits, False,
num_sample_all, pos_or_reg, bbx_loc, gt_loc_all, ht , wt)
num_valid = np.sum(np.all(gt_loc_all, 1))
print '%d/%d: %d/%d, %.4f'%(img_ind, len(dataprovider.test_list), num_valid, num_sample, cur_accuracy)
num_corr_all += cur_accuracy*num_sample_all
num_cnt_all += float(num_sample_all)
accu = num_corr_all/num_cnt_all
print 'Accuracy = %.4f'%(accu)
return accu
def run_training():
train_list = []
test_list = []
config = Config()
train_list = load_img_id_list(config.train_file_list)
test_list = load_img_id_list(config.test_file_list)
config.save_path = config.save_path + '_' + args.model_name
if not os.path.isdir(config.save_path):
print 'Save models into %s'%config.save_path
os.mkdir(config.save_path)
log_file = config.log_file + '_' + args.model_name + '.log'
config.hidden_size = 500
config.is_multi = True
config.reward_con = args.reward_con
log = open(log_file, 'w', 0)
restore_id = args.restore_id
cur_dataset = dataprovider(train_list, test_list, config.img_feat_dir, config.sen_dir, config.vocab_size,
reward_con=config.reward_con, batch_size=config.batch_size)
model = ground_model(config)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)
with tf.Graph().as_default():
loss, loss_vec, logits, rwd_pred, loss_rwd = model.build_model()
# Create a session for running Ops on the Graph.
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Run the Op to initialize the variables.
saver = tf.train.Saver(max_to_keep=20)
duration = 0.0
if restore_id > 0:
print 'Restore model_%d'%restore_id
# cur_dataset.epoch_id = restore_id
cur_dataset.is_save = False
temp_vars = set(tf.global_variables())
saver.restore(sess, './model/%s/model_%d.ckpt'%(config.save_path, restore_id))
model.train_op = model.build_train_op(loss, loss_vec, model.reward_w, rwd_pred)
train_op = model.train_op
sess.run(tf.variables_initializer(set(tf.global_variables()) - temp_vars))
else:
print 'Train from scratch'
cur_dataset.is_save = False
model.train_op = model.build_train_op(loss, loss_vec, loss_rwd, rwd_pred)
train_op = model.train_op
init = tf.global_variables_initializer()
sess.run(init)
for step in xrange(config.max_step):
start_time = time.time()
feed_dict = update_feed_dict(cur_dataset, model, True)
_,loss_value,loss_vec_value, cur_logits = sess.run([train_op, loss, loss_vec, logits], feed_dict=feed_dict)
duration += time.time()-start_time
if cur_dataset.is_save:
print 'Save model_%d into %s'%(cur_dataset.epoch_id, config.save_path)
saver.save(sess, '%s/model_%d.ckpt'%(config.save_path, cur_dataset.epoch_id))
cur_dataset.is_save = False
if step%10 == 0:
cur_accu = eval_cur_batch(feed_dict[model.bbx_label], cur_logits, True)
print 'Step %d: loss = %.4f, accu = %.4f (%.4f sec)'%(step, loss_value, cur_accu, duration/10.0)
duration = 0.0
if ((step)%600)==0:
print "-----------------------------------------------"
eval_accu = run_eval(sess, cur_dataset, model, logits, feed_dict)
log.write('%d/%d: %.4f, %.4f\n'%(step+1, cur_dataset.epoch_id, loss_value, eval_accu))
print "-----------------------------------------------"
model.batch_size = config.batch_size
cur_dataset.is_save = False
log.close()
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
|
tests/test_compat.py | KazakovDenis/django-extensions | 4,057 | 11094322 | <reponame>KazakovDenis/django-extensions
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.test.utils import override_settings
from django_extensions.compat import get_template_setting
class CompatTests(TestCase):
@override_settings(TEMPLATES=None)
def test_should_return_None_by_default_if_TEMPLATES_setting_is_none(self):
self.assertIsNone(get_template_setting('template_key'))
@override_settings(TEMPLATES=None)
def test_should_return_default_if_TEMPLATES_setting_is_none(self):
self.assertEqual(get_template_setting('template_key', 'test'), 'test')
@override_settings(TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True
}])
def test_should_return_value_for_key(self):
self.assertEqual(get_template_setting('BACKEND'),
'django.template.backends.django.DjangoTemplates')
|
ipypublish/port_api/plugin_to_json.py | parmentelat/ipypublish | 220 | 11094325 | <reponame>parmentelat/ipypublish<filename>ipypublish/port_api/plugin_to_json.py<gh_stars>100-1000
""" a module to convert between the old (Python script) plugin format,
and the new (JSON) one
"""
from typing import Dict, Tuple # noqa: F401
import ast
import json
def assess_syntax(path):
with open(path) as file_obj:
content = file_obj.read()
syntax_tree = ast.parse(content)
docstring = "" # docstring = ast.get_docstring(syntaxTree)
unknowns = []
imported = {}
assignments = {}
for i, child in enumerate(ast.iter_child_nodes(syntax_tree)):
if i == 0 and isinstance(child, ast.Expr) and isinstance(child.value, ast.Str):
docstring = child.value.s
elif isinstance(child, ast.ImportFrom):
module = child.module
for n in child.names:
import_pth = module + "." + n.name
imported[n.name if n.asname is None else n.asname] = import_pth
elif isinstance(child, ast.Assign):
targets = child.targets
if len(targets) > 1:
raise IOError(
"cannot handle expansion assignments " "(e.g. `a, b = [1, 2]`)"
)
target = child.targets[0] # type: ast.Name
assignments[target.id] = child.value
else:
unknowns.append(child)
if unknowns:
print(
"Warning this script can only handle 'ImportFrom' and 'Assign' "
"syntax, found additional items: {}".format(unknowns)
)
return docstring, imported, assignments
def ast_to_json(item, imported, assignments):
"""recursively convert ast items to json friendly values"""
value = None
if item in ["True", "False", "None"]: # python 2.7
value = {"True": True, "False": False, "None": None}[item]
elif hasattr(ast, "NameConstant") and isinstance(item, ast.NameConstant):
value = item.value
elif isinstance(item, ast.Str):
value = item.s
elif isinstance(item, ast.Num):
value = item.n
elif isinstance(item, ast.Name):
if item.id in imported:
value = imported[item.id]
elif item.id in assignments:
value = ast_to_json(assignments[item.id], imported, assignments)
elif item.id in ["True", "False", "None"]: # python 2.7
value = {"True": True, "False": False, "None": None}[item.id]
else:
raise ValueError("could not find assignment '{}' in config".format(item.id))
elif isinstance(item, (ast.List, ast.Tuple, ast.Set)):
value = [ast_to_json(i, imported, assignments) for i in item.elts]
elif isinstance(item, ast.Dict):
value = convert_dict(item, imported, assignments)
else:
raise ValueError("could not handle ast item: {}".format(item))
return value
def convert_dict(dct, imported, assignments):
# type: (ast.Dict, Dict[str, str], dict) -> dict
"""recurse through and replace keys"""
out_dict = {}
for key, val in zip(dct.keys, dct.values):
if not isinstance(key, ast.Str):
raise ValueError("expected key to be a Str; {}".format(key))
out_dict[key.s] = ast_to_json(val, imported, assignments)
return out_dict
def convert_oformat(oformat):
if oformat == "Notebook":
outline = None # TODO do notebooks need template (they have currently)
exporter = "nbconvert.exporters.NotebookExporter"
elif oformat == "Latex":
exporter = "nbconvert.exporters.LatexExporter"
outline = {
"module": "ipypublish.templates.outline_schemas",
"file": "latex_outline.latex.j2",
}
elif oformat == "HTML":
exporter = "nbconvert.exporters.HTMLExporter"
outline = {
"module": "ipypublish.templates.outline_schemas",
"file": "html_outline.html.j2",
}
elif oformat == "Slides":
exporter = "nbconvert.exporters.SlidesExporter"
outline = {
"module": "ipypublish.templates.outline_schemas",
"file": "html_outline.html.j2",
}
else:
raise ValueError(
"expected oformat to be: " "'Notebook', 'Latex', 'HTML' or 'Slides'"
)
return exporter, outline
def convert_config(config, exporter_class, allow_other):
# type: (dict, str) -> dict
"""convert config into required exporter format"""
filters = {}
preprocs = {}
other = {}
# first parse
for key, val in config.items():
# TODO Exporter.filters and TemplateExporter.filters always the same?
if key in ["Exporter.filters", "TemplateExporter.filters"]:
filters.update(config[key])
if key in ["Exporter.preprocessors", "TemplateExporter.preprocessors"]:
if preprocs:
raise ValueError(
"'config' contains both Exporter.preprocessors and "
"TemplateExporter.preprocessors"
)
for p in val:
pname = p.split(".")[-1]
preprocs[pname] = {"class": p, "args": {}}
# TODO move these special cases to seperate input/function
if pname in ["LatexDocLinks", "LatexDocHTML"]:
preprocs[pname]["args"]["metapath"] = "${meta_path}"
preprocs[pname]["args"]["filesfolder"] = "${files_path}"
# second parse
for key, val in config.items():
if key in [
"Exporter.filters",
"TemplateExporter.filters",
"Exporter.preprocessors",
"TemplateExporter.preprocessors",
]:
continue
if key.split(".")[0] in preprocs:
preprocs[key.split(".")[0]]["args"][".".join(key.split(".")[1:])] = val
else:
other[key] = val
if other and not allow_other:
print("Warning: ignoring other args: {}".format(other))
other = {}
output = {
"class": exporter_class,
"filters": filters,
"preprocessors": list(preprocs.values()),
"other_args": other,
}
return output
def replace_template_path(path):
""" replace original template path with new dict """
segments = path.split(".")
module = ".".join(segments[0:-1])
name = segments[-1]
if module == "ipypublish.html.ipypublish":
return {
"module": "ipypublish.templates.segments",
"file": "ipy-{0}.html-tplx.json".format(name),
}
elif module == "ipypublish.html.standard":
return {
"module": "ipypublish.templates.segments",
"file": "std-{0}.html-tplx.json".format(name),
}
elif module == "ipypublish.latex.standard":
return {
"module": "ipypublish.templates.segments",
"file": "std-{0}.latex-tpl.json".format(name),
}
elif module == "ipypublish.latex.ipypublish":
return {
"module": "ipypublish.templates.segments",
"file": "ipy-{0}.latex-tpl.json".format(name),
}
else:
print("Warning: unknown template path: {}".format(path))
return {"module": module, "file": "{0}.json".format(name)}
def create_json(docstring, imported, assignments, allow_other=True):
# type: (str, Dict[str, str], dict, bool) -> dict
"""Set docstring here.
Parameters
----------
docstring: str
the doc string of the module
imported: dict
imported classes
assignments: dict
assigned values (i.e. 'a = b')
allow_other: bool
whether to allow arguments in config,
which do not relate to preprocessors
Returns
-------
"""
oformat = None
config = None
template = None
for value, expr in assignments.items():
if value == "oformat":
if not isinstance(expr, ast.Str):
raise ValueError("expected 'oformat' to be a Str; {}".format(expr))
oformat = expr.s
elif value == "config":
if not isinstance(expr, ast.Dict):
raise ValueError("expected 'config' to be a Dict; {}".format(expr))
config = convert_dict(expr, imported, assignments)
elif value == "template":
if not isinstance(expr, ast.Call):
raise ValueError("expected 'config' to be a call to create_tpl(x)")
# func = expr.func # TODO make sure func name is create_tpl/tplx
args = expr.args
keywords = expr.keywords
if len(args) != 1 or len(keywords) > 0:
raise ValueError("expected create_tpl(x) to have one argument")
seg_list = args[0]
if isinstance(seg_list, ast.ListComp):
seg_list = seg_list.generators[0].iter
if not isinstance(seg_list, ast.List):
raise ValueError(
"expected create_tpl(x) arg to be a List; {}".format(seg_list)
)
segments = []
for seg in seg_list.elts:
if isinstance(seg, ast.Attribute):
seg_name = seg.value.id
elif isinstance(seg, ast.Name):
seg_name = seg.id
else:
raise ValueError(
"expected seg in template to be an Attribute; "
+ "{1}".format(seg)
)
if seg_name not in imported:
raise ValueError("segment '{}' not found".format(seg_name))
segments.append(imported[seg_name])
template = segments
if oformat is None:
raise ValueError("could not find 'oformat' assignment")
if config is None:
raise ValueError("could not find 'config' assignment")
if template is None:
raise ValueError("could not find 'template' assignment")
exporter_class, outline = convert_oformat(oformat)
exporter = convert_config(config, exporter_class, allow_other)
if any(["biblio_natbib" in s for s in template]):
exporter["filters"]["strip_ext"] = "ipypublish.filters.filters.strip_ext"
return {
"description": docstring.splitlines(),
"exporter": exporter,
"template": None
if outline is None
else {
"outline": outline,
"segments": [replace_template_path(s) for s in template],
},
}
def convert_to_json(path, outpath=None, ignore_other=False):
"""Set docstring here.
Parameters
----------
path: str
input module path
outpath=None: str or None
if set, output json to this path
ignore_other: bool
whether to ignore arguments in config,
which do not relate to preprocessors
Returns
-------
"""
_docstring, _imported, _assignments = assess_syntax(path)
# print(_docstring)
# print()
# print(_imported)
# print()
# print(_assignments)
output = create_json(_docstring, _imported, _assignments, not ignore_other)
if outpath:
with open(outpath, "w") as file_obj:
json.dump(output, file_obj, indent=2)
return json.dumps(output, indent=2)
if __name__ == "__main__":
if False:
import glob
import os
for path in glob.glob(
"/Users/cjs14/GitHub/ipypublish" "/ipypublish/export_plugins/*.py"
):
dirname = os.path.dirname(path)
name = os.path.splitext(os.path.basename(path))[0]
try:
convert_to_json(
path, os.path.join(dirname, name + ".json"), ignore_other=True
)
except ValueError as err:
print("{0} failed: {1}".format(path, err))
convert_to_json(
"/Users/cjs14/GitHub/ipypublish" "/ipypublish_plugins/example_new_plugin.py",
"/Users/cjs14/GitHub/ipypublish" "/ipypublish_plugins/example_new_plugin.json",
)
|
api/src/opentrons/protocol_engine/commands/pick_up_tip.py | knownmed/opentrons | 235 | 11094349 | """Pick up tip command request, result, and implementation models."""
from __future__ import annotations
from pydantic import BaseModel
from typing import Optional, Type
from typing_extensions import Literal
from .pipetting_common import BasePipettingData
from .command import AbstractCommandImpl, BaseCommand, BaseCommandRequest
PickUpTipCommandType = Literal["pickUpTip"]
class PickUpTipData(BasePipettingData):
"""Data needed to move a pipette to a specific well."""
pass
class PickUpTipResult(BaseModel):
"""Result data from the execution of a PickUpTip."""
pass
class PickUpTipImplementation(AbstractCommandImpl[PickUpTipData, PickUpTipResult]):
"""Pick up tip command implementation."""
async def execute(self, data: PickUpTipData) -> PickUpTipResult:
"""Move to and pick up a tip using the requested pipette."""
await self._pipetting.pick_up_tip(
pipette_id=data.pipetteId,
labware_id=data.labwareId,
well_name=data.wellName,
)
return PickUpTipResult()
class PickUpTip(BaseCommand[PickUpTipData, PickUpTipResult]):
"""Pick up tip command model."""
commandType: PickUpTipCommandType = "pickUpTip"
data: PickUpTipData
result: Optional[PickUpTipResult]
_ImplementationCls: Type[PickUpTipImplementation] = PickUpTipImplementation
class PickUpTipRequest(BaseCommandRequest[PickUpTipData]):
"""Pick up tip command creation request model."""
commandType: PickUpTipCommandType = "pickUpTip"
data: PickUpTipData
_CommandCls: Type[PickUpTip] = PickUpTip
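if __name__ == "__main__":
    # Usage sketch (illustration only, not part of the Protocol Engine). The
    # identifiers below are hypothetical; real values come from engine state.
    # It assumes BasePipettingData declares only pipetteId, labwareId and
    # wellName, as used by PickUpTipImplementation.execute above.
    request = PickUpTipRequest(
        data=PickUpTipData(
            pipetteId="pipette-1",
            labwareId="tiprack-1",
            wellName="A1",
        )
    )
    print(request.json())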
|