repo_name
stringlengths 5
92
| path
stringlengths 4
221
| copies
stringclasses 19
values | size
stringlengths 4
6
| content
stringlengths 766
896k
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 32
997
| alpha_frac
float64 0.25
0.96
| autogenerated
bool 1
class | ratio
float64 1.5
13.6
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
NeurodataWithoutBorders/api-python | examples/create_scripts/general-e.py | 1 | 2091 | #!/usr/bin/python
import sys
from nwb import nwb_file
from nwb import nwb_utils as utils
"""
Example using extension to add metadata to group /general
Group /general contains general metadata, i.e. metadata that
applies to the entire session.
This example uses the extension defined in extensions/e-general.py
to add new metadata to define then add new metadata to section
/general
"""
# create a new NWB file
OUTPUT_DIR = "../created_nwb_files/"
file_name = __file__[0:-3] + ".nwb"
settings = {}
settings["file_name"] = OUTPUT_DIR + file_name
settings["identifier"] = utils.create_identifier("add metadata to general")
settings["mode"] = "w"
settings["start_time"] = "2016-04-07T03:16:03.604121"
settings["description"] = "Test file demonstrating use of an extension for general"
# specify the extension (Could be more than one. Only one used now).
settings['extensions'] = ["extensions/e-general.py"]
f = nwb_file.open(**settings)
########################################################################
# Specifier experimenter (this dataset is part of the core NWB format)
eds = f.set_dataset('experimenter', "Joseline Doe")
# specify attribute to experimenter, this defined in extension file.
# it is not part of the core NWB format
eds.set_attr("orcid_id", "7012023")
# Now add metadata that is defined by the extension
gri = f.make_group("rctn_info")
gri.set_dataset("seminars", ["Thom Smith", "Dwight Keenan", "Sue Trimble"])
gri.set_dataset("attendance", [23, 45, 33])
f.set_dataset("rctn:activity_level", '7')
f.set_dataset("rctn:time_since_fed", '6 hours 20 minutes')
f.set_dataset("notes", "some notes")
# also set extra metadata about subject
# these datasets are also defined in the extension
# dataset names and values are from a file in the AIBS cell types database
f.set_dataset("aibs_specimen_id",313862134)
f.set_dataset("aibs_specimen_name","Sst-IRES-Cre;Ai14(IVSCC)-167638.03.01.01")
f.set_dataset("aibs_dendrite_state","NA")
f.set_dataset("aibs_dendrite_type","aspiny")
f.set_dataset("aibs_cre_line","Sst-IRES-Cre")
# All done. Close the file
f.close()
| bsd-3-clause | 7,030,291,989,708,587,000 | 33.278689 | 83 | 0.704926 | false | 3.1875 | false | false | false |
guildai/guild | guild/remotes/gist.py | 1 | 20430 | # Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import os
import pprint
import shutil
import subprocess
import sys
import zipfile
from guild import remote as remotelib
from guild import remote_util
from guild import util
from . import meta_sync
log = logging.getLogger("guild.remotes.gist")
class NoSuchGist(remotelib.OperationError):
pass
class MissingRequiredEnv(remotelib.OperationError):
pass
class GistRemoteType(remotelib.RemoteType):
def __init__(self, _ep):
pass
def remote_for_config(self, name, config):
return GistRemote(name, config)
def remote_for_spec(self, spec):
name = "gist:%s" % spec
user, gist_name = _parse_spec(spec)
config = remotelib.RemoteConfig(
{
"user": user,
"gist-name": gist_name,
}
)
return GistRemote(name, config)
def _parse_spec(spec):
parts = spec.split("/", 1)
if len(parts) == 1:
try:
return _required_gist_user_env({}), parts[0]
except MissingRequiredEnv as e:
raise remotelib.InvalidRemoteSpec(str(e))
return parts
def _required_gist_user_env(env):
try:
return _required_env("GIST_USER", [env, os.environ])
except KeyError:
raise MissingRequiredEnv(
"gist remotes must be specified as USER/GIST_NAME if GIST_USER "
"environment variable is not defined"
)
def _required_env(name, sources):
for src in sources:
try:
return src[name]
except KeyError:
pass
raise KeyError(name)
class GistRemote(meta_sync.MetaSyncRemote):
def __init__(self, name, config):
self.name = name
self.user = config["user"]
self.gist_name = config["gist-name"]
self._gist_readme_name = _gist_readme_name(self.gist_name)
self.local_env = remote_util.init_env(config.get("local-env"))
self.local_sync_dir = meta_sync.local_meta_dir(
_remote_full_name(self.user, self.gist_name), ""
)
self._local_gist_repo = os.path.join(self.local_sync_dir, "gist")
runs_dir = os.path.join(self.local_sync_dir, "runs")
super(GistRemote, self).__init__(runs_dir, None)
def status(self, verbose=False):
remote_util.remote_activity("Getting %s status", self.name)
gist = self._repo_gist()
sys.stdout.write("%s (gist %s) is available\n" % (self.name, gist["id"]))
if verbose:
sys.stdout.write(pprint.pformat(gist))
sys.stdout.write("\n")
def start(self):
remote_util.remote_activity("Getting %s status", self.name)
try:
gist = self._repo_gist()
except NoSuchGist:
log.info("Creating gist")
gist = self._create_gist()
log.info(
"Created %s (gist %s) for user %s",
self.name,
gist["id"],
self.user,
)
self._sync_runs_meta()
else:
raise remotelib.OperationError(
"%s (gist %s) already exists for user %s"
% (self.name, gist["id"], self.user)
)
def stop(self):
self._delete_gist()
self._clear_gist_cache()
def _delete_gist(self):
gist = self._repo_gist()
log.info("Deleting gist %s", gist["id"])
_delete_gist(gist, self.local_env)
def _clear_gist_cache(self):
log.info("Clearning local cache")
log.debug("deleting %s", self.local_sync_dir)
util.ensure_safe_rmtree(self.local_sync_dir)
def stop_details(self):
remote_util.remote_activity("Getting %s status", self.name)
try:
gist = self._repo_gist()
except NoSuchGist:
return None
else:
return "gist %s will be deleted - THIS CANNOT BE UNDONE!" % gist["id"]
def _sync_runs_meta(self, force=False):
remote_util.remote_activity("Refreshing run info for %s" % self.name)
self._ensure_local_gist_repo()
self._sync_runs_meta_for_gist(force)
def _ensure_local_gist_repo(self):
if _is_git_repo(self._local_gist_repo):
log.debug("gist local repo found at %s", self._local_gist_repo)
return
log.debug("initializing gist local repo at %s", self._local_gist_repo)
gist = self._repo_gist()
_sync_gist_repo(gist, self._local_gist_repo, self.local_env)
def _repo_gist(self):
gist = _find_gist_with_file(self.user, self._gist_readme_name, self.local_env)
if not gist:
raise NoSuchGist(
"cannot find gist remote '%s' (denoted by the file '%s') for user %s\n"
"If the gist is private, you must specify a valid access token with "
"GIST_ACCESS_TOKEN.\nFor more information see "
"https://my.guild.ai/docs/gists."
% (self.gist_name, self._gist_readme_name, self.user)
)
return gist
def _sync_runs_meta_for_gist(self, force):
try:
_pull_gist_repo(self._local_gist_repo, self.local_env)
except NoSuchGist:
self._clear_gist_cache()
else:
git_commit = self._gist_repo_current_commit()
if not force and self._meta_current(git_commit):
return
_refresh_runs_meta(
self._local_gist_repo,
self._runs_dir,
git_commit,
self.local_sync_dir,
)
def _meta_current(self, git_commit):
return meta_sync.meta_current(self.local_sync_dir, lambda: git_commit)
def _gist_repo_current_commit(self):
return _git_current_commit(self._local_gist_repo)
def _delete_runs(self, runs, permanent):
assert permanent # gist remotes only support permanent delete
_delete_gist_runs(runs, self._local_gist_repo, self._runs_dir)
_commit_and_push_gist_repo_for_delete(
self._local_gist_repo,
_delete_commit_msg(),
self.local_env,
self.name,
)
def _restore_runs(self, runs):
raise NotImplementedError()
def _purge_runs(self, runs):
raise NotImplementedError()
def push(self, runs, delete=False):
self._ensure_synced_gist_repo()
_export_runs_to_gist_archives(runs, self._local_gist_repo)
_commit_and_push_gist_repo_for_push(
self._local_gist_repo,
_push_commit_msg(),
self.local_env,
self.name,
)
self._sync_runs_meta_for_gist(True)
def _ensure_synced_gist_repo(self):
try:
self._sync_runs_meta()
except NoSuchGist:
self._init_gist_repo()
def _init_gist_repo(self):
gist = self._create_gist()
_sync_gist_repo(gist, self._local_gist_repo, self.local_env)
def _create_gist(self):
return _create_gist(
self.user, self.gist_name, self._gist_readme_name, self.local_env
)
def pull(self, runs, delete=False):
from guild import var
# That we have `runs` means we've sync'd runs meta. "Meta" in
# this case also contains the runs themselves as zip
# archives. At this point we need only extract the run
# archives to the runs dir.
_extract_runs(runs, self._local_gist_repo, var.runs_dir(), self.name)
def _remote_full_name(user, gist_name):
return "gist-%s-%s" % (user, gist_name)
def _gist_readme_name(gist_name):
return "[Guild AI] %s" % _ensure_md_ext(gist_name)
def _ensure_md_ext(s):
if s.lower().endswith(".md"):
return s
return s + ".md"
def _find_gist_with_file(user, filename, env):
import requests # expensive
page = 1
url = "https://api.github.com/users/%s/gists" % user
while True:
resp = requests.get(
url,
params={"page": page, "per_page": 100},
headers=_github_auth_headers(env),
)
gists = resp.json()
if not gists:
return None
for gist in gists:
for name in gist["files"]:
if name == filename:
return gist
page += 1
def _github_auth_headers(env):
try:
access_token = _required_gist_access_token(env)
except MissingRequiredEnv:
return {}
else:
return {"Authorization": "token %s" % access_token}
def _sync_gist_repo(gist, local_repo, env):
repo_url = _gist_repo_url(gist, env)
if _is_git_repo(local_repo):
_pull_gist_repo(local_repo, env)
else:
_clone_gist_repo(repo_url, local_repo, env)
def _gist_repo_url(gist, env):
if _gist_urltype(env) == "ssh":
return "[email protected]:%s.git" % gist["id"]
else:
return gist["git_pull_url"]
def _gist_urltype(env):
try:
return _required_env("GIST_URLTYPE", [env, os.environ])
except KeyError:
return None
def _clone_gist_repo(repo_url, local_repo, env):
cmd = [_git_cmd(), "clone", "--quiet", repo_url, local_repo]
log.debug("cloning %s to %s", repo_url, local_repo)
_subprocess_tty(cmd, extra_env=env)
def _git_cmd():
cmd = util.which("git")
if not cmd:
raise remotelib.OperationError(
"git command is not available\n"
"Refer to https://git-scm.com/book/en/v2/Getting-Started-Installing-Git "
"for help installing it."
)
return cmd
def _pull_gist_repo(local_repo, env):
cmd = [_git_cmd(), "-C", local_repo, "pull", "--quiet", "--rebase"]
log.debug("pulling for %s", local_repo)
code = _subprocess_tty(cmd, extra_env=env, allowed_returncodes=(0, 1))
if code == 1:
raise NoSuchGist()
def _refresh_runs_meta(gist_repo, runs_dir, meta_id, local_sync_dir):
for archive in _run_archives(gist_repo):
_unpack_meta(archive, runs_dir)
meta_sync.write_local_meta_id(meta_id, local_sync_dir)
def _run_archives(dir):
for name in os.listdir(dir):
if _is_guild_run(name):
yield os.path.join(dir, name)
def _is_guild_run(name):
return name.startswith("guildai-run-") and name.endswith(".zip")
def _unpack_meta(archive, runs_dir):
log.debug("unpacking %s meta to %s", archive, runs_dir)
with zipfile.ZipFile(archive, "r") as zf:
for name in zf.namelist():
if _is_meta_file(name):
zf.extract(name, runs_dir)
def _is_meta_file(name):
return (
name.endswith(".guild/opref")
or "/.guild/attrs/" in name
or "/.guild/LOCK" in name
)
def _is_git_repo(dir):
return os.path.exists(os.path.join(dir, ".git"))
def _git_current_commit(git_repo):
if not _is_git_repo(git_repo):
return None
cmd = [_git_cmd(), "-C", git_repo, "log", "-1", "--format=%H"]
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
return out.decode("utf-8").strip()
def _extract_runs(runs, archive_dir, dest_dir, gist_name):
for run in runs:
archive = os.path.join(archive_dir, _run_archive_filename(run.id))
if not os.path.exists(archive):
log.error(
"%s archive for gist does not exist (%s), skipping", run.id, archive
)
continue
log.info("Copying %s from %s", run.id, gist_name)
_replace_run(archive, run.id, dest_dir)
def _run_archive_filename(run_id):
return "guildai-run-%s.zip" % run_id
def _replace_run(archive, run_id, dest_dir):
with util.TempDir("guild-gist-run-") as tmp:
_extract_archive(archive, tmp.path)
extracted_run_dir = _validate_extracted_run(tmp.path, run_id, archive)
dest_run_dir = os.path.join(dest_dir, run_id)
_replace_run_dir(dest_run_dir, extracted_run_dir)
def _extract_archive(archive, dest_dir):
with zipfile.ZipFile(archive, "r") as zf:
for name in zf.namelist():
zf.extract(name, dest_dir)
def _validate_extracted_run(dir, run_id, archive):
# RUN_DIR/.guild/opref is required for a run.
extracted_run_dir = os.path.join(dir, run_id)
opref_path = os.path.join(extracted_run_dir, ".guild", "opref")
if not os.path.exists(opref_path):
log.error("%s does not contain expected run %s", archive, run_id)
raise remotelib.OperationError("invalid run archive in gist")
return extracted_run_dir
def _replace_run_dir(run_dir, src_dir):
log.debug("moving %s to %s", src_dir, run_dir)
util.ensure_safe_rmtree(run_dir)
shutil.move(src_dir, run_dir)
def _create_gist(gist_remote_user, gist_remote_name, gist_readme_name, env):
import requests
access_token = _required_gist_access_token(env)
content = _gist_readme_content(gist_remote_user, gist_remote_name)
data = {
"accept": "application/vnd.github.v3+json",
"description": "Guild AI Repository",
"public": True,
"files": {
gist_readme_name: {
"filename": gist_readme_name,
"type": "text/markdown",
"language": "Markdown",
"content": content,
}
},
}
headers = {
"Authorization": "token %s" % access_token,
}
resp = requests.post("https://api.github.com/gists", json=data, headers=headers)
if resp.status_code not in (200, 201):
raise remotelib.OperationError(
"error creating gist: (%i) %s" % (resp.status_code, resp.text)
)
return resp.json()
def _required_gist_access_token(env):
try:
return _required_env("GIST_ACCESS_TOKEN", [env, os.environ])
except KeyError:
raise MissingRequiredEnv(
"missing required environment variable GIST_ACCESS_TOKEN\n"
"This operation requires a GitHub personal access token for "
"creating gists.\n"
"See https://my.guild.ai/docs/gists for more information."
)
def _gist_readme_content(user, remote_name):
return (
"This is a Guild AI runs repository. To access runs, "
"[install Guild AI](https://guild.ai/install) and run `guild pull gist:%s/%s`. "
"For more information about Guild AI Gist based repositories, see "
"[Guild AI - Gists](https://my.guild.ai/docs/gists)." % (user, remote_name)
)
def _export_runs_to_gist_archives(runs, gist_repo):
with util.TempDir("guild-runs-export-") as tmp:
archives = [_run_export_archive(run, tmp.path) for run in runs]
_export_runs(zip(runs, archives))
for archive_src in archives:
archive_dest = os.path.join(gist_repo, os.path.basename(archive_src))
util.ensure_deleted(archive_dest)
shutil.move(archive_src, archive_dest)
def _run_export_archive(run, export_dir):
return os.path.join(export_dir, _run_archive_filename(run.id))
def _export_runs(runs_with_dest):
from guild import run_util
for run, dest in runs_with_dest:
log.info("Compressing %s", run.id)
run_util.export_runs([run], dest, copy_resources=False, quiet=True)
def _push_commit_msg():
import guild
return "`guild push` by %s@%s with version %s" % (
util.user(),
util.hostname(),
guild.version(),
)
def _commit_and_push_gist_repo_for_push(repo, commit_msg, env, remote_name):
_git_add_all(repo, env)
try:
_git_commit(repo, commit_msg, env)
except _NoChanges:
pass
log.info("Copying runs to %s", remote_name)
_git_push(repo, env)
def _git_add_all(local_repo, env, update=False):
cmd = [_git_cmd(), "-C", local_repo, "add", "."]
if update:
cmd.append("-u")
log.debug("adding files for %s", local_repo)
_subprocess_quiet(cmd, extra_env=env)
class _NoChanges(Exception):
pass
def _git_commit(local_repo, msg, env):
cmd = [_git_cmd(), "-C", local_repo, "commit", "-m", msg]
log.debug("commiting for %s", local_repo)
result = _subprocess_quiet(cmd, extra_env=env, allowed_returncodes=(0, 1))
if result == 1:
raise _NoChanges()
def _git_push(local_repo, env):
cmd = [_git_cmd(), "-C", local_repo, "push", "--quiet"]
env = _maybe_askpass(env, local_repo)
log.debug("pushing for %s", local_repo)
_subprocess_tty(cmd, extra_env=env)
def _maybe_askpass(env, local_repo):
if not _gist_access_token_defined(env):
return
askpass_path = _maybe_gist_access_token_script(local_repo)
if not askpass_path:
return env
env = dict(env)
env["GIT_ASKPASS"] = askpass_path
return env
def _gist_access_token_defined(env):
try:
_required_env("GIST_ACCESS_TOKEN", [env, os.environ])
except KeyError:
return False
else:
return True
def _maybe_gist_access_token_script(local_repo):
if util.get_platform() == "Windows":
return None
script_path = _gist_access_token_script(local_repo)
if os.path.exists(script_path):
return script_path
_write_gist_access_token_script(script_path)
return script_path
def _gist_access_token_script(local_repo):
return os.path.join(local_repo, ".git", "gist-access-token")
def _write_gist_access_token_script(path):
with open(path, "w") as f:
f.write("echo $GIST_ACCESS_TOKEN\n")
util.make_executable(path)
def _delete_gist(gist, env):
import requests
access_token = _required_gist_access_token(env)
data = {
"accept": "application/vnd.github.v3+json",
"gist_id": gist["id"],
}
headers = {
"Authorization": "token %s" % access_token,
}
resp = requests.delete(
"https://api.github.com/gists/%s" % gist["id"], json=data, headers=headers
)
if resp.status_code not in (200, 204):
raise remotelib.OperationError(
"error creating gist: (%i) %s" % (resp.status_code, resp.text)
)
def _delete_gist_runs(runs, gist_repo, runs_dir):
for run in runs:
log.info("Deleting %s", run.id)
_delete_gist_repo_run_archive(gist_repo, run.id)
_delete_run(run, runs_dir)
def _delete_gist_repo_run_archive(gist_repo, run_id):
run_archive = os.path.join(gist_repo, _run_archive_filename(run_id))
log.debug("deleting %s", run_archive)
util.ensure_deleted(run_archive)
def _delete_run(run, runs_dir):
run_dir = os.path.join(runs_dir, run.id)
log.debug("deleting %s", run_dir)
util.ensure_safe_rmtree(run_dir)
def _commit_and_push_gist_repo_for_delete(repo, commit_msg, env, remote_name):
_git_add_all(repo, env, update=True)
try:
_git_commit(repo, commit_msg, env)
except _NoChanges:
log.info("Nothing to update for %s - gist is up-to-date", remote_name)
else:
log.info("Updating runs on %s", remote_name)
_git_push(repo, env)
def _delete_commit_msg():
import guild
return "`guild runs rm` by %s@%s with version %s" % (
util.user(),
util.hostname(),
guild.version(),
)
def _subprocess_tty(cmd, extra_env, allowed_returncodes=(0,)):
env = dict(os.environ)
if extra_env:
env.update(extra_env)
log.debug("%r", cmd)
p = subprocess.Popen(cmd, env=env)
p.wait()
if p.returncode not in allowed_returncodes:
log.debug("exit code for %r is %i", cmd, p.returncode)
raise SystemExit("error running %s - see above for details" % cmd[0])
return p.returncode
def _subprocess_quiet(cmd, extra_env, allowed_returncodes=(0,)):
log.debug("%r", cmd)
return remote_util.subprocess_call(
cmd,
extra_env=extra_env,
quiet=True,
allowed_returncodes=allowed_returncodes,
)
| apache-2.0 | 8,576,580,919,284,401,000 | 29.266667 | 88 | 0.598776 | false | 3.312794 | false | false | false |
adamcandy/Gaia | FileTodo.py | 1 | 56922 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##########################################################################
#
# Gaia, task list organiser in with Caldav server sync.
#
# Copyright (C) 2013-2014 Dr Adam S. Candy.
# Dr Adam S. Candy, [email protected]
#
# This file is part of the Gaia project.
#
# Gaia is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Gaia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gaia. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
from Universe import universe, colour
import sys
import os
from datetime import datetime, timedelta
import re
from uuid import uuid4
from Support import error, report
from Support import generate_mono
from Support import repo_add, repo_remove, repo_update
from Parsers import is_relative_date, calculate_delta, prioritystring, is_same_time, timedelta_to_human, do_avoid_weekend, next_weekday, next_increment
def indentation(s, tabsize=2):
sx = s.expandtabs(tabsize)
return (len(sx) - len(sx.lstrip()))/tabsize
#return 0 if sx.isspace() else (len(sx) - len(sx.lstrip()))/tabsize
def parsedate(string, reference=None, alarm=False, allday=False, forward=False):
date = None
if (string is None or len(string) == 0):
if alarm:
if reference is not None:
if allday:
# Warning for day events 1800 - 1000 = 8 hours
date = reference + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
date = reference + universe.defaulttime.diff
else:
string = string.strip()
# Deal with tasks due on a day, not specific time
if len(string) == 6:
allday = True
if alarm:
string = string + universe.defaulttime.alarm.strftime('%H%M')
else:
string = string + universe.defaulttime.due.strftime('%H%M')
try:
if re.match('^\d{6}$', string):
date = datetime.strptime(string, '%y%m%d')
elif re.match('^\d{10}$', string):
try:
date = universe.timezone.localize(datetime.strptime(string, '%y%m%d%H%M'))
#date = datetime.strptime(string, '%y%m%d%H%M')
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
elif is_relative_date(string):
d = calculate_delta(string)
if d is not None:
if reference is not None:
if forward:
date = reference + d
else:
date = reference - d
else:
date = universe.timezone.localize(datetime.strptime(string))
#date = datetime.strptime(string)
except Exception, e:
date = None
error('Date parse error [' + string + ']' + ' Exception: ' + str(e))
if universe.debug: raise
pass
return date, allday
def tasklist_read(name, category=None):
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/' + name
if not os.path.exists(filename):
return None
f = open(filename, 'r')
level = 0
taskline = ''
notes = ''
lines = (f.read().decode('utf8') + os.linesep).splitlines()
f.close()
#end = len(lines)
#blank = False
#for i in range(len(lines)):
# if len(lines[i]) > 0:
# blank = False
# continue
# if not blank:
# blank = True
# continue
# end = i
# break
# Temp
#end = len(lines)
#root = FileTodos(lines[:end], title=name, parents=[category], filenotes=lines[end+1:])
root = FileTodos(lines, title=name, parents=[category])
root.check_for_modified_children()
if root.is_empty():
report(' ' + colour.grey + 'Removing EMPTY ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + root.name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
root.set_modified()
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return root
class FileTodos(object):
def __init__(self, lines=None, filenotes=None, parents=[], parent=None, title=None, flow='parallel', translate=None, number=1, level=None, uid=None, caldav=False, next_action=None):
self.lines = None
self.filenotes = filenotes
if self.filenotes is None:
self.filenotes = []
self.block = []
self.level = -2
# top level modified flag for file updates
self.modified = False
# task level update flag for caldav
self.updated = False
self.sequence = 0
if lines is not None:
self.lines = lines
self.block = [ 0, len(self.lines) ]
if title is not None:
self.level = 0
else:
self.level = indentation(self.lines[0]) + 1
title = self.lines[0].lstrip()
if level is not None:
self.level = level
self.name = None
self.duetext = None
self.alarmtext = None
self.is_checklist = False
self.flowtext = None
self.flow = flow
self.is_header = False
self.is_completed = False
#if caldav:
# self.is_onhold = None
# self.starttext = None
# self.repeat = None
#else:
#self.is_everpresent = False
self.is_onhold = False
self.starttext = None
self.repeat = None
self.expiretext = None
self.wait = ''
self.waitonrepeat = False
self.priority = None
self.is_permanent = False
self.avoidweekends = False
self.current = False
self.error = False
self.sublist = None
self.parents = parents
self.parent = parent
self.number = number
self.uid = uid
self.translate = ''
if translate is not None:
self.translate = translate
self.interpret_task(title)
#if len(self.translate) > 0:
# print self.name, self.translate
self.note = self.find_note()
self.childblocks = self.identify_blocks()
self.children = []
self.due, allday = parsedate(self.duetext)
self.alarm, allday = parsedate(self.alarmtext, reference=self.due, alarm=True, allday=allday)
self.start, allday = parsedate(self.starttext, reference=self.due)
self.expire, allday = parsedate(self.expiretext, reference=self.due, forward=True)
self.active = False
self.titleoptions = ''
self.type = 'file'
self.next_action = next_action
if self.next_action is not None:
self.next_action = next_action.lstrip()
# Need to add next action, in case of checklist, main header is first??
if lines is not None:
if len(self.childblocks) > 0:
filenotesstart = self.childblocks[-1][-1]
else:
filenotesstart = 0
i = filenotesstart
for i in range(filenotesstart, len(lines)):
if len(lines[i]) > 0:
filenotesstart = i
break
if self.level == 0:
#print self.name, filenotesstart
if filenotesstart < len(lines):
if lines[filenotesstart] is not None:
if len(lines[filenotesstart]) > 0:
self.filenotes = lines[filenotesstart:]
if len(self.childblocks) > 0:
self.find_children()
def child_is_task(self, task):
found = False
for child in self.children:
if child.is_same_task(task):
found = True
break
return found
def is_empty(self):
return (not self.has_children() and len(self.filenotes) == 0)
def is_same_task(self, task):
if (len(self.parents) == 0 or len(task.parents) == 0):
return self.name == task.name
else:
return (self.name == task.name and self.parents[0] == task.parents[0])
def is_translate_header(self):
if self.has_children():
if self.is_translate():
if self.parent is None:
return True
else:
if not self.parent.is_translate():
return True
return False
def group(self, masked=True):
if self.is_wait() and masked:
group = 'wait'
elif (self.is_translate() and (not self.is_translate_header()) and masked):
group = self.translate
elif len(self.parents) > 0:
group = self.parents[0]
else:
# Either root of tree, or an un-tied task!
group = 'home'
return group
def allday(self):
return (is_same_time(self.due, universe.defaulttime.due) and is_same_time(self.alarm, universe.defaulttime.alarm) )
def do_repeat(self):
avoid_weekends = (self.group(masked=False) in universe.skipweekendlists or self.avoidweekends)
# Deal with permanent task
if self.is_permanent:
#self.is_onhold = True
detail = ''
if self.waitonrepeat:
self.wait = 'wait'
detail = ' and moved to wait status'
self.set_updated()
report(colour.yellow + 'Permenant task' + detail + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.end)
return
if (self.repeat is None or len(self.repeat) == 0): return
if (self.due is None): return
d = None
if self.waitonrepeat:
self.wait = 'wait'
self.set_updated()
every = False
after = False
random = False
string = self.repeat
if string in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
every = True
if string == 'decennially':
string = '10years'
elif string == 'biennially':
string = '2years'
elif string == 'annually':
string = 'year'
elif string == 'monthly':
string = 'month'
elif string == 'fortnightly':
string = '2weeks'
elif string == 'weekly':
string = 'week'
elif string == 'daily':
string = 'day'
elif re.match('^every\w+$', string):
every = True
string = string[5:]
elif re.match('^after\w+$', string):
after = True
string = string[5:]
elif re.match('^random$', string):
random = True
if every or after or random:
d = calculate_delta(string)
if d is not None:
# Including case of absolute date
new_due = None
new_start = None
new_alarm = None
detail = ''
if every:
# Ensure at least advanced by one d delta
multi = 1
while (self.due + d * multi) < universe.now:
multi += 1
if multi > 1000:
multi = 1
error('Determining multiple every recur time delta for (>1000) ' + self.name)
break
#print 'A', d * multi
#print 'B', self.due
#print 'C', self.due + d * multi
#multi = 0
#d = d * multi
#dmulti = int((universe.now - self.due).total_seconds() // d.total_seconds())
#if dmulti > 0:
# # Event very overdue, such that subsequent repeats missed
# d = (dmulti + 1) * d
# #print "Multi d event", d, dmulti
new_due = self.due + d * multi
if self.start is not None:
if is_relative_date(self.starttext):
new_start = self.start + d * multi
elif (after or random):
if after:
# Use .replace on datetime object instead?
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + d + timedelta(seconds=shift) + timedelta(microseconds=-universe.now.microsecond)
#
new_due = universe.now.replace(second=0, microsecond=0)
shift = (self.due.hour - new_due.hour) * 60 + self.due.minute - new_due.minute
new_due = new_due + d + timedelta(minutes=shift)
#
elif random:
new_due = universe.now.replace(second=0, microsecond=0) + d
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
if new_due is not None:
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Recur task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
error('Determining recur time delta for ' + self.name + ' string[' + string + ']')
return
def add(self, task):
if len(task.parents) == 1:
lists = []
for c in self.children:
if c.name == task.parents[0]:
lists = c.child_names()
break
if (task.sublist is None) or not (task.sublist in lists):
if (task.sublist is not None) and not (task.sublist in lists):
report(colour.red + 'Selected sublist ' + task.sublist + ' not present, adding to the inbox' + colour.end)
task.sublist = 'inbox'
task.parents.append(task.sublist)
task.sublist = None
match = self
for group in task.parents:
found = False
for child in match.children:
if child.name == group:
found = True
match = child
break
if not found:
inbox = FileTodos(title='inbox', parents=match.parents + [match.name], parent=match, translate=self.translate, level=match.level + 1)
match.add_child(inbox)
match = inbox
found = True
match.set_modified(task)
new = FileTodos(lines=task.reformat().splitlines(), parents=match.parents + [match.name], parent=match)
report(colour.green + 'Adding task to ' + colour.greenbright + 'file' + colour.green + ' in ' + '|'.join(new.parents) + colour.green + ':' + colour.end + ' ' + new.name)
match.add_child(new)
def find_task(self, task):
match = None
if self.is_same_task(task):
return self
for child in self.children:
match = child.find_task(task)
if match is not None:
match = match.find_task(task)
break
return match
def find_tasks_by_name(self, task=None, name=None, matches=None, check_is_wait=False):
if matches is None:
matches = []
if task is not None:
name = task.name
if name == self.name:
if (not check_is_wait or (check_is_wait and self.is_wait()) ):
matches.append(self)
for child in self.children:
matches = child.find_tasks_by_name(name=name, matches=matches)
return matches
def find_task_parent(self, task):
#if task.name in self.child_names():
if self.child_is_task(task):
return self
for child in self.children:
parents = child.find_task_parent(task)
if parents is not None:
return parents
return None
def children_all_completed(self):
allcomplete = True
for child in self.children:
if not child.is_completed:
allcomplete = False
return allcomplete
def uncomplete_childen(self):
self.is_completed = False
for child in self.children:
child.uncomplete_childen()
def unwait_childen(self):
# Assumes working just after uncompleted (for waitonrepeat test)
if self.waitonrepeat:
self.wait = 'wait'
else:
self.wait = ''
for child in self.children:
child.unwait_childen()
def is_repeat(self):
if self.repeat is not None:
if len(self.repeat) > 0:
if self.due is not None:
return True
if self.is_permanent:
return True
return False
def recur(self, task, root=None, recursive=False):
if root is None:
root = self
match = None
removed = False
#if task.name in self.child_names():
if self.child_is_task(task):
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
break
# Should complete/remove any children here - otherwise need to wait for next run
match.uncomplete_childen()
match.unwait_childen()
if ((match.repeat is not None and match.due is not None) or match.is_permanent):
match.do_repeat()
#match.update()
else:
root.remove(task)
removed = True
else:
for child in self.children:
match = child.recur(task, root=root, recursive=True)
if match is not None:
break
if not recursive:
if match is not None:
self.make_modified(match)
if removed: return None
return match
def remove(self, task, root=None, repeats=False, recursive=False):
if root is None:
root = self
match = None
if self.child_is_task(task):
# Check if new tasks become active
if self.is_repeat():
repeats = True
new_children = []
for child in self.children:
#if child.name == task.name:
if child.is_same_task(task):
match = child
if repeats:
match.is_completed = True
else:
new_children.append(child)
if not match.is_header:
if repeats:
action = 'Completing'
else:
self.children = new_children
action = 'Removing'
stat = colour.greenbright + 'OK' + colour.end if match is not None else colour.redbright + 'FAIL' + colour.end
report(colour.red + action + ' task from full tree in' + colour.end + ' ' + colour.redbright + 'file' + '|' + '|'.join(match.parents) + colour.red + ':' + colour.end + ' ' + match.name + ' ' + stat)
else:
if self.is_repeat():
repeats = True
for child in self.children:
match = child.remove(task, root=root, repeats=repeats, recursive=True)
if match is not None:
break
# Check if parent requires removal
if match is not None:
# removed: child, parent: self X actually match?
if child.level > 0:
if child.name == match.parents[-1]:
if (child.is_repeat() or repeats):
if child.children_all_completed():
report(colour.red + ' need to complete parent also, ' + colour.redbright + child.name + colour.end)
# Uncomplete all children of child
child.uncomplete_childen()
child.unwait_childen()
if child.is_repeat():
# Apply repeat to child
child.do_repeat()
else:
self.remove(child, repeats=repeats, recursive=True)
match = child
else:
if not child.has_children():
if not child.is_header:
report(colour.red + ' need to remove parent also, ' + colour.redbright + child.name + colour.end)
self.remove(child, recursive=True)
match = child
if not recursive:
if match is not None:
self.make_modified(match)
return match
def clear_titleoptions(self):
self.starttext = None
self.repeat = None
#self.is_onhold = False
def is_equal(self, other, caldav=False):
if (self.due != other.due):
return False
if (self.alarm != other.alarm):
return False
if (self.note != other.note):
return False
if (self.priority != other.priority):
return False
if (self.wait != other.wait):
return False
if (self.next_action != other.next_action):
return False
#print self.name, '|', self.group(), other.group()
# Don't compare translate if either task is waiting
if (not self.is_wait() and not other.is_wait()):
if (self.translate != other.translate):
#print self.name, '|', self.group(), other.group()
return False
if caldav:
return True
# Optional checks:
# Note not possible for caldav
# start, starttext
#if (self.starttext is not None and other.starttext is not None):
if (self.starttext != other.starttext):
return False
# repeat
#if (self.repeat is not None and other.repeat is not None):
if (self.repeat != other.repeat):
return False
# is_onhold
#if (self.is_onhold is not None and other.is_onhold is not None):
if (self.is_onhold != other.is_onhold):
return False
# flow (no access, add later?)
# is_permanent (no access - add later?)
# is_header (no access from Caldav?)
# is_checklist (not used)
return True
def __eq__(self, other):
if isinstance(other, FileTodos):
return self.is_equal(other)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __lt__(self, other):
# Check due
if (self.due is None and other.due is not None):
return False
if (self.due is not None and other.due is None):
return True
if ((self.due is not None and other.due is not None) and self.due != other.due):
return self.due < other.due
# Check priorities
if (self.priority is None and other.priority is not None):
return False
if (self.priority is not None and other.priority is None):
return True
if ((self.priority is not None and other.priority is not None) and self.priority != other.priority):
# Note priroties in reverse
return self.priority < other.priority
# Check wait
if (self.is_wait() and not other.is_wait):
return False
if (not self.is_wait() and other.is_wait):
return True
return self.name < other.name
def update(self, task, due=False, note=False, priority=False, wait=False, recursive=False, caldav=False, previous=None, caldavsource=False):
# Also update FileTodo.__eq__
# To stop passing all of the above around...:
if previous is not None:
due = (task.due != previous.due) or (task.alarm != previous.alarm) or due
note = (task.note != previous.note) or note
next_action = (task.next_action != previous.next_action)
#next_action = True
#print '['+previous.next_action+']', '['+task.next_action+']'
priority = (task.priority != previous.priority) or priority
wait = (task.wait != previous.wait) or wait
# new:
#starttext = (task.starttext is not None and previous.starttext is not None) and (task.starttext != previous.starttext)
#repeat = (task.repeat is not None and previous.repeat is not None) and (task.repeat != previous.repeat)
#is_onhold = (task.is_onhold is not None and previous.is_onhold is not None) and (task.is_onhold != previous.is_onhold)
translate = False
if (not task.is_wait() and not previous.is_wait()):
translate = (task.translate != previous.translate)
# Deal with updates on tasks from caldav data (i.e. ensure below are False)
starttext = (task.starttext != previous.starttext) and (not caldavsource)
repeat = (task.repeat != previous.repeat) and (not caldavsource)
is_onhold = (task.is_onhold != previous.is_onhold) and (not caldavsource)
#print 'caldavsource', caldavsource, starttext, repeat, is_onhold, task.name
found = None
#if self.name == task.name:
if self.is_same_task(task):
detail = ''
if priority:
detail = detail + ' priority: %(old)s -> %(new)s' % {
'old': prioritystring(self.priority, shownone=True),
'new': prioritystring(task.priority, shownone=True),
}
self.priority = task.priority
if due:
detail = detail + ' due: %(old)s -> %(new)s, alarm: %(aold)s -> %(anew)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if task.due is None else task.due.strftime('%y%m%d%H%M%z'),
'aold': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'anew': '[empty]' if task.alarm is None else task.alarm.strftime('%y%m%d%H%M%z'),
}
self.due = task.due
self.alarm = task.alarm
# If due becomes None any start is now no longer relevant so ensure it is also cleared
# Might need to do this for alarm too? bit complicated...
if (self.due is None and self.starttext is not None):
detail = detail + ' start: %(old)s -> [empty] (enforced)' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
}
self.starttext = None
if wait:
detail = detail + ' wait: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.wait)+']' if (self.wait is None or self.wait == '') else self.wait,
'new': '[empty:'+str(task.wait)+']' if (task.wait is None or task.wait == '') else task.wait
}
self.wait = task.wait
# asc 131203
# if translate:
# detail = detail + ' translate: %(old)s -> %(new)s' % {
# 'old': '[empty:'+str(self.translate)+']' if (self.translate is None or self.translate == '') else self.translate,
# 'new': '[empty:'+str(task.translate)+']' if (task.translate is None or task.translate == '') else task.translate
# }
# self.translate = task.translate
if note:
detail = detail + ' note: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.note)+']' if (self.note is None or self.note == '') else ' + '.join(self.note.splitlines()),
'new': '[empty:'+str(task.note)+']' if (task.note is None or task.note == '') else ' + '.join(task.note.splitlines()),
}
self.note = task.note
# new
if is_onhold:
detail = detail + ' hold: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.is_onhold)+']' if (self.is_onhold is None or self.is_onhold == '') else self.is_onhold,
'new': '[empty:'+str(task.is_onhold)+']' if (task.is_onhold is None or task.is_onhold == '') else task.is_onhold
}
self.is_onhold = task.is_onhold
if starttext:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.starttext)+']' if (self.starttext is None or self.starttext == '') else ' + '.join(self.starttext.splitlines()),
'new': '[empty:'+str(task.starttext)+']' if (task.starttext is None or task.starttext == '') else ' + '.join(task.starttext.splitlines()),
}
self.starttext = task.starttext
if repeat:
detail = detail + ' repeat: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.repeat)+']' if (self.repeat is None or self.repeat == '') else ' + '.join(self.repeat.splitlines()),
'new': '[empty:'+str(task.repeat)+']' if (task.repeat is None or task.repeat == '') else ' + '.join(task.repeat.splitlines()),
}
self.repeat = task.repeat
if next_action:
detail = detail + ' next action: %(old)s -> %(new)s' % {
'old': '[empty:'+str(self.next_action)+']' if (self.next_action is None or self.next_action == '') else ' + '.join(self.next_action.splitlines()),
'new': '[empty:'+str(task.next_action)+']' if (task.next_action is None or task.next_action == '') else ' + '.join(task.next_action.splitlines()),
}
self.next_action = task.next_action
#self.sequence_increment()
if caldav:
caltype = 'caldav'
elif recursive:
caltype = 'file'
else:
caltype = 'active'
updated = False
if caldav:
# Assumes have previous
if (due or note or priority or wait or translate or next_action):
from CaldavClient import ical_event_update
ical_event_update(self, due=due, note=note, priority=priority, wait=wait, translate=translate, previous=previous, next_action=next_action)
updated = True
else:
updated = True
if updated:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
else:
report(colour.yellow + 'Updating task in' + colour.end + ' ' + colour.yellowbright + caltype + '|' + '|'.join(self.parents) + colour.yellow + ' not required and '+ colour.yellowbright +'skipped' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
found = self
else:
for child in self.children:
found = child.update(task, due=due, note=note, priority=priority, wait=wait, recursive=True, caldav=caldav, previous=previous, caldavsource=caldavsource)
if found is not None:
break
if ((not recursive) and (not caldav)):
self.make_modified(found)
return found
def make_modified_parents(self, task=None):
if task is None:
task = self
if len(self.parents) > 1:
self.parent.make_modified_parents(task=task)
elif len(self.parents) == 1:
self.make_modified(task=task)
return
def check_for_modified_children(self, root=True):
modified = False
if self.modified:
modified = True
for child in self.children:
modified = modified or child.check_for_modified_children(root=False)
if root and modified:
self.set_modified()
return modified
def set_modified(self, task=None):
if task is not None:
name = task.name
else:
name = '[not provided]'
if len(self.parents) > 0:
parentstr = self.parents[-1]
else:
parentstr = '[parent unknown]'
report(colour.magenta+'Marking modified ' + parentstr + '|' + self.name + ' for task ' + name + colour.end)
self.modified = True
def make_modified(self, task):
def to_mark(current, task):
if len(current.parents) == 0:
return False
return (task.parents[1] == current.name and task.parents[0] == current.parents[0])
if len(task.parents) < 2:
return
if to_mark(self, task):
if not self.modified:
self.set_modified(task)
else:
for child in self.children:
child.make_modified(task)
def child_names(self):
names = []
for child in self.children:
names.append(child.name)
return names
def has_children(self):
if len(self.children) > 0:
return True
return False
def is_sequential(self):
return self.flow == 'sequential'
def set_wait(self, string=None):
if string is None:
string = 'wait'
self.wait = string
for child in self.children:
child.set_wait(string)
def set_updated(self, follow=True):
self.updated = True
if follow:
for child in self.children:
child.set_updated(follow=follow)
def is_translate(self):
if self.translate is not None:
if len(self.translate) > 0:
return True
return False
def is_wait(self):
if self.wait is not None:
if len(self.wait) > 0:
return True
return False
def is_available(self):
if self.is_onhold:
return False
if self.error:
return False
#if self.is_wait():
# return False
if self.start is not None:
if self.start > universe.now:
return False
return True
def is_expired(self):
if self.expire is not None:
if self.expire <= universe.now:
return True
return False
def is_active(self):
# Exclude the root and projects
if self.level <= 0:
return False
if self.is_header:
return False
if not self.is_available():
return False
if self.parent.is_wait():
# Only include highest wait
return False
#if (self.parent.is_translate_header() and self.parent.is_wait()):
# # Note onhold wipes out children anyway - here wait is special case
# return False
#if ( len(self.translate) > 0 and len(self.parent.translate) == 0 ):
if self.is_translate_header():
# Header of aux list
# Not great returning True here
return True
# Clause for grouped / lists
if ((not self.is_checklist) and (self.has_children())):
return False
# Restricted to next actions, when sequential
return True
def find_all_names(self, todos=None):
if todos == None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self.name)
for child in self.children:
todos = child.find_all_names(todos)
return todos
def find_all_tasks(self, todos=None):
if todos == None:
todos = []
if not self.error:
if self.level >= 1:
todos.append(self)
for child in self.children:
todos = child.find_all_tasks(todos)
return todos
def find_all_task_occurances(self, task, occurances=None):
if occurances == None:
occurances = 0
if self.is_same_task(task):
occurances +=1
#report(' DUPLICATE CALDAV: ' + str(occurances) + ' ' + task.name)
for child in self.children:
occurances = child.find_all_task_occurances(task, occurances)
return occurances
def find_active(self, active=None):
if active == None:
active = []
if self.is_active():
active.append(self)
self.active = True
is_sequential = self.is_sequential()
for child in self.children:
if child.is_completed:
continue
if not child.is_available():
if is_sequential:
break
continue
active = child.find_active(active)
if is_sequential:
break
return active
def is_valid_task(self):
if self.level <= 0:
return False
if self.is_header:
return False
if self.is_onhold:
return False
if self.error:
return False
return True
def find_next_actions(self, set_updated=True, updated=None):
#if 'Meshing ' in self.name:
# verb=True
#else:
# verb=False
if updated is None:
updated = []
next_action = self.find_next_action()
#if verb: print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
if self.next_action != next_action:
self.next_action = next_action
if set_updated:
self.set_updated(follow=False)
updated.append(self.name)
#print ' UPDATED', self.name
#print self.name + ': ['+str(self.next_action)+']', '['+str(next_action)+']'
for child in self.children:
child.find_next_actions(set_updated=set_updated, updated=updated)
return updated
def find_next_action(self):
next_action = None
if self.level <= 0:
return None
if self.parent.is_sequential():
neighbours = self.parent.children
found = False
for neighbour in neighbours:
if found:
if neighbour.is_valid_task():
next_action = neighbour
break
elif neighbour.name == self.name:
found = True
if next_action is None:
return self.parent.find_next_action()
else:
return next_action.name
# next_actions = []
# if len(self.parents) == 0:
# return next_actions
# p = self.parents[-1]
# if not p.is_sequential():
# return next_actions
def find_error(self, error=None):
if error == None:
error = []
if self.error:
error.append(self)
for child in self.children:
error = child.find_error(error)
return error
def show_error(self, show_notes=False):
errors = self.find_error()
if len(errors) == 0: return
report(colour.redbright + 'ERROR' + colour.end)
for task in errors:
report(task.to_string(indentnone=True, notes=show_notes, show_where=True), forced=True)
def is_important(self):
return (self.priority is not None)
def is_due_on_day(self, day):
if self.due is None:
return False
if self.due.year != day.year:
return False
if self.due.month != day.month:
return False
if self.due.day != day.day:
return False
return True
def is_overdue(self):
if self.due is None:
return False
return universe.now > self.due
def is_due_today(self):
return self.is_due_on_day(universe.now)
def is_due_tomorrow(self):
return self.is_due_on_day(universe.now + timedelta(days=1))
def is_overdue_yesterday_or_past(self):
return (self.is_overdue() and (not self.is_due_today()))
def is_overdue_today_tomorrow_important(self):
return (self.is_overdue() or self.is_due_today() or self.is_due_tomorrow() or self.is_important())
def make_due_today(self, displacement=0, avoid_weekends=False):
new_due = None
new_start = None
new_alarm = None
detail = ''
# shift from now time to due time, all today
#shift = ((self.due.hour - universe.now.hour) * 60 + (self.due.minute - universe.now.minute)) * 60 + self.due.second - universe.now.second
#new_due = universe.now + timedelta(seconds=shift)
if self.repeat == 'random':
new_due = universe.now.replace(second=0, microsecond=0) + calculate_delta('random')
else:
new_due = universe.now.replace(hour=self.due.hour, minute=self.due.minute, second=0, microsecond=0)
# Apply displacement days
new_due = new_due + timedelta(days=displacement)
new_due = do_avoid_weekend(new_due, avoid_weekends=avoid_weekends)
# Update start time
if (self.starttext is not None and len(self.starttext) > 0):
string = self.starttext
if is_relative_date(string):
d = calculate_delta(string)
if d is not None:
new_start = new_due - d
# Update alarm
if self.alarm is not None:
if self.alarmtext is not None:
self.alarm, allday = parsedate(self.alarmtext, reference=new_due, alarm=True, allday=self.allday())
elif self.allday():
# Warning for day events 1800 - 1000 = 8 hours
new_alarm = new_due + universe.defaulttime.alldaydiff
else:
# Default warning of an hour
new_alarm = new_due + universe.defaulttime.diff
detail = detail + ' due: %(old)s -> %(new)s' % {
'old': '[empty]' if self.due is None else self.due.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_due is None else new_due.strftime('%y%m%d%H%M%z')
}
self.due = new_due
if new_start is not None:
detail = detail + ' start: %(old)s -> %(new)s' % {
'old': '[empty]' if self.start is None else self.start.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_start is None else new_start.strftime('%y%m%d%H%M%z')
}
self.start = new_start
if new_alarm is not None:
detail = detail + ' alarm: %(old)s -> %(new)s' % {
'old': '[empty]' if self.alarm is None else self.alarm.strftime('%y%m%d%H%M%z'),
'new': '[empty]' if new_alarm is None else new_alarm.strftime('%y%m%d%H%M%z')
}
self.alarm = new_alarm
report(colour.yellow + 'Update due to today for important task in' + colour.end + ' ' + colour.yellowbright + '|'.join(self.parents) + colour.yellow + ':' + colour.end + ' ' + self.name + colour.grey + detail + colour.end)
self.make_modified_parents()
return
def prioritycurrent(self, caldav=False):
# Make tasks with a priority that have a due time in the previous days or past,
# due today at the same time
# Only applied to current active list?
#print self.name
if ((self.is_important() or self.current) and self.is_overdue_yesterday_or_past()):
#print 'HERE', self.name
try:
# Check here if in skipweekendlists
avoid_weekends = ((self.group(masked=False) in universe.skipweekendlists) or self.avoidweekends)
# self.make_due_next_work_day()
self.make_due_today(avoid_weekends=avoid_weekends)
# state.aiyo.make_modified(self)
if caldav:
from CaldavClient import ical_event_update
ical_event_update(self, due=True)
else:
self.set_modified()
except Exception, e:
out = os.linesep + ' Task: ' + self.name + ' ' + self.due.strftime('%y%m%d%H%M')
error('Error in making a priority task current, exception: ' + str(e) + out)
pass
def to_string(self, reformat=False, indentfull=False, indentnone=False, notes=True, show_where=False, show_next_action=False, show_translate_inheritance=False):
iro = generate_mono(reformat)
contentlist = []
if self.lines is not None:
for i in range(len(self.lines)):
contentlist.append('%(num)6d %(indent)2d %(content)s' % { 'num':i, 'indent':indentation(self.lines[i]), 'content':self.lines[i] })
content = os.linesep.join(contentlist)
if not notes:
note = ''
elif self.note is None:
note = ''
elif len(self.note) == 0:
note = ''
else:
note = os.linesep + os.linesep.join([ ' ' * 4 + notelines for notelines in self.note.splitlines() ])
note = iro.grey + note + iro.end
out_due = ''
out_due_date = None
if self.due is not None:
out_due_date = self.due
elif self.alarm is not None:
out_due_date = self.alarm
else:
out_due = ''
if out_due_date is not None:
if self.allday():
out_due = out_due_date.strftime('%y%m%d')
else:
out_due = out_due_date.strftime('%y%m%d%H%M')
# Work out diff
if self.alarm is not None:
out_alarm = self.alarm.strftime('%y%m%d%H%M')
if self.due is not None:
d = self.alarm - self.due
if (self.allday() and d == universe.defaulttime.alldaydiff):
out_alarm = ''
elif (not self.allday() and d == universe.defaulttime.diff):
out_alarm = ''
else:
dh = timedelta_to_human(d)
if dh is not None:
out_alarm = dh
else:
out_alarm = ''
if len(out_alarm) > 0:
out_alarm = ' !' + out_alarm
out_priority = prioritystring(self.priority, spacer=True)
translate = ''
if self.translate is not None:
if self.is_translate():
if (self.parent is None or show_translate_inheritance):
translate = ' =' + self.translate
else:
if not self.parent.is_translate():
translate = ' =' + self.translate
#print self.name, self.translate, translate, self.parent
if show_where:
parents = ' ' + (iro.grey+':'+iro.end).join([ iro.grey + x + iro.end for x in self.parents ])
else:
parents = ''
if show_next_action and (self.next_action is not None) and (len(str(self.next_action)) > 0):
next_action = ' ' + iro.green + universe.next_char + str(self.next_action) + iro.end
else:
next_action = ''
if self.is_overdue():
highlight_name = iro.redbright
elif self.is_due_today():
highlight_name = iro.red
elif self.is_due_tomorrow():
highlight_name = iro.yellow
elif self.priority is not None:
highlight_name = iro.yellow
else:
highlight_name = ''
options = '''\
%(spacer)s%(start)s%(divider)s%(due)s%(expire)s%(alarm)s%(priority)s%(repeat)s%(translate)s%(checklist)s%(flow)s%(header)s%(waitonrepeat)s%(permanent)s%(current)s%(avoidweekends)s%(wait)s%(paused)s%(completed)s%(parents)s%(next)s%(error)s''' \
% {
'start': '' if (self.starttext is None or len(self.starttext) == 0) else iro.cyan + self.starttext + iro.end,
'due': iro.blue + out_due + iro.blue,
'alarm': iro.red + out_alarm + iro.end,
'priority': iro.redbright + out_priority + iro.end,
'divider': '' if (self.starttext is None or len(self.starttext) == 0 ) else iro.grey + ':' + iro.end,
'repeat': '' if (self.repeat is None or len(self.repeat) == 0) else ' ' + iro.magenta + self.repeat + iro.end,
'expire': '' if (self.expiretext is None or len(self.expiretext) == 0) else ' ' + iro.magenta + self.expiretext + iro.end,
'spacer': '' if ((self.starttext is None or len(self.starttext) == 0) and (len(out_due) == 0)) else ' ',
'translate': iro.yellow + translate + iro.end,
'checklist': iro.yellow+' checklist'+iro.end if self.is_checklist else '',
'header': iro.yellow+' header'+iro.end if self.is_header else '',
'completed': iro.green+' completed'+iro.end if self.is_completed else '',
'paused': iro.blue+' hold'+iro.end if self.is_onhold else '',
'permanent': iro.magenta+' permanent'+iro.end if self.is_permanent else '',
'current': iro.magenta+' current'+iro.end if self.current else '',
'avoidweekends': iro.magenta+' avoidweekends'+iro.end if self.avoidweekends else '',
'wait': ' ' + iro.blue+self.wait+iro.end if self.is_wait() else '',
'waitonrepeat': iro.blue+' waitonrepeat'+iro.end if self.waitonrepeat else '',
'error': iro.redbright+' ERROR'+iro.end if self.error else '',
'flow': iro.magenta+' ' + self.flowtext+iro.end if self.flowtext is not None else '',
'parents': parents,
'next': next_action,
}
text = '''%(name)s%(spacer)s%(options)s%(note)s''' \
% {
'name': highlight_name + self.name + iro.end,
'spacer': '' if len(options) == 0 else ' ',
'options': options,
'note': note,
}
if indentnone:
indent = 2
else:
indentmod = 0
if indentfull:
indentmod = 2
if reformat:
indentmod = -1
indent = (self.level + indentmod) * 2
text = os.linesep.join([ ' ' * indent + notelines for notelines in text.splitlines() ])
return text
def __str__(self):
return self.to_string()
def find_children(self):
for i in range(len(self.childblocks)):
block = self.childblocks[i]
parents = []
for p in self.parents + [self.name]:
parents.append(p)
child = FileTodos(self.lines[block[0]:block[1]], parents = parents, number=i+1, parent=self, translate=self.translate)
self.add_child(child)
def find_note(self):
if self.lines is None: return ''
if len(self.lines) == 0: return ''
if self.level == 0:
if indentation(self.lines[0]) < self.level + 1: return ''
else:
if len(self.lines) == 1: return ''
if indentation(self.lines[1]) < self.level + 1: return ''
note = []
for i in range(len(self.lines)):
if ((self.level > 0) and (i == 0)): continue
if indentation(self.lines[i]) < self.level + 1: break
note.append(re.sub('^'+ ' ' * (self.level + 1) * 2, '', self.lines[i]))
if len(note) == 0:
return ''
return os.linesep.join(note)
def set_note(self, obj):
self.note = obj
def add_child(self, obj):
obj.parent = self
self.children.append(obj)
def set_block(self, obj):
self.block = obj
def set_childblocks(self, obj):
self.childblocks = obj
def show_tree(self, indentfull=True, notes=True, activeonly=False, availableonly=False):
if ((activeonly or availableonly) and not self.is_available()): return
if (activeonly and not self.is_active()): return
report(self.to_string(indentfull=indentfull, notes=notes), forced=True)
for child in self.children:
child.show_tree(indentfull=indentfull, notes=notes, activeonly=activeonly, availableonly=availableonly)
def reformat(self):
output = ''
if self.level > 0:
output = self.to_string(reformat=True) + os.linesep
for child in self.children:
output = output + child.reformat()
if (self.level == 0 and self.filenotes is not None):
output = output + os.linesep.join(['',''] + self.filenotes)
return output
def write(self, name=None, category=None):
if not self.modified: return False
if name is None:
name = self.name
if len(self.parents) > 0:
category = self.parents[0]
if category is None:
filename = universe.dataroot + name
else:
filename = universe.dataroot + category + '/'
if not os.path.exists(filename):
# Could be case here where file exists in place of foldername, this will cause trouble!
os.mkdir(filename)
filename = filename + name
repo_in = os.path.exists(filename)
report(colour.grey + 'Writing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(to' + colour.grey + ' ' + filename + colour.grey + ')' + colour.end)
if not universe.dry:
f = open(filename, 'w')
f.write(self.reformat().encode('utf-8'))
f.close()
if not repo_in:
repo_add(filename)
if self.is_empty():
report(' ' + colour.grey + 'Removing ' + colour.blue + category + colour.grey + '/' + colour.yellowbright + name + colour.end + ' ' + colour.grey + '(' + colour.grey + filename + colour.grey + ')' + colour.end)
if not universe.dry:
try:
if os.path.exists(filename):
os.remove(filename)
repo_remove(filename)
except:
pass
return True
def identify_blocks(self, start=None, end=None):
lines_to_excluded_section = 2
debug = False
#debug = (self.name == 'finance')
if self.lines is None:
return []
def add_block(r):
blocks.append(r)
if debug: print ' ', r
blocks = []
if start is None:
start = 0
if end is None:
end = len(self.lines)
if len(self.lines) <= 1: return blocks
r = [ -1, -1 ]
blanks = 0
for i in range(start, end):
line = self.lines[i]
indent = indentation(line)
if debug: print i, blanks, r, indent, line
if len(line) == 0:
blanks += 1
continue
# Indent is of current level
if indent == self.level:
# Existing block
if (r[0] > -1 and r[1] == -1):
if debug: print 'complete', blanks, blanks >= 2
r[1] = i
add_block(r)
r = [ -1, -1 ]
if r[0] == -1:
if debug: print 'new'
# If 2 or more previous blanks AND now indent = level
if blanks >= lines_to_excluded_section: break
# Start new block
if len(line.strip()) > 0:
r[0] = i
blanks = 0
# Add concluding block, if one has begun
if ((r[0] > -1) and (r[1] == -1)):
r[1] = i + 1
add_block(r)
if debug: print self.name, blocks
if debug:
report('XXXX'+ self.name)
print blocks
if len(blocks) > 0: print os.linesep.join(self.lines[blocks[-1][0]:blocks[-1][1]])
sys.exit(1)
return blocks
def interpret_task(self, title):
sections = title.split(' ', 1)
if len(sections) == 2:
# Check if len(sections[1]) > 0?
self.name = sections[0]
title = sections[1]
else:
self.name = title
title = ''
words = title.split(' ')
titlelist = []
for word in words:
# NLP not working here, as cannot apply set_modified at this early point of parsing,
# would need to mark to update aiyo at a later stage, once the FileTodo object
# has been set up.
if re.match('^today$', word):
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^tomorrow$', word):
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif word in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] \
+ ['mon', 'tues', 'tue', 'wed', 'thurs', 'thu', 'thur', 'fri', 'sat', 'sun']:
self.duetext = next_weekday(word)
self.set_modified()
elif re.match('^\d*(day|week|month|year)s*$', word):
self.duetext = next_increment(word)
self.set_modified()
elif re.match('^\w+:today$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = universe.now.strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:tomorrow$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = (universe.now + timedelta(days=1)).strftime('%y%m%d')
self.set_modified()
elif re.match('^\w+:(monday|tuesday|wednesday|thursday|friday|saturday|sunday|mon|tues|tue|wed|thurs|thu|thur|fri|sat|sun)$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_weekday(self.duetext)
self.set_modified()
elif re.match('^\w+:\d*(day|week|month|year)s*$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
self.duetext = next_increment(self.duetext)
self.set_modified()
elif re.match('^\d{6}$', word):
self.duetext = word
elif re.match('^\d{10}$', word):
self.duetext = word
elif re.match('^\d{6}:$', word):
self.starttext = word[:-1]
elif re.match('^\d{10}:$', word):
self.starttext = word[:-1]
elif re.match('^\w+:\d{6}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:\d{10}$', word):
self.starttext, self.duetext = word.rsplit(':', 1)
elif re.match('^\w+:$', word):
self.starttext = word[:-1]
elif re.match('^!\d{6}$', word):
self.alarmtext = word[1:]
elif re.match('^!\d{10}$', word):
self.alarmtext = word[1:]
elif (re.match('^!\w+$', word) and is_relative_date(word)):
self.alarmtext = word[1:]
elif re.match('^!$', word):
self.priority = 9
elif re.match('^!!$', word):
self.priority = 5
elif re.match('^!!!$', word):
self.priority = 1
elif re.match('^every\w+$', word):
self.repeat = word
elif re.match('^after\w+$', word):
self.repeat = word
elif re.match('^random$', word):
self.repeat = word
elif word in ['decennially', 'biennially', 'annually', 'monthly', 'fortnightly', 'weekly', 'daily']:
self.repeat = word
elif re.match('^expire\w+$', word):
self.expiretext = word
elif re.match('^checklist$', word):
self.is_checklist = True
elif re.match('^sequential$', word):
self.flowtext = 'sequential'
elif re.match('^parallel$', word):
self.flowtext = 'parallel'
elif re.match('^header$', word):
self.is_header = True
elif re.match('^completed$', word):
self.is_completed = True
elif re.match('^paused$', word):
self.is_onhold = True
elif re.match('^onhold$', word):
self.is_onhold = True
elif re.match('^hold$', word):
self.is_onhold = True
elif re.match('^permanent$', word):
self.is_permanent = True
elif re.match('^avoidweekends$', word):
self.avoidweekends = True
elif re.match('^current$', word):
self.current = True
#elif re.match('^everpresent$', word):
# self.is_everpresent = True
elif re.match('^waitonrepeat$', word):
self.waitonrepeat = True
#self.wait = 'wait'
elif re.match('^wait$', word):
self.wait = word
elif re.match('^ERROR$', word):
self.error = True
# asc
elif re.match('^=\w+$', word):
self.translate = word[1:]
elif re.match('^@\w+$', word):
self.sublist = word[1:]
else:
titlelist.append(word)
if self.flowtext is not None:
self.flow = self.flowtext
| gpl-3.0 | -4,814,139,854,392,383,000 | 34.845088 | 271 | 0.593531 | false | 3.500523 | false | false | false |
Thortoise/Super-Snake | Blender/animation_nodes-master/sockets/shape_key.py | 1 | 2668 | import bpy
from bpy.props import *
from bpy.types import ShapeKey
from .. events import propertyChanged
from .. base_types.socket import AnimationNodeSocket
from .. utils.id_reference import tryToFindObjectReference
class ShapeKeySocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ShapeKeySocket"
bl_label = "Shape Key Socket"
dataType = "Shape Key"
allowedInputTypes = ["Shape Key"]
drawColor = (1.0, 0.6, 0.5, 1)
storable = False
comparable = True
objectName = StringProperty(update = propertyChanged,
description = "Load the second shape key of this object (the first that is not the reference key)")
def drawProperty(self, layout, text, node):
row = layout.row(align = True)
row.prop_search(self, "objectName", bpy.context.scene, "objects", icon = "NONE", text = text)
self.invokeFunction(row, node, "assignActiveObject", icon = "EYEDROPPER")
def getValue(self):
object = self.getObject()
if object is None: return None
if object.type not in ("MESH", "CURVE", "LATTICE"): return None
if object.data.shape_keys is None: return None
try: return object.data.shape_keys.key_blocks[1]
except: return None
def getObject(self):
if self.objectName == "": return None
object = tryToFindObjectReference(self.objectName)
name = getattr(object, "name", "")
if name != self.objectName: self.objectName = name
return object
def updateProperty(self):
self.getObject()
def assignActiveObject(self):
object = bpy.context.active_object
if object:
self.objectName = object.name
@classmethod
def getDefaultValue(cls):
return None
@classmethod
def correctValue(cls, value):
if isinstance(value, ShapeKey) or value is None:
return value, 0
return cls.getDefaultValue(), 2
class ShapeKeyListSocket(bpy.types.NodeSocket, AnimationNodeSocket):
bl_idname = "an_ShapeKeyListSocket"
bl_label = "Shape Key List Socket"
dataType = "Shape Key List"
baseDataType = "Shape Key"
allowedInputTypes = ["Shape Key List"]
drawColor = (1.0, 0.6, 0.5, 0.5)
storable = False
comparable = False
@classmethod
def getDefaultValue(cls):
return []
@classmethod
def getDefaultValueCode(cls):
return "[]"
@classmethod
def correctValue(cls, value):
if isinstance(value, list):
if all(isinstance(element, ShapeKey) or element is None for element in value):
return value, 0
return cls.getDefaultValue(), 2
| gpl-3.0 | -1,726,559,515,112,783,400 | 30.761905 | 107 | 0.652174 | false | 3.912023 | false | false | false |
almostearthling/when-wizard | share/when-wizard/plugins/cond-event-batterylow.py | 1 | 1402 | # file: share/when-wizard/templates/cond-event-batterylow.py
# -*- coding: utf-8 -*-
#
# Condition plugin for the low battery event
# Copyright (c) 2015-2018 Francesco Garosi
# Released under the BSD License (see LICENSE file)
import locale
from plugin import EventConditionPlugin, PLUGIN_CONST, plugin_name
# setup i18n for both applet text and dialogs
locale.setlocale(locale.LC_ALL, locale.getlocale())
locale.bindtextdomain(APP_NAME, APP_LOCALE_FOLDER)
locale.textdomain(APP_NAME)
_ = locale.gettext
HELP = _("""\
This event will occur when the battery is considered critically low by the
system: use this only if the event is not caught by the system itself, for
example by hibernating the computer.
""")
EVENT_SYSTEM_BATTERY_LOW = 'battery_low'
class Plugin(EventConditionPlugin):
def __init__(self):
EventConditionPlugin.__init__(
self,
basename=plugin_name(__file__),
name=_("Low Battery"),
description=_("The Battery is Critically Low"),
author=APP_AUTHOR,
copyright=APP_COPYRIGHT,
icon='low_battery',
help_string=HELP,
version=APP_VERSION,
)
self.category = PLUGIN_CONST.CATEGORY_COND_POWER
self.stock = True
self.event = EVENT_SYSTEM_BATTERY_LOW
self.summary_description = _("When the battery is critically low")
# end.
| bsd-3-clause | 3,910,088,821,444,205,000 | 28.208333 | 74 | 0.669044 | false | 3.718833 | false | false | false |
ghackebeil/PyORAM | src/pyoram/storage/block_storage.py | 1 | 3293 | __all__ = ('BlockStorageTypeFactory',)
import logging
log = logging.getLogger("pyoram")
def BlockStorageTypeFactory(storage_type_name):
if storage_type_name in BlockStorageTypeFactory._registered_devices:
return BlockStorageTypeFactory.\
_registered_devices[storage_type_name]
else:
raise ValueError(
"BlockStorageTypeFactory: Unsupported storage "
"type: %s" % (storage_type_name))
BlockStorageTypeFactory._registered_devices = {}
def _register_device(name, type_):
if name in BlockStorageTypeFactory._registered_devices:
raise ValueError("Can not register block storage device type "
"with name '%s'. A device type is already "
"registered with that name." % (name))
if not issubclass(type_, BlockStorageInterface):
raise TypeError("Can not register block storage device type "
"'%s'. The device must be a subclass of "
"BlockStorageInterface" % (type_))
BlockStorageTypeFactory._registered_devices[name] = type_
BlockStorageTypeFactory.register_device = _register_device
class BlockStorageInterface(object):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
#
# Abstract Interface
#
def clone_device(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def compute_storage_size(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def setup(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_count(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_size(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def storage_name(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def update_header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def close(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def yield_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def bytes_sent(self):
raise NotImplementedError # pragma: no cover
@property
def bytes_received(self):
raise NotImplementedError # pragma: no cover
| mit | -4,740,640,862,702,392,000 | 38.674699 | 73 | 0.580626 | false | 4.864106 | false | false | false |
liweitianux/atoolbox | astro/radec2deg.py | 1 | 1957 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Aaron LI
# Created: 2015-04-17
# Updated: 2016-06-30
#
"""
Convert the coordinates data in format (??h??m??s, ??d??m??s)
to format (degree, degree).
"""
import os
import sys
import re
import getopt
import math
USAGE = """Usage:
%(prog)s [ -h ] -i coords_file
Required arguments:
-i, --infile
infile containing the coordinates
Optional arguments:
-h, --help
""" % {'prog': os.path.basename(sys.argv[0])}
def usage():
print(USAGE)
def ra2deg(h, m, s):
return h * 15.0 + m * 15.0/60.0 + s * 15.0/3600.0
def dec2deg(d, m, s):
if (d >= 0):
sign = 1.0
else:
sign = -1.0
return sign * (math.fabs(d) + m/60.0 + s/3600.0)
def s_ra2deg(hms):
h, m, s = map(float, re.sub('[hms]', ' ', hms).split())
return h * 15.0 + m * 15.0/60.0 + s * 15.0/3600.0
def s_dec2deg(dms):
d, m, s = map(float, re.sub('[dms]', ' ', dms).split())
if (d >= 0):
sign = 1.0
else:
sign = -1.0
return sign * (math.fabs(d) + m/60.0 + s/3600.0)
def calc_offset(coord1, coord2):
ra1, dec1 = coord1
ra2, dec2 = coord2
return math.sqrt((ra1-ra2)**2 + (dec1-dec2)**2)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hi:",
["help", "infile="])
except getopt.GetoptError as err:
print(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(1)
elif opt in ("-i", "--infile"):
infile = arg
else:
assert False, "unhandled option"
for line in open(infile):
if re.match(r"^\s*#", line) or re.match(r"^\s*$", line):
continue
ra, dec = line.split()
ra_deg = s_ra2deg(ra)
dec_deg = s_dec2deg(dec)
print("%.8f %.8f" % (ra_deg, dec_deg))
if __name__ == "__main__":
main()
| mit | 6,794,721,827,466,608,000 | 19.385417 | 64 | 0.502299 | false | 2.807747 | false | false | false |
jfantom/incubator-airflow | airflow/contrib/auth/backends/proxied_auth.py | 1 | 3179 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from sys import version_info
import flask_login
from flask_login import login_required, current_user, logout_user
from airflow import settings
from airflow import models
from airflow.utils.log.logging_mixin import LoggingMixin
import os
log = LoggingMixin().log
class AuthenticationError(Exception):
pass
class ProxiedUser(models.User):
def __init__(self, user):
self.user = user
def is_active(self):
'''Required by flask_login'''
return True
def is_authenticated(self):
'''Required by flask_login'''
return True
def is_anonymous(self):
'''Required by flask_login'''
return False
def get_id(self):
'''Returns the current user id as required by flask_login'''
return self.user.get_id()
def data_profiling(self):
'''Provides access to data profiling tools'''
return True
def is_superuser(self):
'''Access all the things'''
return True
class ProxiedAuth(object):
def __init__(self):
self.login_manager = flask_login.LoginManager()
def init_app(self,flask_app):
self.flask_app = flask_app
self.login_manager.init_app(self.flask_app)
#checks headers instead of cookies
self.login_manager.request_loader(self.load_request)
# this is needed to disable the anti forgery check
flask_app.config['WTF_CSRF_CHECK_DEFAULT'] = False
def load_request(self, request):
'''
Reads the header field that has already been verified on the
nginx side by google auth. Header field is specified by setting
the environment variable AIRFLOW_PROXIED_AUTH_HEADER or else
it's defaulted to X-Email.
'''
session = settings.Session()
header_field = os.getenv('AIRFLOW_PROXIED_AUTH_HEADER', 'X-Email')
user_email = request.headers.get(header_field)
# this shouldn't happen since nginx should take care of it!
if user_email is None:
raise AuthenticationError(
'Airflow failed to get fields from request header')
# insert user into database if doesn't exist
user = session.query(models.User).filter(
models.User.username == user_email).first()
if not user:
user = models.User(
username=user_email,
is_superuser=True)
session.merge(user)
session.commit()
session.close()
return ProxiedUser(user)
login_manager = ProxiedAuth()
| apache-2.0 | 3,035,449,064,927,744,000 | 28.165138 | 74 | 0.650834 | false | 4.177398 | false | false | false |
dvklopfenstein/PrincetonAlgorithms | py/AlgsSedgewickWayne/Bag.py | 1 | 2806 | """Bag class is a container for generic items."""
class Bag(object): # <Item> implements Iterable<Item>:
"""The Bag class represents a bag (or multiset) of generic items."""
class _Node(object): # private static class <Item>:
"""helper linked list class"""
def __init__(self, Item, Next):
self._item = Item
self._next = Next
def __init__(self):
self._first = None # beginning of bag
self._N = 0 # number of elements in bag
def isEmpty(self):
"""return true if this bag is empty; false otherwise."""
return self._first is None
def size(self):
"""Returns the number of items in this bag."""
return self._N
def add(self, item):
"""Adds the arg item to this bag."""
self._first = self._Node(item, self._first)
self._N += 1
# Returns an iterator that iterates over the items in the bag in arbitrary order.
def __iter__(self):
return self._ListIterator(self._first)
class _ListIterator(object): # <Item> implements Iterator<Item>:
"""an iterator, doesn't implement remove() since it's optional."""
def __init__(self, first):
self._current = first
def hasNext(self):
"""If we are not at the end of the Bag."""
return self._current is not None
def next(self):
"""Go to the next element."""
if not self.hasNext():
raise StopIteration
item = self._current._item
self._current = self._current._next
return item
#************************************************************************
# Compilation: javac Bag.java
# Execution: java Bag < input.txt
#
# A generic bag or multiset, implemented using a singly-linked list.
#
# % more tobe.txt
# to be or not to - be - - that - - - is
#
# % java Bag < tobe.txt
# size of bag = 14
# is
# -
# -
# -
# that
# -
# -
# be
# -
# to
# not
# or
# be
# to
#
#************************************************************************/
# The Bag class represents a bag (or multiset) of generic items.
# It supports insertion and iterating over the
# items in arbitrary order.
#
# This implementation uses a singly-linked list with a static nested class Node.
# See {@link LinkedBag} for the version from the
# textbook that uses a non-static nested class.
# The <em>add</em>, <em>isEmpty</em>, and <em>size</em> operations
# take constant time. Iteration takes time proportional to the number of items.
#
# For additional documentation, see
# <a href="http://algs4.cs.princeton.edu/13stacks">Section 1.3</a> of
# <i>Algorithms, 4th Edition</i> by Robert Sedgewick and Kevin Wayne.
#
# @author Robert Sedgewick
# @author Kevin Wayne
# @converted to Python by DV Klopfenstein
# Copyright (C) 2002-2010, Robert Sedgewick and Kevin Wayne.
# Java last updated: Tue Mar 25 04:52:35 EDT 2014.
| gpl-2.0 | 8,381,885,796,278,084,000 | 27.343434 | 83 | 0.612616 | false | 3.438725 | false | false | false |
M32Media/redash | redash/tasks/queries.py | 1 | 24668 | import json
import time
import logging
import signal
import redis
import re
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from redash import redis_connection, models, statsd_client, settings, utils
from redash.utils import gen_query_hash
from redash.worker import celery
from redash.query_runner import InterruptException
from .alerts import check_alerts_for_query
from redash.authentication.account import send_api_token
logger = get_task_logger(__name__)
@celery.task(name="redash.tasks.refresh_selected_queries")
def refresh_selected_queries(
months, publishers, global_queries=False, non_monthly_publisher_queries=False,
no_query_execution=False):
outdated_queries_count = 0
query_ids = []
all_dashboards = models.Dashboard.query.all()
dashboard_ids_names = [
(db.id, db.name) for db in all_dashboards
if (publishers == ['ALL'] or any(publisher == db.name.split(':')[0] for publisher in publishers))]
if global_queries:
dashboard_ids_names += [(db.id, db.name) for db in all_dashboards if db.name.split(':')[0] == 'Global']
jobs = []
# An example of Dashboard is Cogeco:unsold:stats or Cogeco:segment:profile_referrer
for db_id, db_name in dashboard_ids_names:
dashboard = models.Dashboard.get_by_id(db_id)
layout_list = [widget_id for row in json.loads(dashboard.layout) for widget_id in row]
widgets = [models.Widget.get_by_id(widget_id) for widget_id in layout_list if not widget_id < 0]
# Some widgets are None objects, and this makes the script fail
widgets = [widget for widget in widgets if widget]
for widget in widgets:
condition = widget.visualization != None and any(month in widget.visualization.name for month in months)
if non_monthly_publisher_queries:
# If the flag is True, add the queries where the pattern DDDDDD, with D being a digit, is not present in the query
# This adds everything that is not month dependent to the query list
# e.g. Cogeco:segment:profile_referrer:view_cogeco, Global:Intell:AdManager:view_last_6m
condition = condition or (not re.findall(r'_(\d{6})', widget.visualization.name))
if global_queries:
condition = condition or db_name.split(':')[0] == 'Global'
if condition:
query_id = widget.visualization.query_rel.id
query = models.Query.get_by_id(query_id)
# If no_query_execution flag is enabled, the query is not run and we only return the query text
if no_query_execution:
jobs.append({
'query_text': query.query_text,
'view_name': '{}.{}'.format(db_name, widget.visualization.name)
})
else:
jobs.append({
'task': enqueue_query(
query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'}).to_dict(),
'query_text': query.query_text,
'view_name': '{}.{}'.format(db_name, widget.visualization.name)
})
query_ids.append(query.id)
outdated_queries_count += 1
logger.info(jobs)
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset(
'redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json.dumps(query_ids)})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
return jobs
"""
Gets task associated with ids
"""
def get_tasks(ids):
tasks = {}
if type(ids) is str:
lists = QueryTaskTracker.ALL_LISTS
else:
lists = QueryTaskTracker.ALL_LISTS
#Each different list of query trackers
for _list in lists:
#Each different tracker inside a list
for tracker in QueryTaskTracker.all(_list):
for _id in ids:
#Check if id given is matching a tracker
if(_id == tracker.task_id):
qt = QueryTask(tracker.task_id)
data = qt.to_dict()
tasks[tracker.task_id] = {
"status" : data.get('status', None),
"query_id" : tracker.query_id,
"query_result_id" : data.get('query_result_id', None),
"error": data.get('error', None)
}
return tasks;
def _job_lock_id(query_hash, data_source_id):
return "query_hash_job:%s:%s" % (data_source_id, query_hash)
def _unlock(query_hash, data_source_id):
redis_connection.delete(_job_lock_id(query_hash, data_source_id))
# TODO:
# There is some duplication between this class and QueryTask, but I wanted to implement the monitoring features without
# much changes to the existing code, so ended up creating another object. In the future we can merge them.
class QueryTaskTracker(object):
DONE_LIST = 'query_task_trackers:done'
WAITING_LIST = 'query_task_trackers:waiting'
IN_PROGRESS_LIST = 'query_task_trackers:in_progress'
ALL_LISTS = (DONE_LIST, WAITING_LIST, IN_PROGRESS_LIST)
def __init__(self, data):
self.data = data
@classmethod
def create(cls, task_id, state, query_hash, data_source_id, scheduled, metadata):
data = dict(task_id=task_id, state=state,
query_hash=query_hash, data_source_id=data_source_id,
scheduled=scheduled,
username=metadata.get('Username', 'unknown'),
query_id=metadata.get('Query ID', 'unknown'),
retries=0,
scheduled_retries=0,
created_at=time.time(),
started_at=None,
run_time=None)
return cls(data)
def save(self, connection=None):
if connection is None:
connection = redis_connection
self.data['updated_at'] = time.time()
key_name = self._key_name(self.data['task_id'])
connection.set(key_name, utils.json_dumps(self.data))
connection.zadd(self._get_list(), time.time(), key_name)
for l in self.ALL_LISTS:
if l != self._get_list():
connection.zrem(l, key_name)
# TOOD: this is not thread/concurrency safe. In current code this is not an issue, but better to fix this.
def update(self, **kwargs):
self.data.update(kwargs)
self.save()
@staticmethod
def _key_name(task_id):
return 'query_task_tracker:{}'.format(task_id)
def _get_list(self):
if self.state in ('finished', 'failed', 'cancelled'):
return self.DONE_LIST
if self.state in ('created'):
return self.WAITING_LIST
return self.IN_PROGRESS_LIST
@classmethod
def get_by_task_id(cls, task_id, connection=None):
if connection is None:
connection = redis_connection
key_name = cls._key_name(task_id)
data = connection.get(key_name)
return cls.create_from_data(data)
@classmethod
def create_from_data(cls, data):
if data:
data = json.loads(data)
return cls(data)
return None
@classmethod
def all(cls, list_name, offset=0, limit=-1):
if limit != -1:
limit -= 1
if offset != 0:
offset -= 1
ids = redis_connection.zrevrange(list_name, offset, limit)
pipe = redis_connection.pipeline()
for id in ids:
pipe.get(id)
tasks = [cls.create_from_data(data) for data in pipe.execute()]
return tasks
@classmethod
def prune(cls, list_name, keep_count):
count = redis_connection.zcard(list_name)
if count <= keep_count:
return 0
remove_count = count - keep_count
keys = redis_connection.zrange(list_name, 0, remove_count - 1)
redis_connection.delete(*keys)
redis_connection.zremrangebyrank(list_name, 0, remove_count - 1)
return remove_count
def __getattr__(self, item):
return self.data[item]
def __contains__(self, item):
return item in self.data
class QueryTask(object):
# TODO: this is mapping to the old Job class statuses. Need to update the client side and remove this
STATUSES = {
'PENDING': 1,
'STARTED': 2,
'SUCCESS': 3,
'FAILURE': 4,
'REVOKED': 4
}
def __init__(self, job_id=None, async_result=None):
if async_result:
self._async_result = async_result
else:
self._async_result = AsyncResult(job_id, app=celery)
@property
def id(self):
return self._async_result.id
def to_dict(self):
task_info = self._async_result._get_task_meta()
result, task_status = task_info['result'], task_info['status']
if task_status == 'STARTED':
updated_at = result.get('start_time', 0)
else:
updated_at = 0
status = self.STATUSES[task_status]
if isinstance(result, Exception):
error = result.message
status = 4
elif task_status == 'REVOKED':
error = 'Query execution cancelled.'
else:
error = ''
if task_status == 'SUCCESS' and not error:
query_result_id = result
else:
query_result_id = None
return {
'id': self._async_result.id,
'updated_at': updated_at,
'status': status,
'error': error,
'query_result_id': query_result_id,
}
@property
def is_cancelled(self):
return self._async_result.status == 'REVOKED'
@property
def celery_status(self):
return self._async_result.status
def ready(self):
return self._async_result.ready()
def cancel(self):
return self._async_result.revoke(terminate=True, signal='SIGINT')
def enqueue_query(query, data_source, user_id, scheduled_query=None, metadata={}):
query_hash = gen_query_hash(query)
logging.info("Inserting job for %s with metadata=%s", query_hash, metadata)
try_count = 0
job = None
while try_count < 5:
try_count += 1
pipe = redis_connection.pipeline()
try:
pipe.watch(_job_lock_id(query_hash, data_source.id))
job_id = pipe.get(_job_lock_id(query_hash, data_source.id))
if job_id:
logging.info("[%s] Found existing job: %s", query_hash, job_id)
job = QueryTask(job_id=job_id)
if job.ready():
logging.info("[%s] job found is ready (%s), removing lock", query_hash, job.celery_status)
redis_connection.delete(_job_lock_id(query_hash, data_source.id))
job = None
if not job:
pipe.multi()
if scheduled_query:
queue_name = data_source.scheduled_queue_name
scheduled_query_id = scheduled_query.id
else:
queue_name = data_source.queue_name
scheduled_query_id = None
result = execute_query.apply_async(args=(
query, data_source.id, metadata, user_id,
scheduled_query_id),
queue=queue_name)
job = QueryTask(async_result=result)
tracker = QueryTaskTracker.create(
result.id, 'created', query_hash, data_source.id,
scheduled_query is not None, metadata)
tracker.save(connection=pipe)
logging.info("[%s] Created new job: %s", query_hash, job.id)
pipe.set(_job_lock_id(query_hash, data_source.id), job.id, settings.JOB_EXPIRY_TIME)
pipe.execute()
break
except redis.WatchError:
continue
if not job:
logging.error("[Manager][%s] Failed adding job for query.", query_hash)
return job
@celery.task(name="redash.tasks.refresh_queries_http")
def refresh_queries_http():
logger.info("Refreshing queries...")
jobs = []
for query in models.Query.every_queries():
logger.info("Updating Query {} ...".format(query.id))
if query.data_source.paused:
logger.info("Skipping refresh of Query 1 {} because datasource {} is paused because {}"
.format(query.id,
query.data_source.name,
query.data_source.pause_reason
))
else:
jobs.append(enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'}))
""" LINK BETWEEN TRACKER AND ACTUAL TASK
for job in jobs:
print("JOBS : {}".format(job.to_dict().get('id', None)))
lists = QueryTaskTracker.ALL_LISTS
for _list in lists:
for tracker in QueryTaskTracker.all(_list):
print("TRACKER : {}".format(tracker.data.get('task_id', None)))
"""
return jobs
@celery.task(name="redash.tasks.refresh_queries")
def refresh_queries():
outdated_queries_count = 0
query_ids = []
with statsd_client.timer('manager.outdated_queries_lookup'):
for query in models.Query.outdated_queries():
if settings.FEATURE_DISABLE_REFRESH_QUERIES:
logging.info("Disabled refresh queries.")
elif query.data_source.paused:
logging.info("Skipping refresh of %s because datasource - %s is paused (%s).", query.id, query.data_source.name, query.data_source.pause_reason)
else:
enqueue_query(query.query_text, query.data_source, query.user_id,
scheduled_query=query,
metadata={'Query ID': query.id, 'Username': 'Scheduled'})
query_ids.append(query.id)
outdated_queries_count += 1
statsd_client.gauge('manager.outdated_queries', outdated_queries_count)
logger.info("Done refreshing queries. Found %d outdated queries: %s" % (outdated_queries_count, query_ids))
status = redis_connection.hgetall('redash:status')
now = time.time()
redis_connection.hmset('redash:status', {
'outdated_queries_count': outdated_queries_count,
'last_refresh_at': now,
'query_ids': json.dumps(query_ids)
})
statsd_client.gauge('manager.seconds_since_refresh', now - float(status.get('last_refresh_at', now)))
@celery.task(name="redash.tasks.cleanup_tasks")
def cleanup_tasks():
in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST)
for tracker in in_progress:
result = AsyncResult(tracker.task_id)
# If the AsyncResult status is PENDING it means there is no celery task object for this tracker, and we can
# mark it as "dead":
if result.status == 'PENDING':
logging.info("In progress tracker for %s is no longer enqueued, cancelling (task: %s).",
tracker.query_hash, tracker.task_id)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='cancelled')
if result.ready():
logging.info("in progress tracker %s finished", tracker.query_hash)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='finished')
waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST)
for tracker in waiting:
result = AsyncResult(tracker.task_id)
if result.ready():
logging.info("waiting tracker %s finished", tracker.query_hash)
_unlock(tracker.query_hash, tracker.data_source_id)
tracker.update(state='finished')
# Maintain constant size of the finished tasks list:
QueryTaskTracker.prune(QueryTaskTracker.DONE_LIST, 1000)
@celery.task(name="redash.tasks.cleanup_query_results")
def cleanup_query_results():
"""
Job to cleanup unused query results -- such that no query links to them anymore, and older than
settings.QUERY_RESULTS_MAX_AGE (a week by default, so it's less likely to be open in someone's browser and be used).
Each time the job deletes only settings.QUERY_RESULTS_CLEANUP_COUNT (100 by default) query results so it won't choke
the database in case of many such results.
"""
logging.info("Running query results clean up (removing maximum of %d unused results, that are %d days old or more)",
settings.QUERY_RESULTS_CLEANUP_COUNT, settings.QUERY_RESULTS_CLEANUP_MAX_AGE)
unused_query_results = models.QueryResult.unused(settings.QUERY_RESULTS_CLEANUP_MAX_AGE).limit(settings.QUERY_RESULTS_CLEANUP_COUNT)
deleted_count = models.QueryResult.query.filter(
models.QueryResult.id.in_(unused_query_results.subquery())
).delete(synchronize_session=False)
models.db.session.commit()
logger.info("Deleted %d unused query results.", deleted_count)
@celery.task(name="redash.tasks.refresh_schemas")
def refresh_schemas():
"""
Refreshes the data sources schemas.
"""
blacklist = [int(ds_id) for ds_id in redis_connection.smembers('data_sources:schema:blacklist') if ds_id]
global_start_time = time.time()
logger.info(u"task=refresh_schemas state=start")
for ds in models.DataSource.query:
if ds.paused:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=paused(%s)", ds.id, ds.pause_reason)
elif ds.id in blacklist:
logger.info(u"task=refresh_schema state=skip ds_id=%s reason=blacklist", ds.id)
else:
logger.info(u"task=refresh_schema state=start ds_id=%s", ds.id)
start_time = time.time()
try:
ds.get_schema(refresh=True)
logger.info(u"task=refresh_schema state=finished ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
except Exception:
logger.exception(u"Failed refreshing schema for the data source: %s", ds.name)
logger.info(u"task=refresh_schema state=failed ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
logger.info(u"task=refresh_schemas state=finish total_runtime=%.2f", time.time() - global_start_time)
def signal_handler(*args):
raise InterruptException
class QueryExecutionError(Exception):
pass
# We could have created this as a celery.Task derived class, and act as the task itself. But this might result in weird
# issues as the task class created once per process, so decided to have a plain object instead.
class QueryExecutor(object):
def __init__(self, task, query, data_source_id, user_id, metadata,
scheduled_query):
self.task = task
self.query = query
self.data_source_id = data_source_id
self.metadata = metadata
self.data_source = self._load_data_source()
if user_id is not None:
self.user = models.User.query.get(user_id)
else:
self.user = None
self.query_hash = gen_query_hash(self.query)
self.scheduled_query = scheduled_query
# Load existing tracker or create a new one if the job was created before code update:
self.tracker = QueryTaskTracker.get_by_task_id(task.request.id) or QueryTaskTracker.create(task.request.id,
'created',
self.query_hash,
self.data_source_id,
False, metadata)
def run(self):
signal.signal(signal.SIGINT, signal_handler)
self.tracker.update(started_at=time.time(), state='started')
logger.debug("Executing query:\n%s", self.query)
self._log_progress('executing_query')
query_runner = self.data_source.query_runner
annotated_query = self._annotate_query(query_runner)
try:
data, error = query_runner.run_query(annotated_query, self.user)
except Exception as e:
error = unicode(e)
data = None
logging.warning('Unexpected error while running query:', exc_info=1)
run_time = time.time() - self.tracker.started_at
self.tracker.update(error=error, run_time=run_time, state='saving_results')
logger.info(u"task=execute_query query_hash=%s data_length=%s error=[%s]", self.query_hash, data and len(data), error)
_unlock(self.query_hash, self.data_source.id)
if error:
self.tracker.update(state='failed')
result = QueryExecutionError(error)
if self.scheduled_query:
self.scheduled_query.schedule_failures += 1
models.db.session.add(self.scheduled_query)
else:
if (self.scheduled_query and
self.scheduled_query.schedule_failures > 0):
self.scheduled_query.schedule_failures = 0
models.db.session.add(self.scheduled_query)
query_result, updated_query_ids = models.QueryResult.store_result(
self.data_source.org, self.data_source,
self.query_hash, self.query, data,
run_time, utils.utcnow())
self._log_progress('checking_alerts')
for query_id in updated_query_ids:
check_alerts_for_query.delay(query_id)
self._log_progress('finished')
result = query_result.id
models.db.session.commit()
return result
def _annotate_query(self, query_runner):
if query_runner.annotate_query():
self.metadata['Task ID'] = self.task.request.id
self.metadata['Query Hash'] = self.query_hash
self.metadata['Queue'] = self.task.request.delivery_info['routing_key']
annotation = u", ".join([u"{}: {}".format(k, v) for k, v in self.metadata.iteritems()])
annotated_query = u"/* {} */ {}".format(annotation, self.query)
else:
annotated_query = self.query
return annotated_query
def _log_progress(self, state):
logger.info(
u"task=execute_query state=%s query_hash=%s type=%s ds_id=%d "
"task_id=%s queue=%s query_id=%s username=%s",
state, self.query_hash, self.data_source.type, self.data_source.id,
self.task.request.id,
self.task.request.delivery_info['routing_key'],
self.metadata.get('Query ID', 'unknown'),
self.metadata.get('Username', 'unknown'))
self.tracker.update(state=state)
def _load_data_source(self):
logger.info("task=execute_query state=load_ds ds_id=%d", self.data_source_id)
return models.DataSource.query.get(self.data_source_id)
# user_id is added last as a keyword argument for backward compatability -- to support executing previously submitted
# jobs before the upgrade to this version.
@celery.task(name="redash.tasks.execute_query", bind=True, track_started=True)
def execute_query(self, query, data_source_id, metadata, user_id=None,
scheduled_query_id=None):
if scheduled_query_id is not None:
scheduled_query = models.Query.query.get(scheduled_query_id)
else:
scheduled_query = None
return QueryExecutor(self, query, data_source_id, user_id, metadata,
scheduled_query).run()
#Update tokens for API data access
@celery.task(name="redash.tasks.refresh_query_tokens")
def refresh_query_tokens():
logger.warning("Refreshing User Tokens")
#Refresh Tokens
models.User.refresh_tokens()
#Send Emails
users = models.User.get_all()
for u in users:
user = u.to_dict()
send_api_token(user)
| bsd-2-clause | -672,060,604,044,315,900 | 36.603659 | 160 | 0.59186 | false | 3.964004 | false | false | false |
deepmind/deepmind-research | ogb_lsc/mag/losses.py | 1 | 6654 | # Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses and related utilities."""
from typing import Mapping, Tuple, Sequence, NamedTuple, Dict, Optional
import jax
import jax.numpy as jnp
import jraph
import numpy as np
# pylint: disable=g-bad-import-order
import datasets
LogsDict = Mapping[str, jnp.ndarray]
class Predictions(NamedTuple):
node_indices: np.ndarray
labels: np.ndarray
predictions: np.ndarray
logits: np.ndarray
def node_classification_loss(
logits: jnp.ndarray,
batch: datasets.Batch,
extra_stats: bool = False,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Gets node-wise classification loss and statistics."""
log_probs = jax.nn.log_softmax(logits)
loss = -jnp.sum(log_probs * batch.node_labels, axis=-1)
num_valid = jnp.sum(batch.label_mask)
labels = jnp.argmax(batch.node_labels, axis=-1)
is_correct = (jnp.argmax(log_probs, axis=-1) == labels)
num_correct = jnp.sum(is_correct * batch.label_mask)
loss = jnp.sum(loss * batch.label_mask) / (num_valid + 1e-8)
accuracy = num_correct / (num_valid + 1e-8)
entropy = -jnp.mean(jnp.sum(jax.nn.softmax(logits) * log_probs, axis=-1))
stats = {
'classification_loss': loss,
'prediction_entropy': entropy,
'accuracy': accuracy,
'num_valid': num_valid,
'num_correct': num_correct,
}
if extra_stats:
for k in range(1, 6):
stats[f'top_{k}_correct'] = topk_correct(logits, labels,
batch.label_mask, k)
return loss, stats
def get_predictions_labels_and_logits(
logits: jnp.ndarray,
batch: datasets.Batch,
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Gets prediction labels and logits."""
mask = batch.label_mask > 0.
indices = batch.node_indices[mask]
logits = logits[mask]
predictions = jnp.argmax(logits, axis=-1)
labels = jnp.argmax(batch.node_labels[mask], axis=-1)
return indices, predictions, labels, logits
def topk_correct(
logits: jnp.ndarray,
labels: jnp.ndarray,
valid_mask: jnp.ndarray,
topk: int,
) -> jnp.ndarray:
"""Calculates top-k accuracy."""
pred_ranking = jnp.argsort(logits, axis=1)[:, ::-1]
pred_ranking = pred_ranking[:, :topk]
is_correct = jnp.any(pred_ranking == labels[:, jnp.newaxis], axis=1)
return (is_correct * valid_mask).sum()
def ensemble_predictions_by_probability_average(
predictions_list: Sequence[Predictions]) -> Predictions:
"""Ensemble predictions by ensembling the probabilities."""
_assert_consistent_predictions(predictions_list)
all_probs = np.stack([
jax.nn.softmax(predictions.logits, axis=-1)
for predictions in predictions_list
],
axis=0)
ensembled_logits = np.log(all_probs.mean(0))
return predictions_list[0]._replace(
logits=ensembled_logits, predictions=np.argmax(ensembled_logits, axis=-1))
def get_accuracy_dict(predictions: Predictions) -> Dict[str, float]:
"""Returns the accuracy dict."""
output_dict = {}
output_dict['num_valid'] = predictions.predictions.shape[0]
matches = (predictions.labels == predictions.predictions)
output_dict['accuracy'] = matches.mean()
pred_ranking = jnp.argsort(predictions.logits, axis=1)[:, ::-1]
for k in range(1, 6):
matches = jnp.any(
pred_ranking[:, :k] == predictions.labels[:, None], axis=1)
output_dict[f'top_{k}_correct'] = matches.mean()
return output_dict
def bgrl_loss(
first_online_predictions: jnp.ndarray,
second_target_projections: jnp.ndarray,
second_online_predictions: jnp.ndarray,
first_target_projections: jnp.ndarray,
symmetrize: bool,
valid_mask: jnp.ndarray,
) -> Tuple[jnp.ndarray, LogsDict]:
"""Implements BGRL loss."""
first_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(first_online_predictions, axis=-1) -
_l2_normalize(second_target_projections, axis=-1)),
axis=-1)
if symmetrize:
second_side_node_loss = jnp.sum(
jnp.square(
_l2_normalize(second_online_predictions, axis=-1) -
_l2_normalize(first_target_projections, axis=-1)),
axis=-1)
node_loss = first_side_node_loss + second_side_node_loss
else:
node_loss = first_side_node_loss
loss = (node_loss * valid_mask).sum() / (valid_mask.sum() + 1e-6)
return loss, dict(bgrl_loss=loss)
def get_corrupted_view(
graph: jraph.GraphsTuple,
feature_drop_prob: float,
edge_drop_prob: float,
rng_key: jnp.ndarray,
) -> jraph.GraphsTuple:
"""Returns corrupted graph view."""
node_key, edge_key = jax.random.split(rng_key)
def mask_feature(x):
mask = jax.random.bernoulli(node_key, 1 - feature_drop_prob, x.shape)
return x * mask
# Randomly mask features with fixed probability.
nodes = jax.tree_map(mask_feature, graph.nodes)
# Simulate dropping of edges by changing genuine edges to self-loops on
# the padded node.
num_edges = graph.senders.shape[0]
last_node_idx = graph.n_node.sum() - 1
edge_mask = jax.random.bernoulli(edge_key, 1 - edge_drop_prob, [num_edges])
senders = jnp.where(edge_mask, graph.senders, last_node_idx)
receivers = jnp.where(edge_mask, graph.receivers, last_node_idx)
# Note that n_edge will now be invalid since edges in the middle of the list
# will correspond to the final graph. Set n_edge to None to ensure we do not
# accidentally use this.
return graph._replace(
nodes=nodes,
senders=senders,
receivers=receivers,
n_edge=None,
)
def _assert_consistent_predictions(predictions_list: Sequence[Predictions]):
first_predictions = predictions_list[0]
for predictions in predictions_list:
assert np.all(predictions.node_indices == first_predictions.node_indices)
assert np.all(predictions.labels == first_predictions.labels)
assert np.all(
predictions.predictions == np.argmax(predictions.logits, axis=-1))
def _l2_normalize(
x: jnp.ndarray,
axis: Optional[int] = None,
epsilon: float = 1e-6,
) -> jnp.ndarray:
return x * jax.lax.rsqrt(
jnp.sum(jnp.square(x), axis=axis, keepdims=True) + epsilon)
| apache-2.0 | -7,726,497,317,545,474,000 | 32.437186 | 80 | 0.681996 | false | 3.294059 | false | false | false |
adhocish/MELEEDB | meleedb/main.py | 1 | 1570 | import logging
import datetime
import os
from scrapy.crawler import CrawlerProcess
from scrapy.settings import Settings
from scrapy.utils.log import configure_logging
import spiders
def run():
# Logging settings
configure_logging(install_root_handler=False)
logging.basicConfig(
datefmt='%Y-%m-%d %H:%M:%S',
filemode='w',
filename='output/' + datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S") + '.log',
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO
)
# Project settings
settings = Settings()
settings.setmodule('settings', priority='project')
# Class to run parallel spiders
process = CrawlerProcess(settings)
process.crawl(spiders.LiquipediaSpider)
# Block until crawling is complete
process.start()
def handle_cmdline_arguments():
# Output to JSON file
# Create new database
# Update database?
# Logging
# Specify specific tournament(s)
# parser = argparse.ArgumentParser(description='Creates an .m3u playlist from given media files.')
# parser.add_argument('-u', '--upload', help='Attempt to upload files to remote.', action='store_true')
# parser.add_argument('-r', '--recursive', help='Process subdirectories as well.', action='store_true')
# parser.add_argument('-n', '--name', type=str, help='Name of playlist.')
# parser.add_argument('files', type=str, nargs='+', help='Absolute paths to files.')
# args = parser.parse_args()
run()
if __name__ == "__main__":
handle_cmdline_arguments() | gpl-3.0 | 334,612,834,064,739,840 | 29.803922 | 107 | 0.664968 | false | 3.915212 | false | false | false |
uwescience/pulse2percept | pulse2percept/implants/bvt.py | 1 | 4694 | """`BVT24`"""
import numpy as np
from .base import ProsthesisSystem
from .electrodes import DiskElectrode
from .electrode_arrays import ElectrodeArray
class BVT24(ProsthesisSystem):
"""24-channel suprachoroidal retinal prosthesis
This class creates a 24-channel suprachoroidal retinal prosthesis
[Layton2014]_, which was developed by the Bionic Vision Australia
Consortium and commercialized by Bionic Vision Technologies (BVT).
The center of the array is located at (x,y,z), given in microns, and the
array is rotated by rotation angle ``rot``, given in radians.
The array consists of:
- 33 platinum stimulating electrodes:
- 30 electrodes with 600um diameter (Electrodes 1-20 (except
9, 17, 19) and Electrodes 21a-m),
- 3 electrodes with 400um diameter (Electrodes 9, 17, 19)
- 2 return electrodes with 2000um diameter (Electrodes 22, 23)
Electrodes 21a-m are typically being ganged to provide an external
ring for common ground. The center of the array is assumed to lie
between Electrodes 7, 8, 9, and 13.
.. note::
Column order for electrode numbering is reversed in a left-eye
implant.
.. versionadded:: 0.6
Parameters
----------
x : float
x coordinate of the array center (um)
y : float
y coordinate of the array center (um)
z: float or array_like
Distance of the array to the retinal surface (um). Either a list
with 60 entries or a scalar.
rot : float
Rotation angle of the array (rad). Positive values denote
counter-clock-wise (CCW) rotations in the retinal coordinate
system.
eye : {'RE', 'LE'}, optional
Eye in which array is implanted.
"""
# Frozen class: User cannot add more class attributes
__slots__ = ()
def __init__(self, x=0, y=0, z=0, rot=0, eye='RE', stim=None):
self.eye = eye
self.earray = ElectrodeArray([])
n_elecs = 35
# the positions of the electrodes 1-20, 21a-21m, R1-R2
x_arr = [-1275.0, -850.0, -1275.0, -850.0, -1275.0,
-425.0, 0, -425.0, 0, -425.0,
425.0, 850.0, 425.0, 850.0, 425.0,
1275.0, 1700.0, 1275.0, 1700.0, 1275.0,
-850.0, 0, 850.0, 1700.0, 2125.0,
2550.0, 2125.0, 2550.0, 2125.0, 1700.0,
850.0, 0, -850.0, 7000.0, 9370.0]
y_arr = [1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
1520.0, 760.0, 0, -760.0, -1520.0,
2280.0, 2280.0, 2280.0, 2280.0, 1520.0,
760.0, 0.0, -760.0, -1520.0, -2280.0,
-2280.0, -2280.0, -2280.0, 0, 0]
if isinstance(z, (list, np.ndarray)):
# Specify different height for every electrode in a list:
z_arr = np.asarray(self.z).flatten()
if z_arr.size != n_elecs:
raise ValueError("If `z` is a list, it must have %d entries, "
"not %d." % (n_elecs, len(z)))
else:
# If `z` is a scalar, choose same height for all electrodes:
z_arr = np.ones(n_elecs, dtype=float) * z
# the position of the electrodes 1-20, 21a-21m, R1-R2 for left eye
if eye == 'LE':
x_arr = np.negative(x_arr)
# the radius of all the electrodes in the implants
r_arr = [300.0] * n_elecs
# the radius of electrodes 9, 17, 19 is 200.0 um
r_arr[8] = r_arr[16] = r_arr[18] = 200.0
# the radius of the return electrodes is 1000.0 um
r_arr[33] = r_arr[34] = 1000.0
# the names of the electrodes 1-20, 21a-21m, R1 and R2
names = [str(name) for name in range(1, 21)]
names.extend(['21a', '21b', '21c', '21d', '21e',
'21f', '21g', '21h', '21i', '21j',
'21k', '21l', '21m'])
names.extend(['R1', 'R2'])
# Rotate the grid:
rotmat = np.array([np.cos(rot), -np.sin(rot),
np.sin(rot), np.cos(rot)]).reshape((2, 2))
xy = np.matmul(rotmat, np.vstack((x_arr, y_arr)))
x_arr = xy[0, :]
y_arr = xy[1, :]
# Apply offset to make the grid centered at (x, y):
x_arr += x
y_arr += y
for x, y, z, r, name in zip(x_arr, y_arr, z_arr, r_arr, names):
self.earray.add_electrode(name, DiskElectrode(x, y, z, r))
# Beware of race condition: Stim must be set last, because it requires
# indexing into self.electrodes:
self.stim = stim
| bsd-3-clause | -6,385,752,689,116,946,000 | 37.162602 | 78 | 0.553473 | false | 3.026435 | false | false | false |
belemizz/mimic2_tools | clinical_db/classify_patients.py | 1 | 3203 | """
classify patients based on lab tests
"""
import get_sample.mimic2
from mutil import Graph
import mutil.mycsv
import time
import datetime
import random
import numpy as np
import theano
import theano.tensor as T
import alg.classification
def main( max_id = 2000, target_codes = ['428.0'], show_flag = True):
mimic2db = get_sample.mimic2.Mimic2()
graph = Graph()
## Get Subject ID ##
id_list = mimic2db.subject_with_icd9_codes(target_codes)
subject_ids = [item for item in id_list if item < max_id]
print "Number of Candidates : %d"%len(subject_ids)
## Get Data ##
days_before_discharge = [0]
recover_values = [[], [], [], []]
expire_values = [[], [], [], []]
start_time = time.clock()
algo_num = 0
time_diff = 4
cr_id = 50090
bun_id = 50177
for str_id in subject_ids:
sid = int(str_id)
print sid
patient = mimic2db.get_subject(sid)
if patient:
final_adm = patient.get_final_admission()
if len(final_adm.icd9)>0 and final_adm.icd9[0][3] == target_codes[0]:
for index, dbd in enumerate(days_before_discharge):
if algo_num == 0:
# bun_and_creatinine
time_of_interest = final_adm.disch_dt + datetime.timedelta(1-dbd)
lab_result = final_adm.get_newest_lab_at_time(time_of_interest)
value1 = [item[4] for item in lab_result if item[0] == cr_id]
value2 = [item[4] for item in lab_result if item[0] == bun_id]
else:
# trend of BUN
time_of_interest1 = final_adm.disch_dt + datetime.timedelta(1-dbd)
time_of_interest2 = final_adm.disch_dt + datetime.timedelta(1-dbd-time_diff)
lab_result1 = final_adm.get_newest_lab_at_time(time_of_interest1)
lab_result2 = final_adm.get_newest_lab_at_time(time_of_interest2)
value1 = [item[4] for item in lab_result1 if item[0] == bun_id]
value2 = [item[4] for item in lab_result2 if item[0] == bun_id]
if patient.hospital_expire_flg == 'Y':
expire_values[index].append([value1, value2])
else:
recover_values[index].append([value1, value2])
end_time = time.clock()
print "data_retrieving_time: %f sec"%(end_time - start_time)
def transform_values(input_values):
""" transform to numpy format """
temp = []
for item in input_values:
if len(item[0])>0 and len(item[1])>0:
temp.append([float(item[0][0]), float(item[1][0])])
return np.array(temp)
positive_x = transform_values(expire_values[0])
negative_x = transform_values(recover_values[0])
data = [[item, 1] for item in positive_x]
data.extend([[item, 0] for item in negative_x])
random.shuffle(data)
x = np.array([item[0] for item in data])
y = np.array([item[1] for item in data])
if __name__ == '__main__':
main()
| mit | -5,518,623,470,133,790,000 | 33.074468 | 100 | 0.547924 | false | 3.418356 | false | false | false |
Stymphalian/sublime-plugins | KeyMapQuery/KeyMapQuery.py | 1 | 8716 | import sublime, sublime_plugin
import re
from collections import namedtuple
import os.path
"""
KeyMapQueryCommand allows you to quickly query if a key-binding is bound.
A combo-box will appear displayings a list of bound key-bindings. Type a key-combination
into the inptu box to narrow the results ( i.e. ctrl+k,ctrl+i ).
If there is a conflict in key-bindings,by default,the highest precendence match
is shown lower in the list.
i.e. if ctrl+o is bound in two files.
["ctrl+o" : command 1]
["ctrl+o" : command 2] <-- this is the one which actually gets used.
"""
class KeyMapQueryCommand(sublime_plugin.WindowCommand):
"""
InternalObject holds state during the execution of the command.
"""
class InternalObject(object):
KeyMap = namedtuple("KeyMap",["filename","bindings"])
def __init__(self):
self.keymaps = []
self.single_array = []
self.settings = sublime.load_settings("KeyMapQuery.sublime-settings")
def get_key_binding(self,index):
s = self.single_array[index]
return s.split(":")[0]
def get_relative_index(self,index):
count = 0
for d in self.keymaps:
if count <= index < count + len(d.bindings):
return index - count
else:
count += len(d.bindings)
raise IndexError("Index out of range")
# given an index from the on_select() callback
# determine the sublime-keymap filename which it belongs to.
def get_filename(self,index):
count = 0
for d in self.keymaps:
if count <= index < count + len(d.bindings):
return d.filename
else:
count += len(d.bindings)
raise IndexError("Index out of range")
# Given the keymap files we loaded in, flatten them into
# a single array of strings to be used by the window.show_quick_panel()
def get_string_list_of_keymaps(self):
# flatten the keymaps into a single array contains only the keybinding object
rs = []
for d in self.keymaps:
rs.extend(d.bindings)
# convert each key-binding into a string
# The format should look like
# ["ctrl+i","ctrl+j"] : command_to_be_run_1
# ["ctrl+i","ctrl+k"] : command_to_be_run_2
# ["ctrl+i","ctrl+l"] : command_to_be_run_3
def str_format(obj):
objs = map(lambda x: '"' + x +'"', obj["keys"])
return "{:30s} : {}".format("["+ ",".join(objs) + "]",obj["command"])
self.single_array = list(map(str_format,rs))
return self.single_array
# Load all the sublime-keymap files that are known to sublime.
# This includes keymap files zipped inside sublime-package directories.
def load_keymaps(self,file_re):
# Get all the keymap filenames
all_keymap_files = sublime.find_resources("*.sublime-keymap")
# sort them, such as described by the merging/precedence rules defined
# http://docs.sublimetext.info/en/latest/extensibility/packages.html?highlight=precedence
all_keymap_files.sort()
if self.settings.get("reverse_sort_order"):
all_keymap_files.reverse()
filtered_files = list(filter(lambda x : re.match(file_re,x) != None,all_keymap_files))
# Load the keymap files; decode them into pyobjects;
# and then convert them into KeyMap tuples
def mapToPythonObj(filename):
res = sublime.load_resource(filename)
# assumption is that the decoded json is represented as
# a python array of dictionaries.
return self.KeyMap(filename,sublime.decode_value(res))
self.keymaps = list(map(mapToPythonObj,filtered_files))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def __init__(self,window):
self.window = window
self.file_re = self._get_keymap_regex(sublime.platform())
self.state = None
def run(self):
self.state = self.InternalObject()
self.state.load_keymaps(self.file_re)
input_array = self.state.get_string_list_of_keymaps()
view = self.window.show_quick_panel(
input_array,
flags=sublime.MONOSPACE_FONT,
selected_index=0,
on_select= self.on_select,
on_highlight=None)
# on_highlight=self.on_highlight)
def _get_keymap_regex(self,platform):
if( platform == "windows"):
file_re = re.compile(r'(.*(Default \(Windows\)|Default)\.sublime-keymap)')
elif (platform == "linux"):
file_re = re.compile(r'(.*(Default \(Linux\)|Default)\.sublime-keymap)')
else:
file_re = re.compile(r'(.*(Default \(OSX\)|Default)\.sublime-keymap)')
return file_re
def on_highlight(self,value):
if value == -1:
return
def on_select(self,value):
if value == -1:
return
# open the keymap file.
filename = self.state.get_filename(value)
split_filename = "/".join(filename.split("/")[1:])
# This fucking sucks. I would really like to use the sublime API open_file()
# directly. This would get me a direct ref to the View object and allow me
# to set the cursor to the proper position to show the hotkey binding.
# There are a few problems with this:
# i) sublime-packages are compresesd (zip).
# ii) packages are stored in different folders ( pakcages, pakcage/user, etc)
# and they are all differnet on differenct architectures.
# iii) Changing the sel() on a view doesn't update the cursor position
# on the screen. Not sure if this is a bug, but I thinkg it is
# becaues we aren't making edits using an Edit object. Therefore
# any changes that we make aren't known/shown until some user
# interaction
# Because of these problems I use the following hack.
# 1.) Open the file using the window.run_command, and use the ${packages}
# variables substitution.The internal sublime api automatically finds and
# uncompresses all the files for me. I don't have to deal with anything
# and the proper files gets opened (assuming it exists).
# 2.) The pit-fall to this is that I don't get a direct ref to the View
# that is created/opened. This means that I can't set the cursor position.
# Additinally,because the run_command is async, I don't even know when
# the View gets fully loaded (i.e. I can't use window.active_view())
# 3.) To get around this problem. I creat a helper TextCommand class.
# The purpose of this class is to positoin the cursor on the view.
# (i.e find_all() on a regex string). This is hacky because it pollutes
# the command namespace. This only solves the problem of being able to
# set the cursor position. I still have to use a set_timeout() in order
# to "ensure" the file is opened before I issue the command.
self.window.run_command("open_file",{"file":"${packages}/"+split_filename})
def inner():
self.window.run_command("move_cursor_to_pattern",
{"pattern":r'"keys"\s*:\s*\[',
"index":self.state.get_relative_index(value)})
# TODO: extract settings into my own class,whcih allows you to specify defaults
delay= self.state.settings.get("timeout_delay")
if(delay == None):
delay = 250
sublime.set_timeout(inner,delay)
# A Helper command used to move the cursor to the beginning/end of
# a regex pattern in the view.
class MoveCursorToPatternCommand(sublime_plugin.TextCommand):
def run(self,edit,pattern,index=0):
r = self.view.find_all(pattern)
if index < 0 or index >= len(r):
print("Pattern not found \"{}\"".format(pattern))
return
r = r[index]
self.view.show_at_center(r)
sel = self.view.sel()
sel.clear()
sel.add(sublime.Region(r.b,r.b)) | mit | -5,049,045,572,851,911,000 | 44.37234 | 101 | 0.572855 | false | 4.130806 | false | false | false |
alfredodeza/execnet | execnet/gateway_io.py | 1 | 7538 | # -*- coding: utf-8 -*-
"""
execnet io initialization code
creates io instances used for gateway io
"""
import os
import shlex
import sys
try:
from execnet.gateway_base import Popen2IO, Message
except ImportError:
from __main__ import Popen2IO, Message
from functools import partial
class Popen2IOMaster(Popen2IO):
def __init__(self, args, execmodel):
self.popen = p = execmodel.PopenPiped(args)
Popen2IO.__init__(self, p.stdin, p.stdout, execmodel=execmodel)
def wait(self):
try:
return self.popen.wait()
except OSError:
pass # subprocess probably dead already
def kill(self):
killpopen(self.popen)
def killpopen(popen):
try:
if hasattr(popen, "kill"):
popen.kill()
else:
killpid(popen.pid)
except EnvironmentError:
sys.stderr.write("ERROR killing: %s\n" % (sys.exc_info()[1]))
sys.stderr.flush()
def killpid(pid):
if hasattr(os, "kill"):
os.kill(pid, 15)
elif sys.platform == "win32" or getattr(os, "_name", None) == "nt":
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
raise EnvironmentError("no method to kill {}".format(pid))
popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))"
def shell_split_path(path):
"""
Use shell lexer to split the given path into a list of components,
taking care to handle Windows' '\' correctly.
"""
if sys.platform.startswith("win"):
# replace \\ by / otherwise shlex will strip them out
path = path.replace("\\", "/")
return shlex.split(path)
def popen_args(spec):
args = shell_split_path(spec.python) if spec.python else [sys.executable]
args.append("-u")
if spec is not None and spec.dont_write_bytecode:
args.append("-B")
# Slight gymnastics in ordering these arguments because CPython (as of
# 2.7.1) ignores -B if you provide `python -c "something" -B`
args.extend(["-c", popen_bootstrapline])
return args
def ssh_args(spec):
# NOTE: If changing this, you need to sync those changes to vagrant_args
# as well, or, take some time to further refactor the commonalities of
# ssh_args and vagrant_args.
remotepython = spec.python or "python"
args = ["ssh", "-C"]
if spec.ssh_config is not None:
args.extend(["-F", str(spec.ssh_config)])
args.extend(spec.ssh.split())
remotecmd = '{} -c "{}"'.format(remotepython, popen_bootstrapline)
args.append(remotecmd)
return args
def vagrant_ssh_args(spec):
# This is the vagrant-wrapped version of SSH. Unfortunately the
# command lines are incompatible to just channel through ssh_args
# due to ordering/templating issues.
# NOTE: This should be kept in sync with the ssh_args behaviour.
# spec.vagrant is identical to spec.ssh in that they both carry
# the remote host "address".
remotepython = spec.python or "python"
args = ["vagrant", "ssh", spec.vagrant_ssh, "--", "-C"]
if spec.ssh_config is not None:
args.extend(["-F", str(spec.ssh_config)])
remotecmd = '{} -c "{}"'.format(remotepython, popen_bootstrapline)
args.extend([remotecmd])
return args
def create_io(spec, execmodel):
if spec.popen:
args = popen_args(spec)
return Popen2IOMaster(args, execmodel)
if spec.ssh:
args = ssh_args(spec)
io = Popen2IOMaster(args, execmodel)
io.remoteaddress = spec.ssh
return io
if spec.vagrant_ssh:
args = vagrant_ssh_args(spec)
io = Popen2IOMaster(args, execmodel)
io.remoteaddress = spec.vagrant_ssh
return io
#
# Proxy Gateway handling code
#
# master: proxy initiator
# forwarder: forwards between master and sub
# sub: sub process that is proxied to the initiator
RIO_KILL = 1
RIO_WAIT = 2
RIO_REMOTEADDRESS = 3
RIO_CLOSE_WRITE = 4
class ProxyIO(object):
""" A Proxy IO object allows to instantiate a Gateway
through another "via" gateway. A master:ProxyIO object
provides an IO object effectively connected to the sub
via the forwarder. To achieve this, master:ProxyIO interacts
with forwarder:serve_proxy_io() which itself
instantiates and interacts with the sub.
"""
def __init__(self, proxy_channel, execmodel):
# after exchanging the control channel we use proxy_channel
# for messaging IO
self.controlchan = proxy_channel.gateway.newchannel()
proxy_channel.send(self.controlchan)
self.iochan = proxy_channel
self.iochan_file = self.iochan.makefile("r")
self.execmodel = execmodel
def read(self, nbytes):
return self.iochan_file.read(nbytes)
def write(self, data):
return self.iochan.send(data)
def _controll(self, event):
self.controlchan.send(event)
return self.controlchan.receive()
def close_write(self):
self._controll(RIO_CLOSE_WRITE)
def kill(self):
self._controll(RIO_KILL)
def wait(self):
return self._controll(RIO_WAIT)
@property
def remoteaddress(self):
return self._controll(RIO_REMOTEADDRESS)
def __repr__(self):
return "<RemoteIO via {}>".format(self.iochan.gateway.id)
class PseudoSpec:
def __init__(self, vars):
self.__dict__.update(vars)
def __getattr__(self, name):
return None
def serve_proxy_io(proxy_channelX):
execmodel = proxy_channelX.gateway.execmodel
log = partial(
proxy_channelX.gateway._trace, "serve_proxy_io:%s" % proxy_channelX.id
)
spec = PseudoSpec(proxy_channelX.receive())
# create sub IO object which we will proxy back to our proxy initiator
sub_io = create_io(spec, execmodel)
control_chan = proxy_channelX.receive()
log("got control chan", control_chan)
# read data from master, forward it to the sub
# XXX writing might block, thus blocking the receiver thread
def forward_to_sub(data):
log("forward data to sub, size %s" % len(data))
sub_io.write(data)
proxy_channelX.setcallback(forward_to_sub)
def controll(data):
if data == RIO_WAIT:
control_chan.send(sub_io.wait())
elif data == RIO_KILL:
control_chan.send(sub_io.kill())
elif data == RIO_REMOTEADDRESS:
control_chan.send(sub_io.remoteaddress)
elif data == RIO_CLOSE_WRITE:
control_chan.send(sub_io.close_write())
control_chan.setcallback(controll)
# write data to the master coming from the sub
forward_to_master_file = proxy_channelX.makefile("w")
# read bootstrap byte from sub, send it on to master
log("reading bootstrap byte from sub", spec.id)
initial = sub_io.read(1)
assert initial == "1".encode("ascii"), initial
log("forwarding bootstrap byte from sub", spec.id)
forward_to_master_file.write(initial)
# enter message forwarding loop
while True:
try:
message = Message.from_io(sub_io)
except EOFError:
log("EOF from sub, terminating proxying loop", spec.id)
break
message.to_io(forward_to_master_file)
# proxy_channelX will be closed from remote_exec's finalization code
if __name__ == "__channelexec__":
serve_proxy_io(channel) # noqa
| mit | 4,103,531,356,347,174,000 | 29.152 | 82 | 0.645795 | false | 3.666342 | false | false | false |
wpjesus/codematch | ietf/secr/proceedings/forms.py | 1 | 6833 | import os
from django import forms
from django.conf import settings
from django.template.defaultfilters import filesizeformat
from ietf.doc.models import Document
from ietf.group.models import Group
from ietf.name.models import DocTypeName
from ietf.meeting.models import Meeting, Session
# ---------------------------------------------
# Globals
# ---------------------------------------------
VALID_SLIDE_EXTENSIONS = ('.doc','.docx','.pdf','.ppt','.pptx','.txt','.zip')
VALID_MINUTES_EXTENSIONS = ('.txt','.html','.htm','.pdf')
VALID_AGENDA_EXTENSIONS = ('.txt','.html','.htm')
VALID_BLUESHEET_EXTENSIONS = ('.pdf','.jpg','.jpeg')
#----------------------------------------------------------
# Forms
#----------------------------------------------------------
class AjaxChoiceField(forms.ChoiceField):
'''
Special ChoiceField to use when populating options with Ajax. The submitted value
is not in the initial choices list so we need to override valid_value().
'''
def valid_value(self, value):
return True
class EditSlideForm(forms.ModelForm):
class Meta:
model = Document
fields = ('title',)
class InterimMeetingForm(forms.Form):
date = forms.DateField(help_text="(YYYY-MM-DD Format, please)")
group_acronym_id = forms.CharField(widget=forms.HiddenInput())
def clean(self):
super(InterimMeetingForm, self).clean()
cleaned_data = self.cleaned_data
# need to use get() here, if the date field isn't valid it won't exist
date = cleaned_data.get('date','')
group_acronym_id = cleaned_data["group_acronym_id"]
qs = Meeting.objects.filter(type='interim',date=date,session__group__acronym=group_acronym_id)
if qs:
raise forms.ValidationError('A meeting already exists for this date.')
return cleaned_data
class RecordingForm(forms.Form):
group = forms.CharField(max_length=40)
external_url = forms.URLField(label='Url')
session = AjaxChoiceField(choices=(('','----'),))
def clean_session(self):
'''
Emulate ModelChoiceField functionality
'''
id = self.cleaned_data.get('session')
try:
return Session.objects.get(id=id)
except Session.DoesNotExist:
raise forms.ValidationError('Invalid Session')
def clean_group(self):
acronym = self.cleaned_data.get('group')
try:
return Group.objects.get(acronym=acronym)
except Group.DoesNotExist:
raise forms.ValidationError('Invalid group name')
class RecordingEditForm(forms.ModelForm):
class Meta:
model = Document
fields = ['external_url']
def __init__(self, *args, **kwargs):
super(RecordingEditForm, self).__init__(*args, **kwargs)
self.fields['external_url'].label='Url'
class ReplaceSlideForm(forms.ModelForm):
file = forms.FileField(label='Select File')
class Meta:
model = Document
fields = ('title',)
def clean_file(self):
file = self.cleaned_data.get('file')
ext = os.path.splitext(file.name)[1].lower()
if ext not in VALID_SLIDE_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for presentation slides: %s' % ','.join(VALID_SLIDE_EXTENSIONS))
if file._size > settings.SECR_MAX_UPLOAD_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file._size)))
return file
class UnifiedUploadForm(forms.Form):
acronym = forms.CharField(widget=forms.HiddenInput())
meeting_id = forms.CharField(widget=forms.HiddenInput())
material_type = forms.ModelChoiceField(queryset=DocTypeName.objects.filter(slug__in=('minutes','agenda','slides','bluesheets')),empty_label=None)
slide_name = forms.CharField(label='Name of Presentation',max_length=255,required=False,help_text="For presentations only")
file = forms.FileField(label='Select File',help_text='<div id="id_file_help">Note 1: You can only upload a presentation file in txt, pdf, doc, or ppt/pptx. System will not accept presentation files in any other format.<br><br>Note 2: All uploaded files will be available to the public immediately on the Preliminary Page. However, for the Proceedings, ppt/pptx files will be converted to html format and doc files will be converted to pdf format manually by the Secretariat staff.</div>')
def clean_file(self):
file = self.cleaned_data['file']
if file._size > settings.SECR_MAX_UPLOAD_SIZE:
raise forms.ValidationError('Please keep filesize under %s. Current filesize %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file._size)))
return file
def clean(self):
super(UnifiedUploadForm, self).clean()
# if an invalid file type is supplied no file attribute will exist
if self.errors:
return self.cleaned_data
cleaned_data = self.cleaned_data
material_type = cleaned_data['material_type']
slide_name = cleaned_data['slide_name']
file = cleaned_data['file']
ext = os.path.splitext(file.name)[1].lower()
if material_type.slug == 'slides' and not slide_name:
raise forms.ValidationError('ERROR: Name of Presentaion cannot be blank')
# only supporting PDFs per Alexa 04-05-2011
#if material_type == 1 and not file_ext[1] == '.pdf':
# raise forms.ValidationError('Presentations must be a PDF file')
# validate file extensions based on material type (slides,agenda,minutes,bluesheets)
# valid extensions per online documentation: meeting-materials.html
# 09-14-11 added ppt, pdf per Alexa
# 04-19-12 txt/html for agenda, +pdf for minutes per Russ
if material_type.slug == 'slides' and ext not in VALID_SLIDE_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for presentation slides: %s' % ','.join(VALID_SLIDE_EXTENSIONS))
if material_type.slug == 'agenda' and ext not in VALID_AGENDA_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for agendas: %s' % ','.join(VALID_AGENDA_EXTENSIONS))
if material_type.slug == 'minutes' and ext not in VALID_MINUTES_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for minutes: %s' % ','.join(VALID_MINUTES_EXTENSIONS))
if material_type.slug == 'bluesheets' and ext not in VALID_BLUESHEET_EXTENSIONS:
raise forms.ValidationError('Only these file types supported for bluesheets: %s' % ','.join(VALID_BLUESHEET_EXTENSIONS))
return cleaned_data
| bsd-3-clause | 3,905,673,467,625,157,000 | 45.482993 | 492 | 0.647007 | false | 4.038416 | false | false | false |
alaeddine10/ggrc-core | src/ggrc/models/relationship_types.py | 1 | 21783 |
class RelationshipTypes(object):
@classmethod
def types(cls):
types = {}
for k, rt in RELATIONSHIP_TYPES.items():
types[k] = rt.copy()
types[k].update({ 'relationship_type': k })
return types
@classmethod
def get_type(cls, relationship_type_id):
return cls.types().get(relationship_type_id, None)
@classmethod
def valid_relationship_hash(cls, relationship_type, related_model, endpoint):
return dict(
relationship_type=relationship_type,
related_model=related_model,
related_model_endpoint=endpoint)
@classmethod
def valid_relationship(cls, obj_type, name, rel):
if 'symmetric' in rel and rel['symmetric']:
if rel['source_type'] == obj_type and rel['target_type'] == obj_type:
return cls.valid_relationship_hash(name, obj_type, 'both')
else:
if rel['source_type'] == obj_type:
return cls.valid_relationship_hash(
name, rel['target_type'], 'destination')
if rel['target_type'] == obj_type:
return cls.valid_relationship_hash(
name, rel['source_type'], 'source')
@classmethod
def valid_relationship_helper(cls, obj_type):
return [
cls.valid_relationship(obj_type, name, rel)
for name, rel in cls.types().items()]
@classmethod
def valid_relationships(cls, obj_type):
if not isinstance(obj_type, (str, unicode)):
if not isinstance(obj_type, type):
obj_type = obj_type.__class__
obj_type = obj_type.__name__
return [vr for vr in cls.valid_relationship_helper(obj_type) if vr]
RELATIONSHIP_TYPES = {
'data_asset_has_process': {
'source_type': "DataAsset",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This data asset relies upon the following processes.",
'reverse_description': "This process supports the following data assets."
},
'data_asset_relies_upon_data_asset': {
'source_type': "DataAsset",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following data assets.",
'reverse_description': "This data asset supports the following data assets."
},
'data_asset_relies_upon_facility': {
'source_type': "DataAsset",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following facilities.",
'reverse_description': "This facility supports the following data assets."
},
'data_asset_relies_upon_system': {
'source_type': "DataAsset",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This data asset relies upon the following systems.",
'reverse_description': "This system supports the following data assets."
},
'facility_has_process': {
'source_type': "Facility",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This facility relies upon the following processes.",
'reverse_description': "This process supports the following facilities."
},
'facility_relies_upon_data_asset': {
'source_type': "Facility",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following data assets.",
'reverse_description': "This data asset supports the following facilities."
},
'facility_relies_upon_facility': {
'source_type': "Facility",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following facilities.",
'reverse_description': "This facility supports the following facilities."
},
'facility_relies_upon_system': {
'source_type': "Facility",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This facility relies upon the following systems.",
'reverse_description': "This system supports the following facilities."
},
'market_has_process': {
'source_type': "Market",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This market relies upon the following processes.",
'reverse_description': "This process supports the following markets."
},
'market_includes_market': {
'source_type': "Market",
'target_type': "Market",
'forward_phrase': "includes",
'reverse_phrase': "is included in",
'forward_description': "This market includes the following markets.",
'reverse_description': "This market is included in the following markets."
},
'market_relies_upon_data_asset': {
'source_type': "Market",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following data assets.",
'reverse_description': "This data asset supports the following markets."
},
'market_relies_upon_facility': {
'source_type': "Market",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following facilities.",
'reverse_description': "This facility supports the following markets."
},
'market_relies_upon_system': {
'source_type': "Market",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This market relies upon the following systems.",
'reverse_description': "This system supports the following markets."
},
'org_group_has_process': {
'source_type': "OrgGroup",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This org group relies upon the following processes.",
'reverse_description': "This process supports the following org groups."
},
'org_group_is_affiliated_with_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'symmetric': True,
'forward_phrase': "is affiliated/collaborates with",
'reverse_phrase': "is affiliated/collaborates with",
'forward_description': "This org group is affiliated/collaborates with the following org groups.",
'reverse_description': "This org group is affiliated/collaborates with the following org groups."
},
'org_group_is_responsible_for_data_asset': {
'source_type': "OrgGroup",
'target_type': "DataAsset",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following data assets.",
'reverse_description': "This data asset is overseen by the following org groups."
},
'org_group_is_responsible_for_facility': {
'source_type': "OrgGroup",
'target_type': "Facility",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following facilities.",
'reverse_description': "This facility is overseen by the following org groups."
},
'org_group_is_responsible_for_market': {
'source_type': "OrgGroup",
'target_type': "Market",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following markets.",
'reverse_description': "This market is overseen by the following org groups."
},
'org_group_is_responsible_for_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following org groups.",
'reverse_description': "This org group is overseen by the following org groups."
},
'org_group_is_responsible_for_process': {
'source_type': "OrgGroup",
'target_type': "Process",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following processes.",
'reverse_description': "This process is overseen by the following org groups."
},
'org_group_is_responsible_for_product': {
'source_type': "OrgGroup",
'target_type': "Product",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following products.",
'reverse_description': "This product is overseen by the following org groups."
},
'org_group_is_responsible_for_project': {
'source_type': "OrgGroup",
'target_type': "Project",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following projects.",
'reverse_description': "This project is overseen by the following org groups."
},
'org_group_is_responsible_for_system': {
'source_type': "OrgGroup",
'target_type': "System",
'forward_phrase': "is responsible for",
'reverse_phrase': "is overseen by",
'forward_description': "This org group is responsible for the following systems.",
'reverse_description': "This system is overseen by the following org groups."
},
'org_group_relies_upon_data_asset': {
'source_type': "OrgGroup",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following data assets.",
'reverse_description': "This data asset supports the following org groups."
},
'org_group_relies_upon_facility': {
'source_type': "OrgGroup",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following facilities.",
'reverse_description': "This facility supports the following org groups."
},
'org_group_relies_upon_org_group': {
'source_type': "OrgGroup",
'target_type': "OrgGroup",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following org groups.",
'reverse_description': "This org group supports the following org groups."
},
'org_group_relies_upon_system': {
'source_type': "OrgGroup",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This org group relies upon the following systems.",
'reverse_description': "This system supports the following org groups."
},
'product_has_process': {
'source_type': "Product",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This product relies upon the following processes.",
'reverse_description': "This process supports the following products."
},
'product_is_affiliated_with_product': {
'source_type': "Product",
'target_type': "Product",
'symmetric': True,
'forward_phrase': "is affiliated/collaborates with",
'reverse_phrase': "is affiliated/collaborates with",
'forward_description': "This product is affiliated/collaborates with the following products.",
'reverse_description': "This product is affiliated/collaborates with the following products."
},
'product_is_sold_into_market': {
'source_type': "Product",
'target_type': "Market",
'forward_phrase': "is sold into",
'reverse_phrase': "is a market for",
'forward_description': "This product is sold into the following markets.",
'reverse_description': "This market is a market for the following products."
},
'product_relies_upon_data_asset': {
'source_type': "Product",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following data assets.",
'reverse_description': "This data asset supports the following products."
},
'product_relies_upon_facility': {
'source_type': "Product",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following facilities.",
'reverse_description': "This facility supports the following products."
},
'product_relies_upon_product': {
'source_type': "Product",
'target_type': "Product",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following products.",
'reverse_description': "This product supports the following products."
},
'product_relies_upon_system': {
'source_type': "Product",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This product relies upon the following systems.",
'reverse_description': "This system supports the following products."
},
'program_applies_to_data_asset': {
'source_type': "Program",
'target_type': "DataAsset",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following data assets.",
'reverse_description': "This data asset is within scope of the following programs."
},
'program_applies_to_facility': {
'source_type': "Program",
'target_type': "Facility",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following facilities.",
'reverse_description': "This facility is within scope of the following programs."
},
'program_applies_to_market': {
'source_type': "Program",
'target_type': "Market",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following markets.",
'reverse_description': "This market is within scope of the following programs."
},
'program_applies_to_org_group': {
'source_type': "Program",
'target_type': "OrgGroup",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following org groups.",
'reverse_description': "This org group is within scope of the following programs."
},
'program_applies_to_process': {
'source_type': "Program",
'target_type': "Process",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following processes.",
'reverse_description': "This process is within scope of the following programs."
},
'program_applies_to_product': {
'source_type': "Program",
'target_type': "Product",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following products.",
'reverse_description': "This product is within scope of the following programs."
},
'program_applies_to_project': {
'source_type': "Program",
'target_type': "Project",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following projects.",
'reverse_description': "This project is within scope of the following programs."
},
'program_applies_to_system': {
'source_type': "Program",
'target_type': "System",
'forward_phrase': "applies to",
'reverse_phrase': "is within scope of",
'forward_description': "This program applies to the following systems.",
'reverse_description': "This system is within scope of the following programs."
},
'project_has_process': {
'source_type': "Project",
'target_type': "Process",
'forward_phrase': "has",
'reverse_phrase': "is a process for",
'forward_description': "This project relies upon the following processes.",
'reverse_description': "This process supports the following projects."
},
'project_relies_upon_data_asset': {
'source_type': "Project",
'target_type': "DataAsset",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following data assets.",
'reverse_description': "This data asset supports the following projects."
},
'project_relies_upon_facility': {
'source_type': "Project",
'target_type': "Facility",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following facilities.",
'reverse_description': "This facility supports the following projects."
},
'project_relies_upon_system': {
'source_type': "Project",
'target_type': "System",
'forward_phrase': "relies upon",
'reverse_phrase': "supports",
'forward_description': "This project relies upon the following systems.",
'reverse_description': "This system supports the following projects."
},
'project_targets_data_asset': {
'source_type': "Project",
'target_type': "DataAsset",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following data assets.",
'reverse_description': "This data asset is targeted by the following projects."
},
'project_targets_facility': {
'source_type': "Project",
'target_type': "Facility",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following facilities.",
'reverse_description': "This facility is targeted by the following projects."
},
'project_targets_market': {
'source_type': "Project",
'target_type': "Market",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following markets.",
'reverse_description': "This market is targeted by the following projects."
},
'project_targets_org_group': {
'source_type': "Project",
'target_type': "OrgGroup",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following org groups.",
'reverse_description': "This org group is targeted by the following projects."
},
'project_targets_product': {
'source_type': "Project",
'target_type': "Product",
'forward_phrase': "targets",
'reverse_phrase': "is targeted by",
'forward_description': "This project targets the following products.",
'reverse_description': "This product is targeted by the following projects."
},
'risk_is_a_threat_to_data_asset': {
'source_type': "Risk",
'target_type': "DataAsset",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following data assets.",
'reverse_description': "This data asset is vulnerable to the following risks."
},
'risk_is_a_threat_to_facility': {
'source_type': "Risk",
'target_type': "Facility",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following facilities.",
'reverse_description': "This faciliy is vulnerable to the following risks."
},
'risk_is_a_threat_to_market': {
'source_type': "Risk",
'target_type': "Market",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following markets.",
'reverse_description': "This market is vulnerable to the following risks."
},
'risk_is_a_threat_to_org_group': {
'source_type': "Risk",
'target_type': "OrgGroup",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is not a threat to the following org groups.",
'reverse_description': "This org group is vulnerable to the following risks."
},
'risk_is_a_threat_to_process': {
'source_type': "Risk",
'target_type': "Process",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following processes.",
'reverse_description': "This process is vulnerable to the following risks."
},
'risk_is_a_threat_to_product': {
'source_type': "Risk",
'target_type': "Product",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following products.",
'reverse_description': "This product is vulnerable to the following risks."
},
'risk_is_a_threat_to_project': {
'source_type': "Risk",
'target_type': "Project",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following projects.",
'reverse_description': "This project is vulnerable to the following risks."
},
'risk_is_a_threat_to_system': {
'source_type': "Risk",
'target_type': "System",
'forward_phrase': "is a threat to",
'reverse_phrase': "is vulnerable to",
'forward_description': "This risk is a threat to the following systems.",
'reverse_description': "This system is vulnerable to the following risks."
},
}
| apache-2.0 | -5,848,448,770,137,311,000 | 40.333966 | 102 | 0.66786 | false | 3.76999 | false | false | false |
bworrell/mixbox | mixbox/parser.py | 1 | 6500 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from abc import ABCMeta, abstractmethod
from distutils.version import StrictVersion
from .exceptions import ignored
from .xml import get_etree_root, get_schemaloc_pairs
class UnknownVersionError(Exception):
"""A parsed document contains no version information."""
pass
class UnsupportedVersionError(Exception):
"""A parsed document is a version unsupported by the parser."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedVersionError, self).__init__(message)
self.expected = expected
self.found = found
class UnsupportedRootElementError(Exception):
"""A parsed document contains an unsupported root element."""
def __init__(self, message, expected=None, found=None):
super(UnsupportedRootElementError, self).__init__(message)
self.expected = expected
self.found = found
class EntityParser(object):
__metaclass__ = ABCMeta
@abstractmethod
def supported_tags(self):
"""Return an iterable of supported document root tags (strings)."""
@abstractmethod
def get_version(self, root):
"""Return as a string the schema version used by the document root."""
@abstractmethod
def supported_versions(self, tag):
"""Return all the supported versions for a given tag."""
@abstractmethod
def get_entity_class(self, tag):
"""Return the class to be returned as the result of parsing."""
def _get_version(self, root):
"""Return the version of the root element passed in.
Args:
root (etree.Element)
Returns:
distutils.StrictVersion
Raises:
UnknownVersionError
"""
# Note: STIX and MAEC use a "version" attribute. To support CybOX, a
# subclass will need to combine "cybox_major_version",
# "cybox_minor_version", and "cybox_update_version".
version = self.get_version(root)
if version:
return StrictVersion(version)
raise UnknownVersionError(
"Unable to determine the version of the input document. No "
"version information found on the root element."
)
def _check_version(self, root):
"""Ensure the root element is a supported version.
Args:
root (etree.Element)
Raises:
UnsupportedVersionError
"""
version = self._get_version(root)
supported = [StrictVersion(x) for x in
self.supported_versions(root.tag)]
if version in supported:
return
error = "Document version ({0}) not in supported versions ({1})"
raise UnsupportedVersionError(
message=error.format(version, supported),
expected=supported,
found=version
)
def _check_root_tag(self, root):
"""Check that the XML element tree has a supported root element.
Args:
root (etree.Element)
Raises:
UnsupportedRootElementError
"""
supported = self.supported_tags()
if root.tag in supported:
return
error = "Document root element ({0}) not one of ({1})"
raise UnsupportedRootElementError(
message=error.format(root.tag, supported),
expected=supported,
found=root.tag,
)
def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a STIX binding object from the supplied xml file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
.UnsupportedVersionError: If `check_version` is ``False`` and
`xml_file` contains an unsupported STIX version.
.UnsupportedRootElement: If `check_root` is ``True`` and `xml_file`
contains an invalid root element.
"""
root = get_etree_root(xml_file, encoding=encoding)
if check_root:
self._check_root_tag(root)
if check_version:
self._check_version(root)
entity_class = self.get_entity_class(root.tag)
entity_obj = entity_class._binding_class.factory()
entity_obj.build(root)
return entity_obj
def parse_xml(self, xml_file, check_version=True, check_root=True,
encoding=None):
"""Creates a python-stix STIXPackage object from the supplied xml_file.
Args:
xml_file: A filename/path or a file-like object representing a STIX
instance document
check_version: Inspect the version before parsing.
check_root: Inspect the root element before parsing.
encoding: The character encoding of the input `xml_file`. If
``None``, an attempt will be made to determine the input
character encoding.
Raises:
.UnknownVersionError: If `check_version` is ``True`` and `xml_file`
does not contain STIX version information.
.UnsupportedVersionError: If `check_version` is ``False`` and
`xml_file` contains an unsupported STIX version.
.UnsupportedRootElement: If `check_root` is ``True`` and `xml_file`
contains an invalid root element.
"""
entity_obj = self.parse_xml_to_obj(
xml_file=xml_file,
check_version=check_version,
check_root=check_root,
encoding=encoding
)
root = get_etree_root(xml_file, encoding=encoding)
entity = self.get_entity_class(root.tag).from_obj(entity_obj)
# Save the parsed nsmap and schemalocations onto the parsed Entity
entity.__input_namespaces__ = dict(root.nsmap.iteritems())
with ignored(KeyError):
pairs = get_schemaloc_pairs(root)
entity.__input_schemalocations__ = dict(pairs)
return entity
| bsd-3-clause | 6,696,311,111,732,359,000 | 32.333333 | 79 | 0.612154 | false | 4.545455 | false | false | false |
urska19/LVR-sat | src/graphColoring.py | 1 | 1511 | #!/usr/bin/env python
from logConstructs import *
def graph_coloring(graph, colors):
if len(graph) < colors:
return False
variables=[[None for i in range(colors)] for j in range(len(graph))]
#construct variables
for i in range(len(graph)):
for j in range(colors):
variables[i][j] = Var("X" + str(i) + "" + str(j))
#construct first sub formula - node must be colored
main_formula = And(map(lambda x: Or(x), variables))
#construct second sub formula - node must be colored with one color
subformula = []
for k in range(colors - 1):
for l in range(k + 1, colors):
subformula += map(lambda x: Not(And([x[k], x[l]])), variables)
#construct third sub formula - connected nodes have different colors
for i in range(len(graph) - 1):
for j in range(i + 1, len(graph)):
if graph[i][j] == 1:
subformula += map(lambda x: Not(And([variables[i][x], variables[j][x]])), range(colors))
main_formula = And(subformula + main_formula.clause)
return main_formula.simplify()
def printGraph(graph):
result = ""
for i in range(len(graph)):
for j in range(len(graph)):
result += " " + str(graph[i][j]) + " "
result += "\n"
return result
def processResult(result):
mappings = {}
for key in result:
node = key[1]
color = key[2]
if result[key]:
mappings[int(node)] = int(color)
return mappings
| bsd-3-clause | 3,202,995,941,548,603,000 | 24.610169 | 104 | 0.578425 | false | 3.658596 | false | false | false |
dschien/PyExcelModelingHelper | excel_helper/__init__.py | 1 | 33092 | import csv
import datetime
import importlib
import sys
from abc import abstractmethod
from collections import defaultdict
from typing import Dict, List, Set
import numpy as np
import pandas as pd
from dateutil import relativedelta as rdelta
import logging
from functools import partial
from xlrd import xldate_as_tuple
import calendar
from scipy.interpolate import interp1d
import json
__author__ = 'schien'
import pkg_resources # part of setuptools
version = pkg_resources.require("excel-modelling-helper")[0].version
param_name_map_v1 = {'variable': 'name', 'scenario': 'source_scenarios_string', 'module': 'module_name',
'distribution': 'distribution_name', 'param 1': 'param_a', 'param 2': 'param_b',
'param 3': 'param_c',
'unit': '', 'CAGR': 'cagr', 'ref date': 'ref_date', 'label': '', 'tags': '', 'comment': '',
'source': ''}
param_name_map_v2 = {'CAGR': 'cagr',
'comment': '',
'label': '',
'mean growth': 'growth_factor',
'param': '',
'ref date': 'ref_date',
'ref value': '',
'scenario': 'source_scenarios_string',
'source': '',
'tags': '',
'type': '',
'unit': '',
'variability growth': 'ef_growth_factor',
'initial_value_proportional_variation': '',
'variable': 'name'}
param_name_maps = {1: param_name_map_v1, 2: param_name_map_v2}
# logger.basicConfig(level=logger.DEBUG)
logger = logging.getLogger(__name__)
class DistributionFunctionGenerator(object):
module: str
distribution: str
param_a: str
param_b: str
param_c: str
def __init__(self, module_name=None, distribution_name=None, param_a: float = None,
param_b: float = None, param_c: float = None, size=None, **kwargs):
"""
Instantiate a new object.
:param module_name:
:param distribution_name:
:param param_a:
:param param_b:
:param param_c:
:param size:
:param kwargs: can contain key "sample_mean_value" with bool value
"""
self.kwargs = kwargs
self.size = size
self.module_name = module_name
self.distribution_name = distribution_name
self.sample_mean_value = kwargs.get('sample_mean_value', False)
# prepare function arguments
if distribution_name == 'choice':
if type(param_a) == str:
tokens = param_a.split(',')
params = [float(token.strip()) for token in tokens]
self.random_function_params = [np.array(params, dtype=np.float)]
else:
self.random_function_params = [np.array([i for i in [param_a, param_b, param_c] if i], dtype=np.float)]
logger.debug(f'setting function params for choice distribution {self.random_function_params}')
else:
self.random_function_params = [i for i in [param_a, param_b, param_c] if i not in [None, ""]]
def get_mean(self, distribution_function):
"""Get the mean value for a distribution.
If the distribution function is [normal, uniform,choice,triangular] the analytic value is being calculted.
Else, the distribution is instantiated and then the mean is being calculated.
:param distribution_function:
:return: the mean as a scalar
"""
name = self.distribution_name
params = self.random_function_params
if name == 'normal':
return params[0]
if name == 'uniform':
return (params[0] + params[1]) / 2.
if name == 'choice':
return params[0].mean()
if name == 'triangular':
return (params[0] + params[1] + params[2]) / 3.
return distribution_function().mean()
def generate_values(self, *args, **kwargs):
"""
Generate a sample of values by sampling from a distribution. The size of the sample can be overriden with the 'size' kwarg.
If `self.sample_mean_value == True` the sample will contain "size" times the mean value.
:param args:
:param kwargs:
:return: sample as vector of given size
"""
sample_size = kwargs.get('size', self.size)
f = self.instantiate_distribution_function(self.module_name, self.distribution_name)
distribution_function = partial(f, *self.random_function_params, size=sample_size)
if self.sample_mean_value:
sample = np.full(sample_size, self.get_mean(distribution_function))
else:
sample = distribution_function()
return sample
@staticmethod
def instantiate_distribution_function(module_name, distribution_name):
module = importlib.import_module(module_name)
func = getattr(module, distribution_name)
return func
class Parameter(object):
"""
A single parameter
"""
version: int
name: str
unit: str
comment: str
source: str
scenario: str
processes: Dict[str, List]
"optional comma-separated list of tags"
tags: str
def __init__(self, name, tags=None, source_scenarios_string: str = None, unit: str = None,
comment: str = None, source: str = None, version=None,
**kwargs):
# The source definition of scenarios. A comma-separated list
self.version = version
self.source = source
self.comment = comment
self.unit = unit
self.source_scenarios_string = source_scenarios_string
self.tags = tags
self.name = name
self.scenario = None
self.cache = None
# track the usages of this parameter per process as a list of process-specific variable names that are backed by this parameter
self.processes = defaultdict(list)
self.kwargs = kwargs
def __call__(self, settings=None, *args, **kwargs):
"""
Samples from a parameter. Values are cached and returns the same value every time called.
@todo confusing interface that accepts 'settings' and kwargs at the same time.
worse- 'use_time_series' must be present in the settings dict
:param args:
:param kwargs:
:return:
"""
if self.cache is None:
kwargs['name'] = self.name
kwargs['unit'] = self.unit
kwargs['tags'] = self.tags
kwargs['scenario'] = self.scenario
if not settings:
settings = {}
common_args = {'size': settings.get('sample_size', 1),
'sample_mean_value': settings.get('sample_mean_value', False)}
common_args.update(**self.kwargs)
if settings.get('use_time_series', False):
if self.version == 2:
generator = GrowthTimeSeriesGenerator(**common_args, times=settings['times'])
else:
generator = ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(**common_args,
times=settings['times'])
else:
generator = DistributionFunctionGenerator(**common_args)
self.cache = generator.generate_values(*args, **kwargs)
return self.cache
def add_usage(self, process_name, variable_name):
# add the name of a variable of a process model that is backed by this parameter
self.processes[process_name].append(variable_name)
class GrowthTimeSeriesGenerator(DistributionFunctionGenerator):
ref_date: str
# of the mean values
# the type of growth ['exp']
# growth_function_type: str
# of the error function
variance: str
# error function growth rate
ef_growth_factor: str
def __init__(self, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
assert 'ref value' in self.kwargs
# 1. Generate $\mu$
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
ref_date = self.ref_date
if not ref_date:
raise Exception(f"Ref date not set for variable {kwargs['name']}")
mu = self.generate_mu(end_date, ref_date, start_date)
# 3. Generate $\sigma$
## Prepare array with growth values $\sigma$
if self.sample_mean_value:
sigma = np.zeros((len(self.times), self.size))
else:
if self.kwargs['type'] == 'interp':
def get_date(record):
return datetime.datetime.strptime(record[0], "%Y-%m-%d")
ref_value_ = sorted(json.loads(self.kwargs['ref value'].strip()).items(), key=get_date)
intial_value = ref_value_[0][1]
else:
intial_value = float(self.kwargs['ref value'])
variability_ = intial_value * self.kwargs['initial_value_proportional_variation']
logger.debug(f'sampling random distribution with parameters -{variability_}, 0, {variability_}')
sigma = np.random.triangular(-1 * variability_, 0, variability_, (len(self.times), self.size))
# logger.debug(ref_date.strftime("%b %d %Y"))
## 4. Prepare growth array for $\alpha_{sigma}$
alpha_sigma = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['ef_growth_factor'], 1)
### 5. Prepare DataFrame
iterables = [self.times, range(self.size)]
index_names = ['time', 'samples']
_multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
# logger.debug(start_date)
# logger.debug(end_date)
from dateutil import relativedelta
r = relativedelta.relativedelta(end_date, start_date)
months = r.years * 12 + r.months + 1
name = kwargs['name']
## Apply growth to $\sigma$ and add $\sigma$ to $\mu$
# logger.debug(sigma.size)
# logger.debug(alpha_sigma.shape)
# logger.debug(months)
unit_ = kwargs["unit"]
if not unit_:
unit_ = 'dimensionless'
series = pd.Series(((sigma * alpha_sigma) + mu.reshape(months, 1)).ravel(), index=_multi_index,
dtype=f'pint[{unit_}]')
## test if df has sub-zero values
df_sigma__dropna = series.where(series < 0).dropna()
if not df_sigma__dropna.pint.m.empty:
logger.warning(f"Negative values for parameter {name} from {df_sigma__dropna.index[0][0]}")
return series
def generate_mu(self, end_date, ref_date, start_date):
if self.kwargs['type'] == 'exp':
mu_bar = np.full(len(self.times), float(self.kwargs['ref value']))
# 2. Apply Growth to Mean Values $\alpha_{mu}$
alpha_mu = growth_coefficients(start_date,
end_date,
ref_date,
self.kwargs['growth_factor'], 1)
mu = mu_bar * alpha_mu.ravel()
mu = mu.reshape(len(self.times), 1)
return mu
if self.kwargs['type'] == 'interp':
def toTimestamp(d):
return calendar.timegm(d.timetuple())
def interpolate(growth_config: Dict[str, float], date_range, kind='linear'):
arr1 = np.array([toTimestamp(datetime.datetime.strptime(date_val, '%Y-%m-%d')) for date_val in
growth_config.keys()])
arr2 = np.array([val for val in growth_config.values()])
f = interp1d(arr1, arr2, kind=kind, fill_value='extrapolate')
return f([toTimestamp(date_val) for date_val in date_range])
ref_value_ = json.loads(self.kwargs['ref value'].strip())
return interpolate(ref_value_, self.times, self.kwargs['param'])
class ConstantUncertaintyExponentialGrowthTimeSeriesGenerator(DistributionFunctionGenerator):
cagr: str
ref_date: str
def __init__(self, cagr=None, times=None, size=None, index_names=None, ref_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cagr = cagr if cagr else 0
self.ref_date = ref_date if ref_date else None
self.times = times
self.size = size
iterables = [times, range(0, size)]
self._multi_index = pd.MultiIndex.from_product(iterables, names=index_names)
assert type(times.freq) == pd.tseries.offsets.MonthBegin, 'Time index must have monthly frequency'
def generate_values(self, *args, **kwargs):
"""
Instantiate a random variable and apply annual growth factors.
:return:
"""
values = super().generate_values(*args, **kwargs, size=(len(self.times) * self.size,))
alpha = self.cagr
# @todo - fill to cover the entire time: define rules for filling first
ref_date = self.ref_date if self.ref_date else self.times[0].to_pydatetime()
# assert ref_date >= self.times[0].to_pydatetime(), 'Ref date must be within variable time span.'
# assert ref_date <= self.times[-1].to_pydatetime(), 'Ref date must be within variable time span.'
start_date = self.times[0].to_pydatetime()
end_date = self.times[-1].to_pydatetime()
a = growth_coefficients(start_date, end_date, ref_date, alpha, self.size)
values *= a.ravel()
# df = pd.DataFrame(values)
# df.columns = [kwargs['name']]
# df.set_index(self._multi_index, inplace=True)
# # @todo this is a hack to return a series with index as I don't know how to set an index and rename a series
# data_series = df.iloc[:, 0]
# data_series._metadata = kwargs
# data_series.index.rename(['time', 'samples'], inplace=True)
#
if not kwargs["unit"]:
series = pd.Series(values, index=self._multi_index, dtype='pint[dimensionless]')
else:
series = pd.Series(values, index=self._multi_index, dtype=f'pint[{kwargs["unit"]}]')
return series
def growth_coefficients(start_date, end_date, ref_date, alpha, samples):
"""
Build a matrix of growth factors according to the CAGR formula y'=y0 (1+a)^(t'-t0).
a growth rate alpha
t0 start date
t' end date
y' output
y0 start value
"""
start_offset = 0
if ref_date < start_date:
offset_delta = rdelta.relativedelta(start_date, ref_date)
start_offset = offset_delta.months + 12 * offset_delta.years
start_date = ref_date
end_offset = 0
if ref_date > end_date:
offset_delta = rdelta.relativedelta(ref_date, end_date)
end_offset = offset_delta.months + 12 * offset_delta.years
end_date = ref_date
delta_ar = rdelta.relativedelta(ref_date, start_date)
ar = delta_ar.months + 12 * delta_ar.years
delta_br = rdelta.relativedelta(end_date, ref_date)
br = delta_br.months + 12 * delta_br.years
# we place the ref point on the lower interval (delta_ar + 1) but let it start from 0
# in turn we let the upper interval start from 1
g = np.fromfunction(lambda i, j: np.power(1 - alpha, np.abs(i) / 12), (ar + 1, samples), dtype=float)
h = np.fromfunction(lambda i, j: np.power(1 + alpha, np.abs(i + 1) / 12), (br, samples), dtype=float)
g = np.flipud(g)
# now join the two arrays
a = np.vstack((g, h))
if start_offset > 0:
a = a[start_offset:]
if end_offset > 0:
a = a[:-end_offset]
return a
class ParameterScenarioSet(object):
"""
The set of all version of a parameter for all the scenarios.
"""
default_scenario = 'default'
"the name of the parameters in this set"
parameter_name: str
scenarios: Dict[str, Parameter]
def __init__(self):
self.scenarios = {}
def add_scenario(self, parameter: 'Parameter', scenario_name: str = default_scenario):
"""
Add a scenario for this parameter.
:param scenario_name:
:param parameter:
:return:
"""
self.scenarios[scenario_name] = parameter
def __getitem__(self, item):
return self.scenarios.__getitem__(item)
def __setitem__(self, key, value):
return self.scenarios.__setitem__(key, value)
class ParameterRepository(object):
"""
Contains all known parameter definitions (so that it is not necessary to re-read the excel file for repeat param accesses).
The param definitions are independent from the sampling (the Param.__call__ method). Repeat access to __call__ will
create new samples.
Internally, parameters are organised together with all the scenario variants in a single ParameterScenarioSet.
"""
parameter_sets: Dict[str, ParameterScenarioSet]
tags: Dict[str, Dict[str, Set[Parameter]]]
def __init__(self):
self.parameter_sets = defaultdict(ParameterScenarioSet)
self.tags = defaultdict(lambda: defaultdict(set))
def add_all(self, parameters: List[Parameter]):
for p in parameters:
self.add_parameter(p)
def clear_cache(self):
for p_sets in self.parameter_sets.values():
for param_name, param in p_sets.scenarios.items():
param.cache = None
def add_parameter(self, parameter: Parameter):
"""
A parameter can have several scenarios. They are specified as a comma-separated list in a string.
:param parameter:
:return:
"""
# try reading the scenarios from the function arg or from the parameter attribute
scenario_string = parameter.source_scenarios_string
if scenario_string:
_scenarios = [i.strip() for i in scenario_string.split(',')]
self.fill_missing_attributes_from_default_parameter(parameter)
else:
_scenarios = [ParameterScenarioSet.default_scenario]
for scenario in _scenarios:
parameter.scenario = scenario
self.parameter_sets[parameter.name][scenario] = parameter
# record all tags for this parameter
if parameter.tags:
_tags = [i.strip() for i in parameter.tags.split(',')]
for tag in _tags:
self.tags[tag][parameter.name].add(parameter)
def fill_missing_attributes_from_default_parameter(self, param):
"""
Empty fields in Parameter definitions in scenarios are populated with default values.
E.g. in the example below, the source for the Power_TV variable in the 8K scenario would also be EnergyStar.
| name | scenario | val | tags | source |
|----------|----------|-----|--------|------------|
| Power_TV | | 60 | UD, TV | EnergyStar |
| Power_TV | 8K | 85 | new_tag| |
**Note** tags must not differ. In the example above, the 8K scenario variable the tags value would be overwritten
with the default value.
:param param:
:return:
"""
if not self.exists(param.name) or not ParameterScenarioSet.default_scenario in self.parameter_sets[
param.name].scenarios.keys():
logger.warning(
f'No default value for param {param.name} found.')
return
default = self.parameter_sets[param.name][ParameterScenarioSet.default_scenario]
for att_name, att_value in default.__dict__.items():
if att_name in ['unit', 'label', 'comment', 'source', 'tags']:
if att_name == 'tags' and default.tags != param.tags:
logger.warning(
f'For param {param.name} for scenarios {param.source_scenarios_string}, tags is different from default parameter tags. Overwriting with default values.')
setattr(param, att_name, att_value)
if not getattr(param, att_name):
logger.debug(
f'For param {param.name} for scenarios {param.source_scenarios_string}, populating attribute {att_name} with value {att_value} from default parameter.')
setattr(param, att_name, att_value)
def __getitem__(self, item) -> Parameter:
"""
Return the default scenario parameter for a given variable name
:param item: the name of the variable
:return:
"""
return self.get_parameter(item, scenario_name=ParameterScenarioSet.default_scenario)
def get_parameter(self, param_name, scenario_name=ParameterScenarioSet.default_scenario) -> Parameter:
if self.exists(param_name, scenario=scenario_name):
return self.parameter_sets[param_name][scenario_name]
try:
return self.parameter_sets[param_name][ParameterScenarioSet.default_scenario]
except KeyError:
raise KeyError(f"{param_name} not found")
def find_by_tag(self, tag) -> Dict[str, Set[Parameter]]:
"""
Get all registered dicts that are registered for a tag
:param tag: str - single tag
:return: a dict of {param name: set[Parameter]} that contains all ParameterScenarioSets for
all parameter names with a given tag
"""
return self.tags[tag]
def exists(self, param, scenario=None) -> bool:
# if scenario is not None:
# return
present = param in self.parameter_sets.keys()
if not present:
return False
scenario = scenario if scenario else ParameterScenarioSet.default_scenario
return scenario in self.parameter_sets[param].scenarios.keys()
def list_scenarios(self, param):
if param in self.parameter_sets.keys():
return self.parameter_sets[param].scenarios.keys()
class ExcelHandler(object):
version: int
def __init__(self):
self.version = 1
@abstractmethod
def load_definitions(self, sheet_name, filename=None):
raise NotImplementedError()
class OpenpyxlExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
definitions = []
from openpyxl import load_workbook
wb = load_workbook(filename=filename, data_only=True)
_sheet_names = [sheet_name] if sheet_name else wb.sheetnames
for _sheet_name in _sheet_names:
sheet = wb.get_sheet_by_name(_sheet_name)
rows = list(sheet.rows)
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class Xlsx2CsvHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
from xlsx2csv import Xlsx2csv
data = Xlsx2csv(filename, inmemory=True).convert(None, sheetid=0)
definitions = []
_sheet_names = [sheet_name] if sheet_name else [data.keys()]
for _sheet_name in _sheet_names:
sheet = data[_sheet_name]
header = sheet.header
if header[0] != 'variable':
continue
for row in sheet.rows:
values = {}
for key, cell in zip(header, row):
values[key] = cell
definitions.append(values)
return definitions
class CSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
return csv.DictReader(open(filename), delimiter=',')
class PandasCSVHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
self.version = 2
import pandas as pd
df = pd.read_csv(filename, usecols=range(15), index_col=False, parse_dates=['ref date'],
dtype={'initial_value_proportional_variation': 'float64'},
dayfirst=True
# date_parser=lambda x: pd.datetime.strptime(x, '%d-%m-%Y')
)
df = df.dropna(subset=['variable', 'ref value'])
df.fillna("", inplace=True)
return df.to_dict(orient='records')
class XLRDExcelHandler(ExcelHandler):
version: int
@staticmethod
def get_sheet_range_bounds(filename, sheet_name):
import xlrd
wb = xlrd.open_workbook(filename)
sheet = wb.sheet_by_name(sheet_name)
rows = list(sheet.get_rows())
return len(rows)
def load_definitions(self, sheet_name, filename=None):
import xlrd
wb = xlrd.open_workbook(filename)
sh = None
definitions = []
_definition_tracking = defaultdict(dict)
_sheet_names = [sheet_name] if sheet_name else [sh.name for sh in wb.sheets()]
version = 1
try:
sheet = wb.sheet_by_name('metadata')
rows = list(sheet.get_rows())
for row in rows:
if row[0].value == 'version':
version = row[1].value
self.version = version
except:
logger.info(f'could not find a sheet with name "metadata" in workbook. defaulting to v2')
for _sheet_name in _sheet_names:
if _sheet_name == 'metadata':
continue
sheet = wb.sheet_by_name(_sheet_name)
rows = list(sheet.get_rows())
header = [cell.value for cell in rows[0]]
if header[0] != 'variable':
continue
for i, row in enumerate(rows[1:]):
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
if not values['variable']:
# logger.debug(f'ignoring row {i}: {row}')
continue
if 'ref date' in values and values['ref date']:
if isinstance(values['ref date'], float):
values['ref date'] = datetime.datetime(*xldate_as_tuple(values['ref date'], wb.datemode))
if values['ref date'].day != 1:
logger.warning(f'ref date truncated to first of month for variable {values["variable"]}')
values['ref date'] = values['ref date'].replace(day=1)
else:
raise Exception(
f"{values['ref date']} for variable {values['variable']} is not a date - "
f"check spreadsheet value is a valid day of a month")
logger.debug(f'values for {values["variable"]}: {values}')
definitions.append(values)
scenario = values['scenario'] if values['scenario'] else "n/a"
if scenario in _definition_tracking[values['variable']]:
logger.error(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
raise ValueError(
f"Duplicate entry for parameter "
f"with name <{values['variable']}> and <{scenario}> scenario in sheet {_sheet_name}")
else:
_definition_tracking[values['variable']][scenario] = 1
return definitions
class XLWingsExcelHandler(ExcelHandler):
def load_definitions(self, sheet_name, filename=None):
import xlwings as xw
definitions = []
wb = xw.Book(fullname=filename)
_sheet_names = [sheet_name] if sheet_name else wb.sheets
for _sheet_name in _sheet_names:
sheet = wb.sheets[_sheet_name]
range = sheet.range('A1').expand()
rows = range.rows
header = [cell.value for cell in rows[0]]
# check if this sheet contains parameters or if it documentation
if header[0] != 'variable':
continue
total_rows = XLRDExcelHandler.get_sheet_range_bounds(filename, _sheet_name)
range = sheet.range((1, 1), (total_rows, len(header)))
rows = range.rows
for row in rows[1:]:
values = {}
for key, cell in zip(header, row):
values[key] = cell.value
definitions.append(values)
return definitions
class ExcelParameterLoader(object):
definition_version: int
"""Utility to populate ParameterRepository from spreadsheets.
The structure of the spreadsheets is:
| variable | ... |
|----------|-----|
| ... | ... |
If the first row in a spreadsheet does not contain they keyword 'variable' the sheet is ignored.
"""
def __init__(self, filename, excel_handler='xlrd', **kwargs):
self.filename = filename
self.definition_version = 2
logger.info(f'Using {excel_handler} excel handler')
excel_handler_instance = None
if excel_handler == 'csv':
excel_handler_instance = CSVHandler()
if excel_handler == 'pandas':
excel_handler_instance = PandasCSVHandler()
if excel_handler == 'openpyxl':
excel_handler_instance = OpenpyxlExcelHandler()
if excel_handler == 'xlsx2csv':
excel_handler_instance = Xlsx2CsvHandler()
if excel_handler == 'xlwings':
excel_handler_instance = XLWingsExcelHandler()
if excel_handler == 'xlrd':
excel_handler_instance = XLRDExcelHandler()
self.excel_handler: ExcelHandler = excel_handler_instance
def load_parameter_definitions(self, sheet_name: str = None):
"""
Load variable text from rows in excel file.
If no spreadsheet arg is given, all spreadsheets are loaded.
The first cell in the first row in a spreadsheet must contain the keyword 'variable' or the sheet is ignored.
Any cells used as titles (with no associated value) are also added to the returned dictionary. However, the
values associated with each header will be None. For example, given the speadsheet:
| variable | A | B |
|----------|---|---|
| Title | | |
| Entry | 1 | 2 |
The following list of definitions would be returned:
[ { variable: 'Title', A: None, B: None }
, { variable: 'Entry', A: 1 , B: 2 }
]
:param sheet_name:
:return: list of dicts with {header col name : cell value} pairs
"""
definitions = self.excel_handler.load_definitions(sheet_name, filename=self.filename)
self.definition_version = self.excel_handler.version
return definitions
def load_into_repo(self, repository: ParameterRepository = None, sheet_name: str = None):
"""
Create a Repo from an excel file.
:param repository: the repository to load into
:param sheet_name:
:return:
"""
repository.add_all(self.load_parameters(sheet_name))
def load_parameters(self, sheet_name):
parameter_definitions = self.load_parameter_definitions(sheet_name=sheet_name)
params = []
param_name_map = param_name_maps[int(self.definition_version)]
for _def in parameter_definitions:
# substitute names from the headers with the kwargs names in the Parameter and Distributions classes
# e.g. 'variable' -> 'name', 'module' -> 'module_name', etc
parameter_kwargs_def = {}
for k, v in _def.items():
if k in param_name_map:
if param_name_map[k]:
parameter_kwargs_def[param_name_map[k]] = v
else:
parameter_kwargs_def[k] = v
name_ = parameter_kwargs_def['name']
del parameter_kwargs_def['name']
p = Parameter(name_, version=self.definition_version, **parameter_kwargs_def)
params.append(p)
return params
| mit | -7,804,508,916,194,763,000 | 36.434389 | 177 | 0.579808 | false | 4.125156 | false | false | false |
mike-perdide/gitbuster | gitbuster/q_git_delegate.py | 1 | 4118 | # q_git_delegate.py
# Copyright (C) 2010 Julien Miotte <[email protected]>
#
# This module is part of gitbuster and is released under the GPLv3
# License: http://www.gnu.org/licenses/gpl-3.0.txt
#
from PyQt4.QtCore import QDateTime, QVariant, Qt, SIGNAL, QRect
from PyQt4.QtGui import QDateTimeEdit, QItemDelegate, QLineEdit, QTextEdit
from gfbi_core import ACTOR_FIELDS, TEXT_FIELDS, TIME_FIELDS
class QGitDelegate(QItemDelegate):
def __init__(self, view):
QItemDelegate.__init__(self, None)
self._view = view
self._selected_indexes = None
def createEditor(self, parent, option, index):
if len(self._view.selectedIndexes()) > 1:
self._selected_indexes = self._view.selectedIndexes()
columns = index.model().get_git_model().get_columns()
field_name = columns[index.column()]
if field_name in TEXT_FIELDS:
editor = QTextEdit(parent)
elif field_name in ACTOR_FIELDS:
editor = QLineEdit(parent)
elif field_name in TIME_FIELDS:
editor = QDateTimeEdit(parent)
editor.setDisplayFormat("yyyy-MM-dd hh:mm:ss")
else:
return QItemDelegate.createEditor(self, parent, option,
index)
self.connect(editor, SIGNAL("returnPressed()"),
self.commitAndCloseEditor)
return editor
def updateEditorGeometry(self, editor, option, index):
"""
Here we're gonna make the text edit of the message column bigger.
"""
model = index.model()
columns = model.get_git_model().get_columns()
field_name = columns[index.column()]
if field_name != "message":
QItemDelegate.updateEditorGeometry(self, editor, option, index)
return
message = model.data(index, Qt.EditRole)
new_geometry = option.rect
new_height = 27 * message.toString().count("\n") or option.rect.height()
new_geometry.setHeight(new_height)
editor.setGeometry(new_geometry)
def commitAndCloseEditor(self):
editor = self.sender()
if isinstance(editor, (QTextEdit, QLineEdit)):
self.emit(SIGNAL("closeEditor(QWidget*)"), editor)
def setEditorData(self, editor, index):
columns = index.model().get_git_model().get_columns()
field_name = columns[index.column()]
if field_name in TEXT_FIELDS or field_name in ACTOR_FIELDS:
text = index.model().data(index, Qt.EditRole).toString()
editor.setText(text)
elif field_name in TIME_FIELDS:
timestamp, tz = index.model().data(index, Qt.EditRole)
_q_datetime = QDateTime()
_q_datetime.setTime_t(timestamp)
editor.setDateTime(_q_datetime)
def setModelData(self, editor, model, index, ignore_history=False):
model = index.model()
columns = model.get_git_model().get_columns()
field_name = columns[index.column()]
if field_name in TEXT_FIELDS:
data = QVariant(editor.toPlainText())
elif field_name in TIME_FIELDS:
data = (editor.dateTime().toTime_t(),
model.data(index, Qt.EditRole)[1])
elif field_name in ACTOR_FIELDS:
data = QVariant(editor.text())
if not ignore_history:
# Start a new history event, only for the first modified index.
# That way, an undo will undo all the selected indexes.
model.start_history_event()
model.setData(index, data)
if self._selected_indexes:
edited_column = index.column()
selected_indexes = list(self._selected_indexes)
self._selected_indexes = None
for selected_index in selected_indexes:
if model.is_first_commit(selected_index):
continue
if selected_index.column() == edited_column:
self.setModelData(editor, model, selected_index,
ignore_history=True)
| gpl-3.0 | -6,977,865,863,436,541,000 | 36.099099 | 80 | 0.602234 | false | 4.085317 | false | false | false |
luaduck/suds | soapclient.py | 1 | 6841 |
###
# This file is part of Soap.
#
# Soap is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# Soap is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should have received
# a copy of the GNU General Public License along with Soap. If not, see
# <http://www.gnu.org/licenses/>.
###
import logging
import Queue
from libottdadmin2.trackingclient import TrackingAdminClient
from libottdadmin2.event import Event
from libottdadmin2.enums import UpdateType, UpdateFrequency
from enums import RconStatus, ConnectionState
class SoapEvents(object):
def __init__(self):
self.connected = Event()
self.disconnected = Event()
self.shutdown = Event()
self.new_game = Event()
self.new_map = Event()
# self.protocol = Event()
# self.datechanged = Event()
# self.clientinfo = Event()
self.clientjoin = Event()
self.clientupdate = Event()
self.clientquit = Event()
# self.companyinfo = Event()
# self.companynew = Event()
# self.companyupdate = Event()
# self.companyremove = Event()
# self.companystats = Event()
# self.companyeconomy = Event()
self.chat = Event()
self.rcon = Event()
self.rconend = Event()
self.console = Event()
self.cmdlogging = Event()
self.pong = Event()
class SoapClient(TrackingAdminClient):
# Initialization & miscellanious functions
def __init__(self, channel, serverid, events = None):
super(SoapClient, self).__init__(events)
self.channel = channel
self.ID = serverid
self.soapEvents = SoapEvents()
self._attachEvents()
self.logger = logging.getLogger('Soap-%s' % self.ID)
self.logger.setLevel(logging.INFO)
self.rconCommands = Queue.Queue()
self.rconNick = None
self.rconResults = {}
self.rconState = RconStatus.IDLE
self.connectionstate = ConnectionState.DISCONNECTED
self.registered = False
self.filenumber = None
self.clientPassword = None
def _attachEvents(self):
self.events.connected += self._rcvConnected
self.events.disconnected += self._rcvDisconnected
self.events.shutdown += self._rcvShutdown
self.events.new_game += self._rcvNewGame
self.events.new_map += self._rcvNewMap
self.events.clientjoin += self._rcvClientJoin
self.events.clientupdate += self._rcvClientUpdate
self.events.clientquit += self._rcvClientQuit
self.events.chat += self._rcvChat
self.events.rcon += self._rcvRcon
self.events.rconend += self._rcvRconEnd
self.events.console += self._rcvConsole
self.events.cmdlogging += self._rcvCmdLogging
self.events.pong += self._rcvPong
def copy(self):
obj = SoapClient(self._channel, self._ID, self.events)
for prop in self._settable_args:
setattr(obj, prop, getattr(self, prop, None))
return obj
# Insert connection info into parameters
def _rcvConnected(self):
self.registered = True
self.soapEvents.connected(self.channel)
def _rcvDisconnected(self, canRetry):
self.registered = False
self.soapEvents.disconnected(self.channel, canRetry)
def _rcvShutdown(self):
self.soapEvents.shutdown(self.channel)
def _rcvNewGame(self):
self.soapEvents.new_game(self.channel)
def _rcvNewMap(self, mapinfo, serverinfo):
self.soapEvents.new_map(self.channel, mapinfo, serverinfo)
def _rcvClientJoin(self, client):
self.soapEvents.clientjoin(self.channel, client)
def _rcvClientUpdate(self, old, client, changed):
self.soapEvents.clientupdate(self.channel, old, client, changed)
def _rcvClientQuit(self, client, errorcode):
self.soapEvents.clientquit(self.channel, client, errorcode)
def _rcvChat(self, **kwargs):
data = dict(kwargs.items())
data['connChan'] = self.channel
self.soapEvents.chat(**data)
def _rcvRcon(self, result, colour):
self.soapEvents.rcon(self.channel, result, colour)
def _rcvRconEnd(self, command):
self.soapEvents.rconend(self.channel, command)
def _rcvConsole(self, message, origin):
self.soapEvents.console(self.channel, origin, message)
def _rcvCmdLogging(self, **kwargs):
data = dict(kwargs.items())
data['connChan'] = self.channel
self.soapEvents.cmdlogging(**data)
def _rcvPong(self, start, end, delta):
self.soapEvents.pong(self.channel, start, end, delta)
# Store some extra info
_settable_args = TrackingAdminClient._settable_args + ['irc', 'ID', 'channel', 'debugLog']
_irc = None
_ID = 'Default'
_channel = None
_debugLog = False
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value.lower()
@property
def irc(self):
return self._irc
@irc.setter
def irc(self, value):
self._irc = value
@property
def ID(self):
return self._ID
@ID.setter
def ID(self, value):
self._ID = value.lower()
@property
def debugLog(self):
return self._debugLog
@debugLog.setter
def debugLog(self, value):
self._debugLog = value
if self._debugLog:
self.logger.setLevel(logging.DEBUG)
else:
self.logger.setLevel(logging.INFO)
update_types = [
(UpdateType.CLIENT_INFO, UpdateFrequency.AUTOMATIC),
(UpdateType.COMPANY_INFO, UpdateFrequency.AUTOMATIC),
(UpdateType.COMPANY_ECONOMY, UpdateFrequency.WEEKLY),
(UpdateType.COMPANY_STATS, UpdateFrequency.WEEKLY),
(UpdateType.CHAT, UpdateFrequency.AUTOMATIC),
(UpdateType.CONSOLE, UpdateFrequency.AUTOMATIC),
(UpdateType.LOGGING, UpdateFrequency.AUTOMATIC),
(UpdateType.DATE, UpdateFrequency.DAILY),
]
| gpl-2.0 | -6,182,395,961,143,806,000 | 29.095455 | 94 | 0.597427 | false | 3.942939 | false | false | false |
cloudnull/skylab | skylab/executable.py | 1 | 9051 | # =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
import os
import json
import skylab as sk
from skylab import arguments
from skylab import osclients
from skylab import service_module as sm
from skylab import utils
def execute():
"""Execute the Tribble Application."""
user_args = arguments.args()
# Load the local DB for rebuilding
user_args['db_path'] = sk.dbm_create(
db_path=user_args.get('db_path'),
db_name=user_args.get('db_name'),
db_key=user_args.get('name')
)
Runner(args=user_args).run_method()
class Runner(object):
"""Run the application."""
def __init__(self, args):
"""Run the application process from within the thread.
:param args: parsed cli arguments.
"""
self.client = None
self.args = args
def run_method(self):
"""Get action and run."""
action = getattr(self, self.args.get('method'))
if action is None:
raise SystemExit('Died because something bad happened.')
else:
action()
def build_lab(self):
"""Build the Openstack Lab."""
queue = None
# Load the Novaclient and Authenticate.
creds = osclients.Creds(
user=self.args.get('username'),
region=self.args.get('region'),
key=self.args.get('apikey'),
password=self.args.get('password'),
tenant=self.args.get('tenant_name'),
project_id=self.args.get('project_id'),
)
clients = osclients.Clients(creds=creds, args=self.args)
self.client = clients.nova()
self.client.authenticate()
# Set the tenant ID
with utils.IndicatorThread(debug=self.args.get('debug')):
tenant = self.client.client.tenant_id
# Set the controller Flavor ID
print('Finding Flavor Information')
controller_flavor = sm.flavor_find(
client=self.client, flavor_ram=self.args.get('controller_ram')
)
self.args['controller'] = {'flavor': controller_flavor}
# Set the compute Flavor ID
compute_flavor = sm.flavor_find(
client=self.client, flavor_ram=self.args.get('compute_ram')
)
self.args['compute'] = {'flavor': compute_flavor}
# Add up total purposed ram for the build
con_ram = self.args.get('controller_ram')
com_ram = self.args.get('compute_ram')
t_ram = con_ram + com_ram
print('Checking Build against Limits')
in_limits = sm.check_limits(
client=self.client,
tenant_id=tenant,
purposed_ram=t_ram
)
if in_limits is False:
raise sk.NotEnoughRam(
'This build is not possible, your account does not'
' have enough RAM available.'
)
print('Defining the Network')
network = sm.skylab_network(
client=self.client,
name=self.args.get('name'),
net_cidr=self.args.get('net_cidr'),
net_id=self.args.get('net_id')
)
print('Checking for Image')
image_id = sm.image_find(
client=self.client,
image=self.args.get('image')
)
nics = [
{'net-id': "00000000-0000-0000-0000-000000000000"},
{'net-id': network}
]
if self.args.get('no_private') is False:
nics.append(
{'net-id': "11111111-1111-1111-1111-111111111111"}
)
build_kwargs = {
'image': image_id,
'nics': nics
}
print('Defining the key')
if self.args.get('admin_pass') is not None:
build_kwargs['admin_pass'] = self.args['admin_pass']
if self.args.get('key_name'):
if not sm.client_key_find(self.client,
key_name=self.args['key_name']):
key_path = os.path.expanduser(self.args['key_location'])
if os.path.exists(key_path):
with open(key_path, 'rb') as key:
sm.client_key_create(
self.client,
key_name=self.args['key_name'],
public_key=key.read()
)
build_kwargs['key_name'] = self.args['key_name']
else:
build_kwargs['key_name'] = self.args['key_name']
print('Loading Work Queue')
if self.args['node_count'] < 3:
raise sk.NotEnoughNodes(
'The node count is too low. You have set "%s" but it needs'
' to be a minimum of "3".' % self.args['node_count']
)
else:
# Load our queue
queue = utils.basic_queue()
self.args['compute'].update(build_kwargs)
for c_node in range(self.args['node_count'] - 2):
c_node += 1
compute = {
'name': '%s_compute%s' % (self.args['name'], c_node)
}
compute.update(self.args['compute'])
queue.put(compute)
self.args['controller'].update(build_kwargs)
for c_node in range(2):
c_node += 1
controller = {
'name': '%s_controller%s' % (self.args['name'], c_node)
}
controller.update(self.args['controller'])
queue.put(controller)
# Prep the threader
proc_args = {'client': self.client,
'args': self.args,
'queue': queue,
'job_action': sm.bob_the_builder}
with utils.IndicatorThread(work_q=queue, debug=self.args.get('debug')):
print('Building "%s" nodes' % self.args['node_count'])
utils.worker_proc(
kwargs=proc_args
)
# Construct all the things.
with utils.IndicatorThread(work_q=queue, debug=self.args.get('debug')):
sm.construct_skylab(args=self.args)
def db_show(self):
with sk.Shelve(file_path=self.args['db_path']) as db:
print(json.dumps(dict(db), indent=4))
def lab_info(self):
def get_addr(server, net_name):
if 'addresses' in srv:
addresses = server['addresses'].get(net_name)
if addresses is not None:
for addr in addresses:
if addr.get('version') == 4:
return addr.get('addr')
else:
return None
name = self.args['name']
with sk.Shelve(file_path=self.args['db_path']) as db:
db_data = dict(db)
info = [db_data[name][srv] for srv in db_data[name].keys()
if srv.startswith(name)]
if self.args.get('server'):
pass
else:
print_data = []
skynet = '%s_address' % name
for srv in info:
print_data.append({
'id': srv.get('id'),
'name': srv.get('name'),
skynet: get_addr(server=srv, net_name=name),
'public_net': get_addr(server=srv, net_name='public')
})
sk.print_horiz_table(print_data)
def scuttle_lab(self):
with utils.IndicatorThread(debug=self.args.get('debug')):
servers = [
(server.id, server.name)
for server in sm.client_list(self.client)
if server.name.startswith(self.args['name'])
]
with sk.Shelve(file_path=self.args['db_path']) as db:
for server in servers:
if self.args['name'] in db:
lab_db = db[self.args['name']]
if lab_db.get(server[1]) is not None:
del lab_db[server[1]]
sm.client_delete(self.client, server_id=server[0])
if __name__ == '__main__':
execute()
| gpl-3.0 | -6,981,783,929,136,485,000 | 34.774704 | 79 | 0.481936 | false | 4.293643 | false | false | false |
kmaglione/amo-validator | validator/testcases/javascript/obsolete.py | 1 | 3693 | from __future__ import absolute_import, print_function, unicode_literals
from .jstypes import Hook, Interfaces
OBSOLETE_EXTENSION_MANAGER = {
'on_get': 'This interface is part of the obsolete extension manager '
'interface, which is not available in any remotely modern '
'version of Firefox. It should not be referenced in any '
'code.'}
Interfaces.hook({
'nsIExtensionManager': OBSOLETE_EXTENSION_MANAGER,
'nsIUpdateItem': OBSOLETE_EXTENSION_MANAGER,
'nsIInstallLocation': OBSOLETE_EXTENSION_MANAGER,
'nsIAddonInstallListener': OBSOLETE_EXTENSION_MANAGER,
'nsIAddonUpdateCheckListener': OBSOLETE_EXTENSION_MANAGER,
})
# nsIJSON
NSIJSON_DEPRECATED = {
'err_id': ('testcases_javascript_calldefinitions', 'nsIJSON', 'deprec'),
'warning': 'Deprecated nsIJSON methods in use.',
'description':
'The `encode` and `decode` methods in nsIJSON have been '
'deprecated since Gecko 7. You should use the methods in the '
'global JSON object instead. See '
'https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference'
'/Global_Objects/JSON for more information.'}
@Interfaces.hook
class nsIJSON(Hook):
encode = {'on_call': NSIJSON_DEPRECATED}
decode = {'on_call': NSIJSON_DEPRECATED}
# nsIWebBrowserPersist
WEBBROWSERPERSIST_DEPRECATED = {
'err_id': ('testcases_javascript_call_definititions',
'webbrowserpersist'),
'warning': 'nsIWebBrowserPersist should no longer be used',
'description':
'Most nsIWebBrowserPersist methods have been '
'superseded by simpler methods in Downloads.jsm, namely '
'`Downloads.fetch` and `Downloads.createDownload`. See '
'http://mzl.la/downloads-jsm for more information.',
}
@Interfaces.hook
class nsIWebBrowserPersist(Hook):
saveChannel = {'on_call': WEBBROWSERPERSIST_DEPRECATED}
savePrivacyAwareURI = {'on_call': WEBBROWSERPERSIST_DEPRECATED}
@Hook.on_call
def saveURI(this, args, callee):
"""nsIWebBrowserPersist.saveURI requires a valid privacy context as
of Firefox 19."""
if len(args) >= 7:
load_context = args[6]
if load_context.as_primitive() is None:
this.traverser.warning(
err_id=('testcases_javascript_call_definititions',
'webbrowserpersist_saveuri'),
warning=('saveURI should not be called with a null load '
'context'),
description=(
'While nsIWebBrowserPersist.saveURI accepts null '
'in place of a privacy context, this usage is '
'acceptable only when no appropriate load '
'context exists.'))
return WEBBROWSERPERSIST_DEPRECATED
# nsITransferable
@Interfaces.hook
class nsITransferable(Hook):
@Hook.on_call
def init(this, args, callee):
if args and not args[0].as_primitive():
this.traverser.warning(
err_id=('js_entity_values', 'nsITransferable', 'init'),
warning=(
'`nsITransferable.init` should not be called with `null` '
'as its first argument'),
description=(
'Calling `nsITransferable.init()` with a null first '
'argument has the potential to leak data across '
'private browsing mode sessions. `null` is '
'appropriate only when reading data or writing data '
'which is not associated with a particular window.'))
| bsd-3-clause | -5,829,427,618,306,759,000 | 37.46875 | 78 | 0.620092 | false | 4.044907 | false | false | false |
tchellomello/home-assistant | homeassistant/components/roomba/sensor.py | 1 | 1696 | """Sensor for checking the battery level of Roomba."""
import logging
from homeassistant.components.vacuum import STATE_DOCKED
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.icon import icon_for_battery_level
from .const import BLID, DOMAIN, ROOMBA_SESSION
from .irobot_base import IRobotEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the iRobot Roomba vacuum cleaner."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
roomba = domain_data[ROOMBA_SESSION]
blid = domain_data[BLID]
roomba_vac = RoombaBattery(roomba, blid)
async_add_entities([roomba_vac], True)
class RoombaBattery(IRobotEntity):
"""Class to hold Roomba Sensor basic info."""
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} Battery Level"
@property
def unique_id(self):
"""Return the ID of this sensor."""
return f"battery_{self._blid}"
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return PERCENTAGE
@property
def icon(self):
"""Return the icon for the battery."""
charging = bool(self._robot_state == STATE_DOCKED)
return icon_for_battery_level(
battery_level=self._battery_level, charging=charging
)
@property
def state(self):
"""Return the state of the sensor."""
return self._battery_level
| apache-2.0 | -4,041,612,516,098,413,000 | 28.241379 | 68 | 0.665684 | false | 3.743929 | false | false | false |
bcopeland/speccy | scanner.py | 1 | 9599 | #!/usr/bin/python
from multiprocessing import Value
from threading import Thread, Lock
import os
import time
class Scanner(object):
interface = None
freqlist = None
process = None
debugfs_dir = None
is_ath10k = False
lock = None
run = True
def dev_to_phy(self, dev):
f = open('/sys/class/net/%s/phy80211/name' % dev)
phy = f.read().strip()
f.close()
return phy
def freq_to_chan(self, freq):
chan = 0
if (freq >= 2412 and freq <= 2472):
chan = (freq - 2407) / 5
if (freq >= 5180 and freq <= 5900):
chan = (freq - 5000) / 5
return chan
def _find_debugfs_dir(self):
''' search debugfs for spectral_scan_ctl for this interface '''
for dirname, subd, files in os.walk('/sys/kernel/debug/ieee80211'):
if 'spectral_scan_ctl' in files:
phy = dirname.split(os.path.sep)[-2]
if phy == self.dev_to_phy(self.interface):
self.phy = phy
return dirname
return None
def _scan(self):
while self.run:
if self.is_ath10k:
self.cmd_trigger()
if self.mode.value == 1: # only in 'chanscan' mode
cmd = 'iw dev %s scan' % self.interface
self.lock.acquire()
if self.freqlist:
cmd = '%s freq %s' % (cmd, ' '.join(self.freqlist))
self.lock.release()
os.system('%s >/dev/null 2>/dev/null' % cmd)
time.sleep(.01)
def __init__(self, interface, idx=0):
self.interface = interface
self.lock = Lock()
self.phy = ""
self.idx = idx
self.monitor_name = "ssmon%d" % self.idx # just a arbitrary, but unique id
self.monitor_added = False
self.debugfs_dir = self._find_debugfs_dir()
if not self.debugfs_dir:
raise Exception, \
'Unable to access spectral_scan_ctl file for interface %s' % interface
self.is_ath10k = self.debugfs_dir.endswith("ath10k")
self.ctl_file = '%s/spectral_scan_ctl' % self.debugfs_dir
self.sample_count_file = '%s/spectral_count' % self.debugfs_dir
self.short_repeat_file = '%s/spectral_short_repeat' % self.debugfs_dir
self.cur_chan = 6
self.sample_count = 8
self.mode = Value('i', -1) # -1 = undef, 1 = 'chanscan', 2 = 'background scan', 3 = 'noninvasive bg scan'
self.channel_mode = "HT20"
self.thread = None
self.file_reader = None
self.noninvasive = False
self.set_freqs(2412, 2472, 5)
def set_freqs(self, minf, maxf, spacing):
self.lock.acquire()
self.freqlist = ['%s' % x for x in range(minf, maxf + spacing, spacing)]
self.lock.release()
# TODO restart scanner
self.freq_idx = 0;
print "freqlist: %s" % self.freqlist
def hw_setup_chanscan(self):
print "enter 'chanscan' mode: set dev type to 'managed'"
os.system("ip link set %s down" % self.interface)
os.system("iw dev %s set type managed" % self.interface)
os.system("ip link set %s up" % self.interface)
if self.is_ath10k:
self.cmd_background()
else:
self.cmd_chanscan()
def hw_setup_background(self):
if self.noninvasive:
self.dev_add_monitor()
else:
print "enter 'background' mode: set dev type to 'monitor'"
os.system("ip link set %s down" % self.interface)
os.system("iw dev %s set type monitor" % self.interface)
os.system("ip link set %s up" % self.interface)
self.cmd_setfreq(0)
self.cmd_background()
self.cmd_trigger()
def mode_chanscan(self):
if self.mode.value != 1:
self.hw_setup_chanscan()
self.mode.value = 1
def mode_background(self):
if self.mode.value != 2:
self.hw_setup_background()
self.mode.value = 2
def mode_manual(self):
self.mode.value = 3
def mode_noninvasive_background(self):
self.noninvasive = True
self.mode_background()
def retune_up(self): # FIXME: not save for 5Ghz / ath10k
if self.mode.value == 1: # tuning not possible in mode 'chanscan'
return
idx = (self.freq_idx + 1) % len(self.freqlist)
print "tune to freq %s" % self.freqlist[idx]
self.fix_ht40_mode()
self.cmd_setfreq(idx)
self.cmd_trigger()
def retune_down(self): # FIXME: not save for 5Ghz / ath10k
if self.mode.value == 1: # tuning not possible in mode 'chanscan'
return
idx = (self.freq_idx - 1) % len(self.freqlist)
print "tune to freq %s" % self.freqlist[idx]
self.fix_ht40_mode()
self.cmd_setfreq(idx)
self.cmd_trigger()
def cmd_samplecount_up(self):
self.sample_count *= 2
if self.sample_count == 256: # special case, 256 is not valid, set to last valid value
self.sample_count = 255
if self.sample_count > 255:
self.sample_count = 1
self.cmd_set_samplecount(self.sample_count)
def cmd_samplecount_down(self):
if self.sample_count == 255:
self.sample_count = 256 # undo special case, see above
self.sample_count /= 2
if self.sample_count < 1:
self.sample_count = 255
self.cmd_set_samplecount(self.sample_count)
def cmd_trigger(self):
f = open(self.ctl_file, 'w')
f.write("trigger")
f.close()
def cmd_background(self):
f = open(self.ctl_file, 'w')
f.write("background")
if self.is_ath10k:
f.write("trigger")
f.close()
def cmd_manual(self):
f = open(self.ctl_file, 'w')
f.write("manual")
f.close()
def cmd_chanscan(self):
f = open(self.ctl_file, 'w')
f.write("chanscan")
f.close()
def cmd_disable(self):
f = open(self.ctl_file, 'w')
f.write("disable")
f.close()
def cmd_set_samplecount(self, count):
print "set sample count to %d" % count
f = open(self.sample_count_file, 'w')
f.write("%s" % count)
f.close()
def set_short_repeat(self, short_repeat):
f = open(self.short_repeat_file, 'w')
f.write("%s" % short_repeat)
f.close()
def cmd_toggle_short_repeat(self):
f = open(self.short_repeat_file, 'r')
curval = int(f.read())
f.close()
if curval == 0:
curval = 1
else:
curval = 0
print "set short repeat to %d" % curval
self.set_short_repeat(curval)
def cmd_setchannel(self):
print "set channel to %d in mode %s" % (self.cur_chan, self.channel_mode)
if not self.noninvasive:
os.system("iw dev %s set channel %d %s" % (self.interface, self.cur_chan, self.channel_mode))
else: # this seems to be strange:
os.system("iw dev %s set channel %d %s" % (self.monitor_name, self.cur_chan, self.channel_mode))
def cmd_setfreq(self, idx):
freq = self.freqlist[idx]
chan = self.freq_to_chan(int(freq))
mode = self.channel_mode
print "set freq to %s (%d) in mode %s" % (freq, chan, mode)
if not self.noninvasive:
os.system("iw dev %s set freq %s %s" % (self.interface, freq, mode))
else: # this seems to be strange:
os.system("iw dev %s set freq %s %s" % (self.monitor_name, freq, mode))
self.freq_idx = idx
def fix_ht40_mode(self):
if self.channel_mode != "HT20":
# see https://wireless.wiki.kernel.org/en/developers/regulatory/processing_rules#mhz_channels1
if self.cur_chan < 8:
self.channel_mode = "HT40+"
else:
self.channel_mode = "HT40-"
def cmd_toggle_HTMode(self):
if self.channel_mode == "HT40+" or self.channel_mode == "HT40-":
self.channel_mode = "HT20"
else: # see https://wireless.wiki.kernel.org/en/developers/regulatory/processing_rules#mhz_channels1
if self.cur_chan < 8:
self.channel_mode = "HT40+"
else:
self.channel_mode = "HT40-"
self.cmd_setchannel()
self.cmd_trigger()
def dev_add_monitor(self):
if self.monitor_added:
return
print "add a monitor interface"
os.system("iw phy %s interface add %s type monitor" % (self.phy, self.monitor_name))
os.system("ip link set %s up" % self.monitor_name)
self.monitor_added = True
def dev_del_monitor(self):
if self.monitor_added:
os.system("ip link set %s down" % self.monitor_name)
os.system("iw dev %s del" % self.monitor_name)
self.monitor_added = False
def start(self):
if self.thread is None:
self.thread = Thread(target=self._scan, args=())
self.run = True
self.thread.start()
def stop(self):
if self.channel_mode != "HT20":
self.cmd_toggle_HTMode()
self.cmd_set_samplecount(8)
self.cmd_disable()
self.dev_del_monitor()
if self.thread is not None:
self.run = False
self.thread.join()
self.thread = None
self.mode.value = -1
def get_debugfs_dir(self):
return self.debugfs_dir
| gpl-2.0 | 7,727,370,529,989,816,000 | 32.680702 | 114 | 0.551307 | false | 3.484211 | false | false | false |
kretusmaximus/MicroFTP.py | microftp/common_view.py | 1 | 2633 | # -*- coding: utf-8 -*-
import Tkinter
import tkFont
class Window(Tkinter.Tk):
def __init__(self, parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
def initialize(self):
pass
class Listbox(Tkinter.Listbox):
def autowidth(self, maxwidth):
f = tkFont.Font(font=self.cget("font"))
pixels = 0
for item in self.get(0, "end"):
pixels = max(pixels, f.measure(item))
# bump listbox size until all entries fit
pixels +=10
width = int(self.cget("width"))
for w in range(0, maxwidth+1, 5):
if self.winfo_reqwidth() >= pixels:
break
self.config(width=width+w)
class CommonFrame(Tkinter.Frame):
"""Common class for displaying UIs"""
def __init__(self, parent, core):
"""
:param parent: the containing Tkinter object
:param core: the MicroFTPCore object
:return:
"""
self.core = core
Tkinter.Frame.__init__(self, parent)
self.pack(expand=1, fill='both')
self.parent = parent
self.controls = {}
self.init_controls()
self.show_controls()
def init_controls(self):
pass
def show_controls(self):
pass
def hide_view(self, widgets_to_destroy):
"""Un-show widgets
:param widgets_to_destroy: list of widgets to .destroy()
:return:
"""
self.hide_controls(widgets_to_destroy)
self.pack_forget()
def hide_controls(self, widgets_to_destroy=None):
"""Hide visible widgets, leaving only blank frame
:param widgets_to_destroy: list of widgets that should be .destroy()ed rather than hidden
:return:
"""
for control in self.controls: # order isn't important
self.controls[control].pack_forget()
if widgets_to_destroy:
for widget in widgets_to_destroy:
widget.destroy()
def show_error(self, error_msg):
"""Hide everything, display an error message and a 'Back' button
:param error_msg: the message to show
:return:
"""
def reset_view(widgets_to_destroy):
self.hide_controls(widgets_to_destroy)
self.show_controls()
error_widgets = []
error_text = Tkinter.Label(self.parent, text=error_msg)
error_text.pack()
error_widgets.append(error_text)
back_button = Tkinter.Button(self.parent, text="Back", command=lambda: reset_view(error_widgets))
back_button.pack()
error_widgets.append(back_button)
| mit | 5,434,954,006,917,956,000 | 27.619565 | 105 | 0.582226 | false | 3.983359 | false | false | false |
distributed-system-analysis/pbench | server/bin/pbench-server-prep-shim-002.py | 1 | 12572 | #!/usr/bin/env python3
# -*- mode: python -*-
# This script is used to prepare the tarballs that a version 002 client
# submits for further processing. It copies the tarballs and their MD5
# sums to the archive (after checking) and sets the state links, so
# that the dispatch script will pick them up and get the ball
# rolling. IOW, it does impedance matching between version 002 clients
# and the server scripts.
import os
import sys
import glob
import shutil
import selinux
import tempfile
from pathlib import Path
from pbench.common.exceptions import BadConfig
from pbench.common.logger import get_pbench_logger
from pbench.common.utils import md5sum
from pbench.server import PbenchServerConfig
from pbench.server.report import Report
from pbench.server.utils import quarantine
from pbench.server.database.models.tracker import Dataset, States, DatasetError
from pbench.server.database import init_db
_NAME_ = "pbench-server-prep-shim-002"
class Results:
def __init__(
self, nstatus="", ntotal=0, ntbs=0, nquarantined=0, ndups=0, nerrs=0,
):
self.nstatus = nstatus
self.ntotal = ntotal
self.ntbs = ntbs
self.nquarantined = nquarantined
self.ndups = ndups
self.nerrs = nerrs
def fetch_config_val(config, logger):
qdir = config.get("pbench-server", "pbench-quarantine-dir")
if not qdir:
logger.error("Failed: getconf.py pbench-quarantine-dir pbench-server")
return None, None
qdir = Path(qdir).resolve()
if not qdir.is_dir():
logger.error("Failed: {} does not exist, or is not a directory", qdir)
return None, None
# we are explicitly handling version-002 data in this shim
receive_dir_prefix = config.get("pbench-server", "pbench-receive-dir-prefix")
if not receive_dir_prefix:
logger.error("Failed: getconf.py pbench-receive-dir-prefix pbench-server")
return None, None
receive_dir = Path(f"{receive_dir_prefix}-002").resolve()
if not receive_dir.is_dir():
logger.error("Failed: {} does not exist, or is not a directory", receive_dir)
return None, None
return (qdir, receive_dir)
def qdirs_check(qdir_val, qdir, logger):
try:
os.makedirs(qdir)
except FileExistsError:
# directory already exists, ignore
pass
except Exception:
logger.exception(
"os.mkdir: Unable to create {} destination directory: {}", qdir_val, qdir,
)
return None
return qdir
def md5_check(tb, tbmd5, logger):
# read the md5sum from md5 file
try:
with tbmd5.open() as f:
archive_md5_hex_value = f.readline().split(" ")[0]
except Exception:
archive_md5_hex_value = None
logger.exception("Quarantine: Could not read {}", tbmd5)
# get hex value of the tarball's md5sum
try:
(_, archive_tar_hex_value) = md5sum(tb)
except Exception:
archive_tar_hex_value = None
logger.exception("Quarantine: Could not read {}", tb)
return (archive_md5_hex_value, archive_tar_hex_value)
def process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors):
# Check for results that are ready for processing: version 002 agents
# upload the MD5 file as xxx.md5.check and they rename it to xxx.md5
# after they are done with MD5 checking so that's what we look for.
list_check = glob.glob(
os.path.join(receive_dir, "**", "*.tar.xz.md5"), recursive=True
)
archive = config.ARCHIVE
logger.info("{}", config.TS)
list_check.sort()
nstatus = ""
ntotal = ntbs = nerrs = nquarantined = ndups = 0
for tbmd5 in list_check:
ntotal += 1
# full pathname of tarball
tb = Path(tbmd5[0:-4])
tbmd5 = Path(tbmd5)
# directory
tbdir = tb.parent
# resultname: get the basename foo.tar.xz and then strip the .tar.xz
resultname = tb.name
controller = tbdir.name
dest = archive / controller
# Create a new dataset tracker in UPLOADING state, and add it to the
# database.
#
# NOTE: Technically, this particular workflow has no "UPLOADING" as
# the `pbench-server-prep-shim-002` command isn't invoked until the
# tarball and MD5 has been entirely uploaded by the agent via `ssh`;
# this method however can't be supported once we have authorized user
# ownership, and the model fits the server `PUT` method where an
# unexpected termination could leave a tarball in "Uploading" state.
#
# TODO: We have no way to identify an owner here, so assign it to
# the arbitrary "pbench" user. This will go away when we drop this
# component entirely in favor of PUT.
try:
dataset = Dataset.create(
controller=controller, path=resultname, owner="pbench"
)
except DatasetError as e:
logger.error(
"Unable to create dataset {}>{}: {}", controller, resultname, str(e)
)
# TODO: Should we quarantine over this? Note it's not quite
# straightforward, as quarantine() expects that the Dataset has
# been created, so we'll get a cascade failure. Since prep-shim's
# days are numbered, I'm inclined not to worry about it here.
dataset = None
if all([(dest / resultname).is_file(), (dest / tbmd5.name).is_file()]):
logger.error("{}: Duplicate: {} duplicate name", config.TS, tb)
quarantine((duplicates / controller), logger, tb, tbmd5)
ndups += 1
continue
archive_tar_hex_value, archive_md5_hex_value = md5_check(tb, tbmd5, logger)
if any(
[
archive_tar_hex_value != archive_md5_hex_value,
archive_tar_hex_value is None,
archive_md5_hex_value is None,
]
):
logger.error("{}: Quarantined: {} failed MD5 check", config.TS, tb)
logger.info("{}: FAILED", tb.name)
logger.info("md5sum: WARNING: 1 computed checksum did NOT match")
quarantine((qdir_md5 / controller), logger, tb, tbmd5)
nquarantined += 1
continue
if dataset:
try:
dataset.md5 = archive_md5_hex_value
dataset.update()
except DatasetError as e:
logger.warn(
"Unable to update dataset {} with md5: {}", str(dataset), str(e)
)
# make the destination directory and its TODO subdir if necessary.
try:
os.makedirs(dest / "TODO")
except FileExistsError:
# directory already exists, ignore
pass
except Exception:
logger.error("{}: Error in creating TODO directory.", config.TS)
quarantine(os.path.join(errors, controller), logger, tb, tbmd5)
nerrs += 1
continue
# First, copy the small .md5 file to the destination. That way, if
# that operation fails it will fail quickly since the file is small.
try:
shutil.copy2(tbmd5, dest)
except Exception:
logger.error(
"{}: Error in copying .md5 file to Destination path.", config.TS
)
try:
os.remove(dest / tbmd5.name)
except FileNotFoundError:
logger.error(
"{}: Warning: cleanup of copy failure failed itself.", config.TS
)
quarantine((errors / controller), logger, tb, tbmd5)
nerrs += 1
continue
# Next, mv the "large" tar ball to the destination. If the destination
# is on the same device, the move should be quick. If the destination is
# on a different device, the move will be a copy and delete, and will
# take a bit longer. If it fails, the file will NOT be at the
# destination.
try:
shutil.move(str(tb), str(dest))
except Exception:
logger.error(
"{}: Error in moving tarball file to Destination path.", config.TS
)
try:
os.remove(dest / resultname)
except FileNotFoundError:
logger.error(
"{}: Warning: cleanup of copy failure failed itself.", config.TS
)
quarantine((errors / controller), logger, tb, tbmd5)
nerrs += 1
continue
# Restore the SELinux context properly
try:
selinux.restorecon(dest / tb.name)
selinux.restorecon(dest / tbmd5.name)
except Exception as e:
# log it but do not abort
logger.error("{}: Error: 'restorecon {}', {}", config.TS, dest / tb.name, e)
# Now that we have successfully moved the tar ball and its .md5 to the
# destination, we can remove the original .md5 file.
try:
os.remove(tbmd5)
except Exception as exc:
logger.error(
"{}: Warning: cleanup of successful copy operation failed: '{}'",
config.TS,
exc,
)
try:
os.symlink((dest / resultname), (dest / "TODO" / resultname))
except Exception as exc:
logger.error("{}: Error in creation of symlink. '{}'", config.TS, exc)
# if we fail to make the link, we quarantine the (already moved)
# tarball and .md5.
quarantine(
(errors / controller), logger, (dest / tb), (dest / tbmd5),
)
nerrs += 1
continue
ntbs += 1
try:
if dataset:
dataset.advance(States.UPLOADED)
except Exception:
logger.exception("Unable to finalize {}", dataset)
nstatus = f"{nstatus}{config.TS}: processed {tb}\n"
logger.info(f"{tb.name}: OK")
return Results(
nstatus=nstatus,
ntotal=ntotal,
ntbs=ntbs,
nquarantined=nquarantined,
ndups=ndups,
nerrs=nerrs,
)
def main(cfg_name):
if not cfg_name:
print(
f"{_NAME_}: ERROR: No config file specified; set"
" _PBENCH_SERVER_CONFIG env variable or use --config <file> on the"
" command line",
file=sys.stderr,
)
return 2
try:
config = PbenchServerConfig(cfg_name)
except BadConfig as e:
print(f"{_NAME_}: {e} (config file {cfg_name})", file=sys.stderr)
return 1
logger = get_pbench_logger(_NAME_, config)
# We're going to need the Postgres DB to track dataset state, so setup
# DB access.
init_db(config, logger)
qdir, receive_dir = fetch_config_val(config, logger)
if qdir is None and receive_dir is None:
return 2
qdir_md5 = qdirs_check("quarantine", Path(qdir, "md5-002"), logger)
duplicates = qdirs_check("duplicates", Path(qdir, "duplicates-002"), logger)
# The following directory holds tarballs that are quarantined because
# of operational errors on the server. They should be retried after
# the problem is fixed: basically, move them back into the reception
# area for 002 agents and wait.
errors = qdirs_check("errors", Path(qdir, "errors-002"), logger)
if qdir_md5 is None or duplicates is None or errors is None:
return 1
counts = process_tb(config, logger, receive_dir, qdir_md5, duplicates, errors)
result_string = (
f"{config.TS}: Processed {counts.ntotal} entries,"
f" {counts.ntbs} tarballs successful,"
f" {counts.nquarantined} quarantined tarballs,"
f" {counts.ndups} duplicately-named tarballs,"
f" {counts.nerrs} errors."
)
logger.info(result_string)
# prepare and send report
with tempfile.NamedTemporaryFile(mode="w+t", dir=config.TMP) as reportfp:
reportfp.write(f"{counts.nstatus}{result_string}\n")
reportfp.seek(0)
report = Report(config, _NAME_)
report.init_report_template()
try:
report.post_status(config.timestamp(), "status", reportfp.name)
except Exception as exc:
logger.warning("Report post Unsuccesful: '{}'", exc)
return 0
if __name__ == "__main__":
cfg_name = os.environ.get("_PBENCH_SERVER_CONFIG")
status = main(cfg_name)
sys.exit(status)
| gpl-3.0 | -8,785,501,480,780,962,000 | 33.443836 | 88 | 0.595291 | false | 3.914072 | true | false | false |
Donkyhotay/MoonPy | zope/rdb/tests/test_gadflyphantom.py | 1 | 2917 | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Gadfly Database adapter phatom tests
$Id: $
"""
__docformat__ = 'restructuredtext'
import os, shutil
import tempfile, threading
from unittest import TestCase, TestSuite, main, makeSuite
from zope.rdb.gadflyda import GadflyAdapter, setGadflyRoot
class GadflyTestBase(TestCase):
def setUp(self):
TestCase.setUp(self)
self.tempdir = None
def tearDown(self):
TestCase.tearDown(self)
if self.tempdir:
shutil.rmtree(self.tempdir)
setGadflyRoot()
def getGadflyRoot(self):
if not self.tempdir:
self.tempdir = tempfile.mkdtemp('gadfly')
setGadflyRoot(self.tempdir)
return self.tempdir
def _create(self, *args):
return GadflyAdapter(*args)
def exec_sql(adapter, sql, args, fetch=False):
conn = adapter()
cur =conn.cursor()
cur.execute(sql, args)
rows = []
if fetch:
rows = cur.fetchall()
conn.commit()
return rows
class TestPhantom(GadflyTestBase):
def setUp(self):
GadflyTestBase.setUp(self)
dir = self.getGadflyRoot()
os.mkdir(os.path.join(dir, "demo"))
self.adapter = self._create("dbi://demo")
conn = self.adapter()
cur = conn.cursor()
cur.execute("create table t1 (name varchar)")
conn.commit()
def test_Phantom(self):
adapter = self.adapter
insert = "insert into t1 values (?)"
select = "select name from t1"
delete = "delete from t1"
count = 0
for name in ('a', 'b', 'c'):
t = threading.Thread(target=exec_sql,
args=(adapter, insert, (name,)))
t.start()
t.join()
rows = exec_sql(adapter, select, args=(), fetch=True)
count += 1
self.assertEqual(len(rows), count)
exec_sql(adapter, delete, args=())
t = threading.Thread(target=exec_sql,
args=(adapter, delete, ()))
t.start()
t.join()
rows = exec_sql(adapter, select, args=(), fetch=True)
self.assertEqual(len(rows), 0)
def test_suite():
return TestSuite((
makeSuite(TestPhantom),
))
if __name__=='__main__':
main(defaultTest='test_suite')
| gpl-3.0 | -2,021,767,907,325,298,700 | 27.881188 | 78 | 0.57182 | false | 4.001372 | true | false | false |
ArcherSys/ArcherSys | Lib/test/testcodec.py | 1 | 3278 | <<<<<<< HEAD
<<<<<<< HEAD
""" Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x78: "abc", # 1-n decoding mapping
b"abc": 0x0078,# 1-n encoding mapping
0x01: None, # decoding mapping to <undefined>
0x79: "", # decoding mapping to <remove character>
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
=======
""" Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x78: "abc", # 1-n decoding mapping
b"abc": 0x0078,# 1-n encoding mapping
0x01: None, # decoding mapping to <undefined>
0x79: "", # decoding mapping to <remove character>
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
""" Test Codecs (used by test_charmapcodec)
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x78: "abc", # 1-n decoding mapping
b"abc": 0x0078,# 1-n encoding mapping
0x01: None, # decoding mapping to <undefined>
0x79: "", # decoding mapping to <remove character>
})
### Encoding Map
encoding_map = {}
for k,v in decoding_map.items():
encoding_map[v] = k
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| mit | 195,848,417,422,403,780 | 20.853333 | 68 | 0.680293 | false | 3.41103 | true | false | false |
piyushroshan/tuxofwar2012 | questiondb.py | 1 | 1400 | # Model Defining Questions Database
import string
from google.appengine.ext import db
class questionm(db.Model):
questionNumber = db.IntegerProperty(required=True)
question = db.StringProperty(required=True, multiline=True)
qimage = db.StringProperty()
opt1 = db.StringProperty(required=True, multiline=True)
opt2 = db.StringProperty(required=True, multiline=True)
opt3 = db.StringProperty(required=True, multiline=True)
opt4 = db.StringProperty(required=True, multiline=True)
ans = db.StringProperty(required=True)
def getQuestion(num,var):
query = questionm.all()
q = query.filter('questionNumber =',num).get()
if q:
return ("{"+
"\"num\" : " + "\""+ str(var) +"\""+","+
"\"question\" : "+"\""+q.question.replace('\r\n','<br />')+"\""+","+
"\"image\" : "+"\""+q.qimage+"\""+","+
"\"options\" : " + "["+
"\""+q.opt1.replace('\r\n','<br />')+"\""+","+
"\""+q.opt2.replace('\r\n','<br />')+"\""+","+
"\""+q.opt3.replace('\r\n','<br />')+"\""+","+
"\""+q.opt4.replace('\r\n','<br />')+"\""+
"]"+
"}")
else:
return ("{"+
"\"num\" : " + "\""+"\""+","+
"\"question\" : "+"\""+"Sorry question not found. We'll fix it Soon"+"\""+","+
"\"image\" : "+"\""+"\""+","+
"\"options\" : " + "["+
"\""+""+"\""+","+
"\""+""+"\""+","+
"\""+""+"\""+","+
"\""+""+"\""+
"]"+
"}")
| gpl-2.0 | -2,501,730,747,631,768,600 | 32.333333 | 81 | 0.487857 | false | 2.755906 | false | false | false |
playpauseandstop/setman | setman/utils/importlib.py | 1 | 1408 | """
Backported from `importlib <http://pypi.python.org/pypi/importlib>` library,
which itself backported from Python 3.x branch.
"""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
| bsd-3-clause | -9,140,119,519,811,707,000 | 33.341463 | 79 | 0.617898 | false | 4.372671 | false | false | false |
authman/Python201609 | Nguyen_Ken/Assignments/Flask/registration_form/server.py | 1 | 1721 | from flask import Flask, render_template, request, redirect, session, flash
import re
app = Flask(__name__)
app.secret_key = 'secretsquirrel'
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/process', methods=['POST'])
def process():
session['email'] = request.form['email']
session['first_name'] = request.form['first_name']
session['last_name'] = request.form['last_name']
session['password'] = request.form['password1']
session['confirm_password'] = request.form['password2']
if len(session['email']) < 1:
flash('Please enter your email', 'error')
elif not EMAIL_REGEX.match(session['email']):
flash('That is not a valid email address', 'error')
elif len(session['first_name']) < 1:
flash('Please enter your first name', 'error')
elif not session['first_name'].isalpha():
flash('Your name cannot contain numbers or special characters', 'error')
elif len(session['last_name']) < 1:
flash('Please enter your last name', 'error')
elif not session['last_name'].isalpha():
flash('Your name cannot contain numbers or special characters', 'error')
elif len(session['password']) < 1:
flash('Please enter a password', 'error')
elif len(session['password']) < 8:
flash('Your password must be greater than 8 characters', 'error')
elif not session['confirm_password'] == session['password']:
flash('Your password does not match!', 'error')
else:
flash('Thanks for submitting your information', 'success')
return redirect('/')
app.run(debug=True)
| mit | 6,433,111,740,301,173,000 | 27.683333 | 80 | 0.632191 | false | 3.799117 | false | false | false |
altova/sec-edgar-tools | sec_filing_to_xlsx.py | 1 | 11948 | # Copyright 2015 Altova GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__copyright__ = "Copyright 2015-2017 Altova GmbH"
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
# This script generates Excel reports from a SEC EDGAR filing.
# NOTE: You must first download the source code of the 3rd party Python module xlsxwriter from https://pypi.python.org/pypi/XlsxWriter
# and extract the xslxwriter folder in the archive to the lib/python3.4 subfolder of the RaptorXML server installation directory.
#
# Example invocation:
# raptorxmlxbrl valxbrl --script=sec_filing_to_xlsx.py nanonull.xbrl
import os, datetime, itertools
from altova import *
try:
import xlsxwriter
except:
raise ImportError('Please install the 3rd party python module xlsxwrite from https://pypi.python.org/pypi/XlsxWriter')
lang='en-US'
formats = {}
def isPeriodStart(role):
return role in (
'http://www.xbrl.org/2003/role/periodStartLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodStart',
'http://www.xbrl.org/2009/role/negatedPeriodStartLabel'
)
def isPeriodEnd(role):
return role in (
'http://www.xbrl.org/2003/role/periodEndLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodEnd',
'http://www.xbrl.org/2009/role/negatedPeriodEndLabel'
)
def isTotal(role):
return role in (
'http://www.xbrl.org/2003/role/totalLabel',
'http://xbrl.us/us-gaap/role/label/negatedTotal',
'http://www.xbrl.org/2009/role/negatedTotalLabel'
)
def isNegated(role):
return role in (
'http://xbrl.us/us-gaap/role/label/negated',
'http://www.xbrl.org/2009/role/negatedLabel',
'http://www.xbrl.org/2009/role/negatedNetLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodEnd',
'http://www.xbrl.org/2009/role/negatedPeriodEndLabel',
'http://xbrl.us/us-gaap/role/label/negatedPeriodStart',
'http://www.xbrl.org/2009/role/negatedPeriodStartLabel',
'http://www.xbrl.org/2009/role/negatedTerseLabel',
'http://xbrl.us/us-gaap/role/label/negatedTotal',
'http://www.xbrl.org/2009/role/negatedTotalLabel'
)
def domainMembersFromPresentationTreeRecursive(network,parent,domain_members):
for rel in network.relationships_from(parent):
domain_members.append(rel.target)
domainMembersFromPresentationTreeRecursive(network,rel.target,domain_members)
def conceptsFromPresentationTreeRecursive(network,parent,concepts):
for rel in network.relationships_from(parent):
if not rel.target.abstract:
concepts.append((rel.target,rel.preferred_label))
conceptsFromPresentationTreeRecursive(network,rel.target,concepts)
def analyzePresentationTree(network,roots):
concepts = []
dimensions = {}
for rel in network.relationships_from(roots[0]):
if isinstance(rel.target,xbrl.xdt.Hypercube):
for rel2 in network.relationships_from(rel.target):
if isinstance(rel2.target,xbrl.xdt.Dimension):
domainMembersFromPresentationTreeRecursive(network,rel2.target,dimensions.setdefault(rel2.target,[]))
else:
conceptsFromPresentationTreeRecursive(network,rel2.target,concepts)
else:
conceptsFromPresentationTreeRecursive(network,rel.target,concepts)
return concepts, dimensions
def calcTableData(instance,role,contexts,concepts,dimensions):
table = {'columns': [], 'height': len(concepts)}
bIsCashFlow = 'cash' in role[1].lower() and 'flow' in role[1].lower()
for context in contexts:
cs = xbrl.ConstraintSet(context)
period = cs[xbrl.Aspect.PERIOD]
dimension_aspects = [value for aspect,value in cs.items() if isinstance(aspect,xbrl.xdt.Dimension)]
bEliminate = False
for val in dimension_aspects:
domain = dimensions.get(val.dimension,None)
if not domain or val.value not in domain:
bEliminate = True
for dim in set(dimensions.keys())-set([value.dimension for value in dimension_aspects]):
if dim.default_member and dim.default_member not in dimensions[dim]:
bEliminate = True
if bEliminate:
continue
bEmpty = True
bHasCash = False
column = {'period': period, 'dimensions': dimension_aspects, 'rows': []}
for concept in concepts:
cs[xbrl.Aspect.CONCEPT] = concept[0]
if isPeriodStart(concept[1]):
if period.period_type == xbrl.PeriodType.START_END:
cs[xbrl.Aspect.PERIOD] = xbrl.PeriodAspectValue.from_instant(period.start)
else:
column['rows'].append({'concept': concept, 'facts': xbrl.FactSet()})
continue
elif isPeriodEnd(concept[1]):
if period.period_type == xbrl.PeriodType.START_END:
cs[xbrl.Aspect.PERIOD] = xbrl.PeriodAspectValue.from_instant(period.end)
else:
column['rows'].append({'concept': concept, 'facts': xbrl.FactSet()})
continue
else:
cs[xbrl.Aspect.PERIOD] = period
facts = instance.facts.filter(cs,allow_additional_dimensions=False)
if len(facts):
bEmpty = False
if bIsCashFlow and not bHasCash and concept[0].is_duration():
bHasCash = 'cash' in next(iter(concept[0].labels(label_role=concept[1],lang=lang))).text.lower()
column['rows'].append({'concept': concept, 'facts': facts})
if not bEmpty and (not bIsCashFlow or bHasCash):
table['columns'].append(column)
return table
def formatConcept(concept):
preferredLabel = concept[1] if concept[1] else 'http://www.xbrl.org/2003/role/label'
labels = list(concept[0].labels(label_role=preferredLabel,lang=lang))
if labels:
return labels[0].text
return str(concept[0].qname)
def formatPeriod(period):
if period.period_type == xbrl.PeriodType.INSTANT:
return period.instant.strftime('%d. %B %Y')
elif period.period_type == xbrl.PeriodType.START_END:
return '%s to %s' % (period.start.strftime('%d. %B %Y'), period.end.strftime('%d. %B %Y'))
elif period.period_type == xbrl.PeriodType.FOREVER:
return 'Forever'
return ''
def formatDimensionValue(dimValue):
return formatConcept((dimValue.value,'http://www.xbrl.org/2003/role/terseLabel'))
def formatFact(dts,fact,preferredLabel=None):
if fact.xsi_nil:
return ('#N/A',None)
elif fact.concept.is_numeric():
if fact.concept.is_fraction():
val = fact.effective_fraction_value
else:
val = fact.effective_numeric_value
if isNegated(preferredLabel):
val *= -1
if fact.concept.is_monetary():
if isTotal(preferredLabel):
return (val,formats['monetary_total'])
return (val,formats['monetary'])
return (val,None)
elif fact.concept.is_qname():
concept = dts.resolve_concept(fact.qname_value)
if concept:
for label in concept.labels():
return (label.text,None)
return (str(fact.qname_value),None)
else:
return (fact.normalized_value,None)
def getDuration(column):
p = column['period']
if p.period_type == xbrl.PeriodType.INSTANT:
return 0
return (p.end.year - p.start.year) * 12 + p.end.month - p.start.month
def getEndDate(column):
p = column['period']
if p.period_type == xbrl.PeriodType.INSTANT:
return p.instant
return p.end
def generateTable(workbook, dts, role, table):
columns = sorted(table['columns'],key=lambda x: (-getDuration(x),getEndDate(x)),reverse=True)
worksheet = workbook.add_worksheet(role[1].split(' - ')[0])
worksheet.set_column(0,0,70)
worksheet.set_column(1,1+len(table['columns']),20)
worksheet.write(0,0,role[1].split(' - ')[2],formats['caption'])
col = 1
row_start = 1
for duration, group in itertools.groupby(columns,key=getDuration):
cols = list(group)
if duration > 0:
if len(cols) > 1:
worksheet.merge_range(0,col,0,col+len(cols)-1,'%d Months Ended' % getDuration(cols[0]),formats['center'])
else:
worksheet.write(0,col,'%d Months Ended' % getDuration(cols[0]),formats['center'])
row = 1
else:
row = 0
for column in cols:
worksheet.write(row,col,getEndDate(column)-datetime.timedelta(days=1),formats['date'])
for i, dimValue in enumerate(column['dimensions']):
dimLabel = formatDimensionValue(dimValue)
if '[Domain]' not in dimLabel:
worksheet.write(row+1+i,col,dimLabel)
col += 1
row_start = max(row_start,row+2+len(column['dimensions']))
for row in range(table['height']):
concept = columns[0]['rows'][row]['concept']
worksheet.write(row_start+row,0,formatConcept(concept),formats['header'])
for col, column in enumerate(columns):
for fact in column['rows'][row]['facts']:
worksheet.write(row_start+row,1+col,*formatFact(dts,fact,concept[1]))
footnotes = [footnote.text for footnote in fact.footnotes(lang=lang)]
if footnotes:
worksheet.write_comment(row_start+row,1+col,'\n'.join(footnotes),{'x_scale':5,'y_scale':2})
def generateTables(path, dts, instance):
global formats
workbook = xlsxwriter.Workbook(path)
formats['center'] = workbook.add_format({'align':'center'})
formats['caption'] = workbook.add_format({'text_wrap':True,'bold':True})
formats['header'] = workbook.add_format({'text_wrap':True})
formats['date'] = workbook.add_format({'num_format':'mmm. d, yyyy','bold':True})
formats['monetary'] = workbook.add_format({'num_format': '#,##0_);[Red](#,##0)'})
formats['monetary_total'] = workbook.add_format({'num_format': '#,##0_);[Red](#,##0)', 'underline':33})
# Calculate table data
tables = {}
contexts = list(instance.contexts)
roles = [(role, dts.role_type(role).definition.value) for role in dts.presentation_link_roles()]
roles = sorted(roles, key=lambda role: role[1].split(' - ')[0])
for role in roles:
presentation_network = dts.presentation_base_set(role[0]).network_of_relationships()
roots = list(presentation_network.roots)
tables[role] = calcTableData(instance,role,contexts,*analyzePresentationTree(presentation_network,roots))
# Generate excel sheet for each non-empty table
for role in roles:
if tables[role]['columns']:
generateTable(workbook, dts, role, tables[role])
workbook.close()
# Main entry point, will be called by RaptorXML after the XBRL instance validation job has finished
def on_xbrl_finished(job, instance):
# instance object will be None if XBRL 2.1 validation was not successful
if instance:
path = os.path.join(job.output_dir,'table.xlsx')
generateTables(path, instance.dts, instance)
# Register new output file with RaptorXML engine
job.append_output_filename(path) | apache-2.0 | -8,564,099,729,764,443,000 | 42.286232 | 134 | 0.640214 | false | 3.601447 | false | false | false |
Kyly/mustaske | test/selenium_src/leave_room.py | 1 | 3018 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class LeaveRoom(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:3000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_leave_room(self):
driver = self.driver
driver.get(self.base_url)
driver.find_element_by_css_selector("input.form-control").clear()
driver.find_element_by_css_selector("input.form-control").send_keys("Test_Room")
driver.find_element_by_id("make-room").click()
ownerRoomName = driver.find_element_by_css_selector("span.navbar-brand.room-name").text
self.assertEqual("Test_Room",ownerRoomName)
driver.find_element_by_css_selector("span.fa.fa-cogs").click()
ownerRoomID = driver.find_element_by_class_name("drop-down-room-id").text
driver.execute_script("$(window.open('"+self.base_url+"'))")
driver.switch_to_window(driver.window_handles[-1])
driver.find_element_by_css_selector("input.form-control").clear()
driver.find_element_by_css_selector("input.form-control").send_keys(ownerRoomID)
driver.find_element_by_id("join-room").click()
audienceRoomName = driver.find_element_by_css_selector("span.navbar-brand.room-name").text
self.assertEqual(ownerRoomName,audienceRoomName)
driver.find_element_by_css_selector("span.fa.fa-cogs").click()
audienceRoomID = driver.find_element_by_class_name("drop-down-room-id").text
self.assertEqual(ownerRoomID,audienceRoomID)
driver.find_element_by_xpath("//li/ul/li[4]/a/span").click()
try: self.assertTrue(self.is_element_present(By.ID, "join-create-room"))
except AssertionError as e: self.verificationErrors.append(str(e))
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| mit | 4,663,683,868,890,262,000 | 41.507042 | 98 | 0.661034 | false | 3.721332 | true | false | false |
jayvdb/flake8-copyright | setup.py | 1 | 1860 | # -=- encoding: utf-8 -=-
#
# Copyright (C) 2014 Savoir-faire Linux Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from setuptools import setup
def get_version(fname='flake8_copyright.py'):
with open(fname) as f:
for line in f:
if line.startswith('__version__'):
return eval(line.split('=')[-1])
def get_long_description():
descr = []
for fname in ('README.rst',):
with open(fname) as f:
descr.append(f.read())
return '\n\n'.join(descr)
setup(
name='flake8-copyright',
version=get_version(),
description='Adds copyright checks to flake8',
long_description=get_long_description(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
author='Virgil Dupras',
author_email='[email protected]',
url='https://github.com/savoirfairelinux/flake8-copyright',
keywords='pep8 flake8 copyright',
py_modules=['flake8_copyright'],
install_requires=[
'setuptools',
],
entry_points={
'flake8.extension': ['flake8_copyright = flake8_copyright:CopyrightChecker'],
},
)
| gpl-3.0 | 3,025,203,135,656,725,500 | 30.525424 | 85 | 0.667204 | false | 3.835052 | false | false | false |
mohitsethi/solum | solum/api/controllers/v1/extension.py | 1 | 2846 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from solum.api.controllers import common_types
from solum.api.controllers.v1 import types as api_types
from solum.openstack.common.gettextutils import _
class Extension(api_types.Base):
"""The Extension resource represents changes that the Provider has added
onto a Platform in addition to the ones supplied by Solum by default.
This may include additional protocol semantics, resource types,
application lifecycle states, resource attributes, etc. Anything may be
added, as long as it does not contradict the base functionality offered
by Solum.
"""
version = wtypes.text
"Version of the extension."
documentation = common_types.Uri
"Documentation URI to the extension."
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/extensions/mysql',
name='mysql',
type='extension',
tags=['large'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
description='A mysql extension',
version='2.13',
documentation='http://example.com/docs/ext/mysql')
class ExtensionController(rest.RestController):
"""Manages operations on a single extension."""
def __init__(self, extension_id):
pecan.request.context['extension_id'] = extension_id
self._id = extension_id
@wsme_pecan.wsexpose(Extension, wtypes.text)
def get(self):
"""Return this extension."""
error = _("Not implemented")
pecan.response.translatable_error = error
raise wsme.exc.ClientSideError(six.text_type(error))
class ExtensionsController(rest.RestController):
"""Manages operations on the extensions collection."""
@pecan.expose()
def _lookup(self, extension_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return ExtensionController(extension_id), remainder
@wsme_pecan.wsexpose([Extension])
def get_all(self):
"""Return all extensions, based on the query provided."""
return []
| apache-2.0 | -1,911,150,574,673,588,200 | 34.575 | 76 | 0.682713 | false | 4.112717 | false | false | false |
zhaogaolong/oneFinger | openstack/api/opentack_ansible.py | 1 | 1743 | #!/usr/bin/env python
# coding:utf8
# import pdb
import ansible.runner
from one_finger.cloud_logging import cloud_logging as logging
log = logging.logger
class CmmAndRun():
def __init__(self, module_name='shell', host=None, cmd=None, timeout=20):
self.username = 'root'
self.module_name = module_name
self.host = host
self.cmd = cmd
self.timeout = timeout
self.update_ansible_hosts()
def update_ansible_hosts(self):
status = False
b = open('/etc/ansible/hosts')
for line in b.readlines():
if self.host in line:
status = True
b.close()
if not status:
b = open('/etc/ansible/hosts','a')
b.writelines(self.host)
b.writelines('\n')
b.close()
def start(self):
runner = ansible.runner.Runner(
module_name=self.module_name,
module_args=self.cmd,
pattern=self.host,
timeout=self.timeout,
)
log.debug('ansible %s RunCommand: %s' % (self.host, self.cmd))
# import pdb
# pdb.set_trace()
datastructure = runner.run()
# print datastructure
log.debug('ansible sttout %s' % datastructure)
# print datastructure
if datastructure['dark']:
pass
else:
if not datastructure['contacted'][self.host]['rc']:
data = datastructure['contacted'][self.host]['stdout']
return data
else:
return None
if __name__ == '__main__':
ac = CmmAndRun(host='172.16.254.1', cmd='date')
print ac.start() | apache-2.0 | -7,011,476,624,317,718,000 | 24.846154 | 77 | 0.522088 | false | 4.062937 | false | false | false |
moiseshiraldo/inviMarket | inviMarket/views/register.py | 1 | 2814 | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.core.mail import send_mail
from django.conf import settings
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from django.utils import timezone
from django.utils.translation import ugettext as _
import hashlib
import datetime
import random
from inviMarket.models import Profile
from inviMarket.forms import RegisterForm
def register(request):
"""
Display the user registration form and store the :model:`auth.User` and
his :model:`inviMarket.Profile` in the database.
**Context**
``form``
An instace of the user registration form.
``error``
A string variable containing any general error message.
**Template:**
:template:`inviMarket/register.html`
"""
error = None
if request.user.is_authenticated():
return redirect('index')
if request.method == 'POST':
form = RegisterForm(request.POST)
if 'terms' not in request.POST:
error= _("You must read and accept the terms and conditions.")
elif form.is_valid():
if form.cleaned_data['last_name'] != "":
return redirect('confirm')
new_user = form.save()
# Create a random activation key and store it in the user profile
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt+new_user.email).hexdigest()
key_expires = timezone.now() + datetime.timedelta(2)
lang = request.LANGUAGE_CODE
profile = Profile(user=new_user, activation_key=activation_key,
key_expires=key_expires, lang=lang, last_visit=timezone.now())
profile.save()
# Send the activation key to the user
text = render_to_string('email/activation.txt',
{'name': new_user.first_name,
'uidb64': urlsafe_base64_encode(force_bytes(new_user.id)),
'key': activation_key,
'domain': settings.DOMAIN,
})
html = render_to_string('email/activation.html',
{'name': new_user.first_name,
'uidb64': urlsafe_base64_encode(force_bytes(new_user.id)),
'key': activation_key,
'domain': settings.DOMAIN,
})
subject = "Account activation"
send_mail(subject, text, "inviMarket <[email protected]>",
[new_user.email], html_message=html,fail_silently=False)
return redirect('confirm')
else:
form = RegisterForm()
return render(request, 'register.html', {'form': form, 'error': error}) | agpl-3.0 | -5,880,959,783,419,259,000 | 36.533333 | 78 | 0.615849 | false | 4.1875 | false | false | false |
scealiontach/cryptotrading | src/autotrader/hashnest.py | 1 | 4020 | import urllib,urllib.parse,urllib.request,urllib.error
import json
import time,datetime
import hashlib,hmac,base64
import logging
LOG=logging.getLogger(__name__)
#SELL='sale'
#BUY='purchase'
class hashnest(object):
URL = 'https://www.hashnest.com/api/v1/'
def __init__(self,username,key,secret):
self.username=username
self.key=key
self.secret=secret
def get_nonce(self):
self.utcnow=a=datetime.datetime.utcnow()
b=datetime.datetime(1970,1,1,0,0,0,0)
self.nonce= int((a-b).total_seconds()*1000)
return self.nonce
def signature(self,req):
nonce=self.get_nonce()
message = str(nonce) + self.username + self.key
req['access_key']=self.key
req['nonce']=nonce
req['signature']= hmac.new(self.secret.encode(), msg=message.encode(), digestmod=hashlib.sha256).hexdigest()
return urllib.parse.urlencode(req)
def request(self,url,req={}):
url = self.URL + url
data= self.signature(req)
url=url+'?'+data
req = urllib.request.Request(url, method='POST')
retry=True
retry_count=0
while retry:
try:
with urllib.request.urlopen(req) as resp:
r=resp.read()
retObj=json.loads(r.decode())
time.sleep(1)
retry=False
except urllib.error.HTTPError as e:
if e.code!=401:
raise e
else:
if retry_count<10:
time.sleep(retry_count**2)
retry_count+=1
else:
raise e
pass
pass
pass
pass
return retObj
def get_account_info(self):
return self.request('account')
def get_account_balance(self):
return self.request('currency_accounts')
def get_account_hashrate(self):
return self.request('hash_accounts')
def get_account_orders(self,cmi):
param={'currency_market_id':cmi}
return self.request('orders/active',param)
def get_account_trade_history(self,cmi,page=1,page_amount=10):
param={'currency_market_id':cmi}
param['page']=page
param['page_per_amount']=page_amount
return self.request('orders/history',param)
def create_order(self,cmi,amount,ppc,category):
param={'currency_market_id':cmi}
param['amount']=amount
param['ppc']=ppc
param['category']=category
return self.request('orders',param)
def cancel_order(self,order_id):
param={'order_id':order_id}
return self.request('orders/revoke',param)
def cancel_all_orders(self,cmi,category):
param={'currency_market_id':cmi}
param['category']=category
return self.request('orders/quick_revoke',param)
def get_opened_markets(self):
return self.request('currency_markets')
def get_book(self,cmi):
param={'currency_market_id':cmi}
return self.request('currency_markets/orders',param)
def get_trade_history(self,cmi,page=1,page_amount=10):
param={'currency_market_id':cmi}
param['page']=page
param['page_per_amount']=page_amount
param['page_size']=page_amount
return self.request('currency_markets/order_history',param)
def pretty_print_POST(req):
"""
At this point it is completely built and ready
to be fired; it is "prepared".
However pay attention at the formatting used in
this function because it is programmed to be pretty
printed and may differ from the actual request.
"""
return '{}\n{}\n{}\n\n{}'.format(
'-----------START-----------',
req.get_method() + ' ' + req.get_full_url(),
'\n'.join('{}: {}'.format(k, v) for k, v in req.header_items()),
req.data,
)
| apache-2.0 | -3,889,870,154,564,766,000 | 29.687023 | 116 | 0.568159 | false | 3.880309 | false | false | false |
foxmask/django-th | django_th/html_entities.py | 1 | 1275 | # coding: utf-8
import html.entities as htmlentities
import re
class HtmlEntities:
def __init__(self, my_string):
self.my_string = my_string
def html_entity_decode_char(self, m, defs=htmlentities.entitydefs):
"""
decode html entity into one of the html char
"""
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0)
def html_entity_decode_codepoint(self, m,
defs=htmlentities.codepoint2name):
"""
decode html entity into one of the codepoint2name
"""
try:
char = defs[m.group(1)]
return "&{char};".format(char=char)
except ValueError:
return m.group(0)
except KeyError:
return m.group(0)
@property
def html_entity_decode(self):
"""
entry point of this set of tools
to decode html entities
"""
pattern = re.compile(r"&#(\w+?);")
string = pattern.sub(self.html_entity_decode_char, self.my_string)
return pattern.sub(self.html_entity_decode_codepoint, string)
| bsd-3-clause | 8,504,328,862,743,241,000 | 27.977273 | 74 | 0.545098 | false | 4.086538 | false | false | false |
cihai/cihai-python | tests/test_conversion.py | 1 | 3543 | # -*- coding: utf-8 -*-
"""Tests for cihai.
test.conversion
~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, print_function, unicode_literals
from cihai import conversion
from cihai._compat import string_types, text_type
def test_text_type():
c1 = '(same as U+7A69 穩) firm; stable; secure'
c2 = text_type()
assert isinstance(c1, string_types)
assert isinstance(c2, text_type)
"""Return UCN character from Python Unicode character.
Converts a one character Python unicode string (e.g. u'\\u4e00') to the
corresponding Unicode UCN ('U+4E00').
U+369D kSemanticVariant U+595E<kMatthews U+594E<kMatthews
U+3CE2 kTraditionalVariant U+23FB7
U+3FF7 kSemanticVariant U+7CD9<kMatthews,kMeyerWempe
U+345A kDefinition (non-classical form of 那) that, there
U+349A kDefinition (same as U+7A69 穩) firm; stable; secure,
dependent upon others
U+34B5 kMandarin mào
U+356D kCantonese au3 jaau1
"""
def test_ucn_from_unicode():
text = '一'
python_unicode = u'\u4e00'
expected = "U+4E00"
bytes_expected = b"U+4E00"
assert conversion.python_to_ucn(python_unicode) == expected
assert isinstance(conversion.python_to_ucn(python_unicode), text_type)
assert isinstance(conversion.python_to_ucn(python_unicode, as_bytes=True), bytes)
assert conversion.python_to_ucn(text, as_bytes=True) == bytes_expected
def test_ucn_from_unicode_16():
text = '𦄀'
python_unicode = u'\u26100'
expected = "U+26100"
bytes_expected = b"U+26100"
assert conversion.python_to_ucn(python_unicode) == expected
assert isinstance(conversion.python_to_ucn(python_unicode), text_type)
assert isinstance(conversion.python_to_ucn(python_unicode, as_bytes=True), bytes)
assert conversion.python_to_ucn(text, as_bytes=True) == bytes_expected
def test_ucn_to_unicode():
before = 'U+4E00'
expected = '\u4e00'
result = conversion.ucn_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
# wide character
before = 'U+20001'
expected = '\U00020001'
result = conversion.ucn_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
before = '(same as U+7A69 穩) firm; stable; secure'
expected = '(same as 穩 穩) firm; stable; secure'
result = conversion.ucnstring_to_unicode(before)
assert result == expected
assert isinstance(result, text_type)
"""Return EUC character from a Python Unicode character.
Converts a one character Python unicode string (e.g. u'\\u4e00') to the
corresponding EUC hex ('d2bb').
"""
def test_hexd():
assert conversion.hexd(0xFFFF) == 'ffff'
def test_euc_from_unicode():
expected = '一' # u'\u4e00'
euc_bytestring = b'd2bb'
euc_unicode = 'd2bb'
result = conversion.python_to_euc(expected, as_bytes=True)
assert euc_bytestring == result
assert isinstance(result, bytes)
result = conversion.python_to_euc(expected)
assert euc_unicode == result
assert isinstance(result, text_type)
def test_euc_to_utf8():
expected = '一'
euc_bytestring = b'b0ec'
result = conversion.euc_to_utf8(euc_bytestring)
assert expected == result
def test_euc_to_unicode():
expected = '一'
expected_ustring = u'\u4e00'
euc_bytestring = b'd2bb'
result = conversion.euc_to_unicode(euc_bytestring)
assert expected == expected_ustring
assert isinstance(result, text_type)
assert expected == result
assert expected_ustring == result
| bsd-3-clause | 5,327,725,858,940,054,000 | 23.608392 | 85 | 0.687411 | false | 3.089552 | true | false | false |
sajeeshcs/nested_projects_keystone | keystone/credential/controllers.py | 1 | 4503 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from keystone.common import controller
from keystone.common import dependency
from keystone.common import validation
from keystone.credential import schema
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import jsonutils
@dependency.requires('credential_api')
class CredentialV3(controller.V3Controller):
collection_name = 'credentials'
member_name = 'credential'
def __init__(self):
super(CredentialV3, self).__init__()
self.get_member_from_driver = self.credential_api.get_credential
def _assign_unique_id(self, ref, trust_id=None):
# Generates and assigns a unique identifier to
# a credential reference.
if ref.get('type', '').lower() == 'ec2':
try:
blob = jsonutils.loads(ref.get('blob'))
except (ValueError, TypeError):
raise exception.ValidationError(
message=_('Invalid blob in credential'))
if not blob or not isinstance(blob, dict):
raise exception.ValidationError(attribute='blob',
target='credential')
if blob.get('access') is None:
raise exception.ValidationError(attribute='access',
target='blob')
ret_ref = ref.copy()
ret_ref['id'] = hashlib.sha256(blob['access']).hexdigest()
# Update the blob with the trust_id, so credentials created
# with a trust scoped token will result in trust scoped
# tokens when authentication via ec2tokens happens
if trust_id is not None:
blob['trust_id'] = trust_id
ret_ref['blob'] = jsonutils.dumps(blob)
return ret_ref
else:
return super(CredentialV3, self)._assign_unique_id(ref)
@controller.protected()
@validation.validated(schema.credential_create, 'credential')
def create_credential(self, context, credential):
trust_id = self._get_trust_id_for_request(context)
ref = self._assign_unique_id(self._normalize_dict(credential),
trust_id)
ref = self.credential_api.create_credential(ref['id'], ref)
return CredentialV3.wrap_member(context, ref)
@staticmethod
def _blob_to_json(ref):
# credentials stored via ec2tokens before the fix for #1259584
# need json serializing, as that's the documented API format
blob = ref.get('blob')
if isinstance(blob, dict):
new_ref = ref.copy()
new_ref['blob'] = jsonutils.dumps(blob)
return new_ref
else:
return ref
@controller.filterprotected('user_id')
def list_credentials(self, context, filters):
hints = CredentialV3.build_driver_hints(context, filters)
refs = self.credential_api.list_credentials(hints)
ret_refs = [self._blob_to_json(r) for r in refs]
return CredentialV3.wrap_collection(context, ret_refs,
hints=hints)
@controller.protected()
def get_credential(self, context, credential_id):
ref = self.credential_api.get_credential(credential_id)
ret_ref = self._blob_to_json(ref)
return CredentialV3.wrap_member(context, ret_ref)
@controller.protected()
@validation.validated(schema.credential_update, 'credential')
def update_credential(self, context, credential_id, credential):
self._require_matching_id(credential_id, credential)
ref = self.credential_api.update_credential(credential_id, credential)
return CredentialV3.wrap_member(context, ref)
@controller.protected()
def delete_credential(self, context, credential_id):
return self.credential_api.delete_credential(credential_id)
| apache-2.0 | -4,493,071,998,995,930,600 | 41.084112 | 78 | 0.642016 | false | 4.367604 | false | false | false |
guoxuesong/deepstacks | deepstacks/lasagne/utils.py | 1 | 2721 | #!/usr/bin/env python
# coding:utf-8
# vi:tabstop=4:shiftwidth=4:expandtab:sts=4
import theano
import lasagne
from join import join_layer as JoinLayer
from ..utils.curry import curry
floatX = theano.config.floatX
def ordered_errors(errors, m=None, prefix='', deterministic=False):
res = []
for t in errors:
if m is None:
res += [[prefix+t, map(curry(lasagne.layers.get_output,deterministic=deterministic), errors[t])]]
else:
tmp = map(lambda x: JoinLayer(x, m), errors[t])
res += [[prefix+t, map(curry(lasagne.layers.get_output,deterministic=deterministic), tmp)]]
return sorted(res, key=lambda x: x[0])
def get_loss(errors, watchpoints, loss0=None):
errors = ordered_errors(errors)
watch_errors = ordered_errors(watchpoints)
errors1 = []
watch_errors1 = []
train_watch_errors1 = []
tagslice = []
count = 0
valtagslice = []
valcount = 0
for tag, errs in errors:
errors1 += errs
tagslice += [[tag, slice(count, count+len(errs))]]
count += len(errs)
for tag, errs in watch_errors:
if tag.startswith('train:'):
train_watch_errors1 += errs
tagslice += [[tag, slice(count, count+len(errs))]]
count += len(errs)
else:
watch_errors1 += errs
valtagslice += [[tag, slice(valcount, valcount+len(errs))]]
valcount += len(errs)
errors1 = [errors1]
watch_errors1 = [watch_errors1]
train_watch_errors1 = [train_watch_errors1]
loss = loss0 if loss0 is not None else 0.0
losslist = []
vallosslist = []
tmp = 0.0
for ee in errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
losslist = losslist+[tmp]
loss = loss+tmp
for ee in watch_errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
vallosslist = vallosslist+[tmp]
# loss = loss+tmp
for ee in train_watch_errors1:
for err in ee:
if err is not None:
tmp = err.mean(dtype=floatX)
losslist = losslist+[tmp]
# loss = loss+tmp
return loss, losslist, tagslice
def get_watchslice(watchpoints):
trainwatch = {}
valwatch = {}
for tag, errs in watchpoints:
if tag.startswith('train:'):
trainwatch[tag] = errs
else:
valwatch[tag] = errs
ig, train_values, train_tagslice = get_loss(trainwatch, [])
ig, val_values, val_tagslice = get_loss(valwatch, [])
return train_values, train_tagslice, val_values, val_tagslice
| mit | 6,609,764,372,025,347,000 | 30.275862 | 109 | 0.575891 | false | 3.524611 | false | false | false |
DevynCJohnson/Pybooster | pylib/convvolume.py | 1 | 120739 | #!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Volume measurement conversions.
@file convvolume.py
@package pybooster.convvolume
@version 2020.08.08
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from typing import Union
__all__: list = [
# CANADIAN CUPS #
r'cancup2cubicinch',
r'cancup2cubicmeter',
r'cancup2liter',
r'cancup2metriccup',
r'cancup2metrictablespoon',
r'cancup2metricteaspoon',
r'cancup2ukfluidounce',
r'cancup2ukgallon',
r'cancup2ukgill',
r'cancup2ukpint',
r'cancup2ukquart',
r'cancup2uktablespoon',
r'cancup2ukteaspoon',
r'cancup2uscup',
r'cancup2usdrygallon',
r'cancup2usdrypint',
r'cancup2usdryquart',
r'cancup2usfluidounce',
r'cancup2usgill',
r'cancup2usliquidgallon',
r'cancup2usliquidpint',
r'cancup2usliquidquart',
r'cancup2ustablespoon',
r'cancup2usteaspoon',
# CUBIC INCHES #
r'cubicinch2cancup',
r'cubicinch2cubicmeter',
r'cubicinch2liter',
r'cubicinch2metriccup',
r'cubicinch2metrictablespoon',
r'cubicinch2metricteaspoon',
r'cubicinch2ukfluidounce',
r'cubicinch2ukgallon',
r'cubicinch2ukgill',
r'cubicinch2ukpint',
r'cubicinch2ukquart',
r'cubicinch2uktablespoon',
r'cubicinch2ukteaspoon',
r'cubicinch2uscup',
r'cubicinch2usdrygallon',
r'cubicinch2usdrypint',
r'cubicinch2usdryquart',
r'cubicinch2usfluidounce',
r'cubicinch2usgill',
r'cubicinch2usliquidgallon',
r'cubicinch2usliquidpint',
r'cubicinch2usliquidquart',
r'cubicinch2ustablespoon',
r'cubicinch2usteaspoon',
# CUBIC METERS #
r'cubicmeter2cancup',
r'cubicmeter2cubicinch',
r'cubicmeter2liter',
r'cubicmeter2metriccup',
r'cubicmeter2metrictablespoon',
r'cubicmeter2metricteaspoon',
r'cubicmeter2ukfluidounce',
r'cubicmeter2ukgallon',
r'cubicmeter2ukgill',
r'cubicmeter2ukpint',
r'cubicmeter2ukquart',
r'cubicmeter2uktablespoon',
r'cubicmeter2ukteaspoon',
r'cubicmeter2uscup',
r'cubicmeter2usdrygallon',
r'cubicmeter2usdrypint',
r'cubicmeter2usdryquart',
r'cubicmeter2usfluidounce',
r'cubicmeter2usgill',
r'cubicmeter2usliquidgallon',
r'cubicmeter2usliquidpint',
r'cubicmeter2usliquidquart',
r'cubicmeter2ustablespoon',
r'cubicmeter2usteaspoon',
# LITERS #
r'liter2cancup',
r'liter2cubicinch',
r'liter2cubicmeter',
r'liter2metriccup',
r'liter2metrictablespoon',
r'liter2metricteaspoon',
r'liter2ukfluidounce',
r'liter2ukgallon',
r'liter2ukgill',
r'liter2ukpint',
r'liter2ukquart',
r'liter2uktablespoon',
r'liter2ukteaspoon',
r'liter2uscup',
r'liter2usdrygallon',
r'liter2usdrypint',
r'liter2usdryquart',
r'liter2usfluidounce',
r'liter2usgill',
r'liter2usliquidgallon',
r'liter2usliquidpint',
r'liter2usliquidquart',
r'liter2ustablespoon',
r'liter2usteaspoon',
# METRIC CUPS #
r'metriccup2cancup',
r'metriccup2cubicinch',
r'metriccup2cubicmeter',
r'metriccup2liter',
r'metriccup2metrictablespoon',
r'metriccup2metricteaspoon',
r'metriccup2ukfluidounce',
r'metriccup2ukgallon',
r'metriccup2ukgill',
r'metriccup2ukpint',
r'metriccup2ukquart',
r'metriccup2uktablespoon',
r'metriccup2ukteaspoon',
r'metriccup2uscup',
r'metriccup2usdrygallon',
r'metriccup2usdrypint',
r'metriccup2usdryquart',
r'metriccup2usfluidounce',
r'metriccup2usgill',
r'metriccup2usliquidgallon',
r'metriccup2usliquidpint',
r'metriccup2usliquidquart',
r'metriccup2ustablespoon',
r'metriccup2usteaspoon',
# METRIC TABLESPOONS #
r'metrictablespoon2cancup',
r'metrictablespoon2cubicinch',
r'metrictablespoon2cubicmeter',
r'metrictablespoon2liter',
r'metrictablespoon2metriccup',
r'metrictablespoon2metricteaspoon',
r'metrictablespoon2ukfluidounce',
r'metrictablespoon2ukgallon',
r'metrictablespoon2ukgill',
r'metrictablespoon2ukpint',
r'metrictablespoon2ukquart',
r'metrictablespoon2uktablespoon',
r'metrictablespoon2ukteaspoon',
r'metrictablespoon2uscup',
r'metrictablespoon2usdrygallon',
r'metrictablespoon2usdrypint',
r'metrictablespoon2usdryquart',
r'metrictablespoon2usfluidounce',
r'metrictablespoon2usgill',
r'metrictablespoon2usliquidgallon',
r'metrictablespoon2usliquidpint',
r'metrictablespoon2usliquidquart',
r'metrictablespoon2ustablespoon',
r'metrictablespoon2usteaspoon',
# METRIC TEASPOONS #
r'metricteaspoon2cancup',
r'metricteaspoon2cubicinch',
r'metricteaspoon2cubicmeter',
r'metricteaspoon2liter',
r'metricteaspoon2metriccup',
r'metricteaspoon2metrictablespoon',
r'metricteaspoon2ukfluidounce',
r'metricteaspoon2ukgallon',
r'metricteaspoon2ukgill',
r'metricteaspoon2ukpint',
r'metricteaspoon2ukquart',
r'metricteaspoon2uktablespoon',
r'metricteaspoon2ukteaspoon',
r'metricteaspoon2uscup',
r'metricteaspoon2usdrygallon',
r'metricteaspoon2usdrypint',
r'metricteaspoon2usdryquart',
r'metricteaspoon2usfluidounce',
r'metricteaspoon2usgill',
r'metricteaspoon2usliquidgallon',
r'metricteaspoon2usliquidpint',
r'metricteaspoon2usliquidquart',
r'metricteaspoon2ustablespoon',
r'metricteaspoon2usteaspoon',
# UK FLUID OUNCES #
r'ukfluidounce2cancup',
r'ukfluidounce2cubicinch',
r'ukfluidounce2cubicmeter',
r'ukfluidounce2liter',
r'ukfluidounce2metriccup',
r'ukfluidounce2metrictablespoon',
r'ukfluidounce2metricteaspoon',
r'ukfluidounce2ukgallon',
r'ukfluidounce2ukgill',
r'ukfluidounce2ukpint',
r'ukfluidounce2ukquart',
r'ukfluidounce2uktablespoon',
r'ukfluidounce2ukteaspoon',
r'ukfluidounce2uscup',
r'ukfluidounce2usdrygallon',
r'ukfluidounce2usdrypint',
r'ukfluidounce2usdryquart',
r'ukfluidounce2usfluidounce',
r'ukfluidounce2usgill',
r'ukfluidounce2usliquidgallon',
r'ukfluidounce2usliquidpint',
r'ukfluidounce2usliquidquart',
r'ukfluidounce2ustablespoon',
r'ukfluidounce2usteaspoon',
# UK GALLONS #
r'ukgallon2cancup',
r'ukgallon2cubicinch',
r'ukgallon2cubicmeter',
r'ukgallon2liter',
r'ukgallon2metriccup',
r'ukgallon2metrictablespoon',
r'ukgallon2metricteaspoon',
r'ukgallon2ukfluidounce',
r'ukgallon2ukgill',
r'ukgallon2ukpint',
r'ukgallon2ukquart',
r'ukgallon2uktablespoon',
r'ukgallon2ukteaspoon',
r'ukgallon2uscup',
r'ukgallon2usdrygallon',
r'ukgallon2usdrypint',
r'ukgallon2usdryquart',
r'ukgallon2usfluidounce',
r'ukgallon2usgill',
r'ukgallon2usliquidgallon',
r'ukgallon2usliquidpint',
r'ukgallon2usliquidquart',
r'ukgallon2ustablespoon',
r'ukgallon2usteaspoon',
# UK GILLS #
r'ukgill2cancup',
r'ukgill2cubicinch',
r'ukgill2cubicmeter',
r'ukgill2liter',
r'ukgill2metriccup',
r'ukgill2metrictablespoon',
r'ukgill2metricteaspoon',
r'ukgill2ukfluidounce',
r'ukgill2ukgallon',
r'ukgill2ukpint',
r'ukgill2ukquart',
r'ukgill2uktablespoon',
r'ukgill2ukteaspoon',
r'ukgill2uscup',
r'ukgill2usdrygallon',
r'ukgill2usdrypint',
r'ukgill2usdryquart',
r'ukgill2usfluidounce',
r'ukgill2usgill',
r'ukgill2usliquidgallon',
r'ukgill2usliquidpint',
r'ukgill2usliquidquart',
r'ukgill2ustablespoon',
r'ukgill2usteaspoon',
# UK PINTS #
r'ukpint2cancup',
r'ukpint2cubicinch',
r'ukpint2cubicmeter',
r'ukpint2liter',
r'ukpint2metriccup',
r'ukpint2metrictablespoon',
r'ukpint2metricteaspoon',
r'ukpint2ukfluidounce',
r'ukpint2ukgallon',
r'ukpint2ukgill',
r'ukpint2ukquart',
r'ukpint2uktablespoon',
r'ukpint2ukteaspoon',
r'ukpint2uscup',
r'ukpint2usdrygallon',
r'ukpint2usdrypint',
r'ukpint2usdryquart',
r'ukpint2usfluidounce',
r'ukpint2usgill',
r'ukpint2usliquidgallon',
r'ukpint2usliquidpint',
r'ukpint2usliquidquart',
r'ukpint2ustablespoon',
r'ukpint2usteaspoon',
# UK QUARTS #
r'ukquart2cancup',
r'ukquart2cubicinch',
r'ukquart2cubicmeter',
r'ukquart2liter',
r'ukquart2metriccup',
r'ukquart2metrictablespoon',
r'ukquart2metricteaspoon',
r'ukquart2ukfluidounce',
r'ukquart2ukgallon',
r'ukquart2ukgill',
r'ukquart2ukpint',
r'ukquart2uktablespoon',
r'ukquart2ukteaspoon',
r'ukquart2uscup',
r'ukquart2usdrygallon',
r'ukquart2usdrypint',
r'ukquart2usdryquart',
r'ukquart2usfluidounce',
r'ukquart2usgill',
r'ukquart2usliquidgallon',
r'ukquart2usliquidpint',
r'ukquart2usliquidquart',
r'ukquart2ustablespoon',
r'ukquart2usteaspoon',
# UK TABLESPOONS #
r'uktablespoon2cancup',
r'uktablespoon2cubicinch',
r'uktablespoon2cubicmeter',
r'uktablespoon2liter',
r'uktablespoon2metriccup',
r'uktablespoon2metrictablespoon',
r'uktablespoon2metricteaspoon',
r'uktablespoon2ukfluidounce',
r'uktablespoon2ukgallon',
r'uktablespoon2ukgill',
r'uktablespoon2ukpint',
r'uktablespoon2ukquart',
r'uktablespoon2ukteaspoon',
r'uktablespoon2uscup',
r'uktablespoon2usdrygallon',
r'uktablespoon2usdrypint',
r'uktablespoon2usdryquart',
r'uktablespoon2usfluidounce',
r'uktablespoon2usgill',
r'uktablespoon2usliquidgallon',
r'uktablespoon2usliquidpint',
r'uktablespoon2usliquidquart',
r'uktablespoon2ustablespoon',
r'uktablespoon2usteaspoon',
# UK TEASPOONS #
r'ukteaspoon2cancup',
r'ukteaspoon2cubicinch',
r'ukteaspoon2cubicmeter',
r'ukteaspoon2liter',
r'ukteaspoon2metriccup',
r'ukteaspoon2metrictablespoon',
r'ukteaspoon2metricteaspoon',
r'ukteaspoon2ukfluidounce',
r'ukteaspoon2ukgallon',
r'ukteaspoon2ukgill',
r'ukteaspoon2ukpint',
r'ukteaspoon2ukquart',
r'ukteaspoon2uktablespoon',
r'ukteaspoon2uscup',
r'ukteaspoon2usdrygallon',
r'ukteaspoon2usdrypint',
r'ukteaspoon2usdryquart',
r'ukteaspoon2usfluidounce',
r'ukteaspoon2usgill',
r'ukteaspoon2usliquidgallon',
r'ukteaspoon2usliquidpint',
r'ukteaspoon2usliquidquart',
r'ukteaspoon2ustablespoon',
r'ukteaspoon2usteaspoon',
# US CUPS #
r'uscup2cancup',
r'uscup2cubicinch',
r'uscup2cubicmeter',
r'uscup2liter',
r'uscup2metriccup',
r'uscup2metrictablespoon',
r'uscup2metricteaspoon',
r'uscup2ukfluidounce',
r'uscup2ukgallon',
r'uscup2ukgill',
r'uscup2ukpint',
r'uscup2ukquart',
r'uscup2uktablespoon',
r'uscup2ukteaspoon',
r'uscup2usdrygallon',
r'uscup2usdrypint',
r'uscup2usdryquart',
r'uscup2usfluidounce',
r'uscup2usgill',
r'uscup2usliquidgallon',
r'uscup2usliquidpint',
r'uscup2usliquidquart',
r'uscup2ustablespoon',
r'uscup2usteaspoon',
# US DRY GALLONS #
r'usdrygallon2cancup',
r'usdrygallon2cubicinch',
r'usdrygallon2cubicmeter',
r'usdrygallon2liter',
r'usdrygallon2metriccup',
r'usdrygallon2metrictablespoon',
r'usdrygallon2metricteaspoon',
r'usdrygallon2ukfluidounce',
r'usdrygallon2ukgallon',
r'usdrygallon2ukgill',
r'usdrygallon2ukpint',
r'usdrygallon2ukquart',
r'usdrygallon2uktablespoon',
r'usdrygallon2ukteaspoon',
r'usdrygallon2uscup',
r'usdrygallon2usdrypint',
r'usdrygallon2usdryquart',
r'usdrygallon2usfluidounce',
r'usdrygallon2usgill',
r'usdrygallon2usliquidgallon',
r'usdrygallon2usliquidpint',
r'usdrygallon2usliquidquart',
r'usdrygallon2ustablespoon',
r'usdrygallon2usteaspoon',
# US DRY PINTS #
r'usdrypint2cancup',
r'usdrypint2cubicinch',
r'usdrypint2cubicmeter',
r'usdrypint2liter',
r'usdrypint2metriccup',
r'usdrypint2metrictablespoon',
r'usdrypint2metricteaspoon',
r'usdrypint2ukfluidounce',
r'usdrypint2ukgallon',
r'usdrypint2ukgill',
r'usdrypint2ukpint',
r'usdrypint2ukquart',
r'usdrypint2uktablespoon',
r'usdrypint2ukteaspoon',
r'usdrypint2uscup',
r'usdrypint2usdrygallon',
r'usdrypint2usdryquart',
r'usdrypint2usfluidounce',
r'usdrypint2usgill',
r'usdrypint2usliquidgallon',
r'usdrypint2usliquidpint',
r'usdrypint2usliquidquart',
r'usdrypint2ustablespoon',
r'usdrypint2usteaspoon',
# US DRY QUARTS #
r'usdryquart2cancup',
r'usdryquart2cubicinch',
r'usdryquart2cubicmeter',
r'usdryquart2liter',
r'usdryquart2metriccup',
r'usdryquart2metrictablespoon',
r'usdryquart2metricteaspoon',
r'usdryquart2ukfluidounce',
r'usdryquart2ukgallon',
r'usdryquart2ukgill',
r'usdryquart2ukpint',
r'usdryquart2ukquart',
r'usdryquart2uktablespoon',
r'usdryquart2ukteaspoon',
r'usdryquart2uscup',
r'usdryquart2usdrygallon',
r'usdryquart2usdrypint',
r'usdryquart2usfluidounce',
r'usdryquart2usgill',
r'usdryquart2usliquidgallon',
r'usdryquart2usliquidpint',
r'usdryquart2usliquidquart',
r'usdryquart2ustablespoon',
r'usdryquart2usteaspoon',
# US FLUID OUNCES #
r'usfluidounce2cancup',
r'usfluidounce2cubicinch',
r'usfluidounce2cubicmeter',
r'usfluidounce2liter',
r'usfluidounce2metriccup',
r'usfluidounce2metrictablespoon',
r'usfluidounce2metricteaspoon',
r'usfluidounce2ukfluidounce',
r'usfluidounce2ukgallon',
r'usfluidounce2ukgill',
r'usfluidounce2ukpint',
r'usfluidounce2ukquart',
r'usfluidounce2uktablespoon',
r'usfluidounce2ukteaspoon',
r'usfluidounce2uscup',
r'usfluidounce2usdrygallon',
r'usfluidounce2usdrypint',
r'usfluidounce2usdryquart',
r'usfluidounce2usgill',
r'usfluidounce2usliquidgallon',
r'usfluidounce2usliquidpint',
r'usfluidounce2usliquidquart',
r'usfluidounce2ustablespoon',
r'usfluidounce2usteaspoon',
# US GILLS #
r'usgill2cancup',
r'usgill2cubicinch',
r'usgill2cubicmeter',
r'usgill2liter',
r'usgill2metriccup',
r'usgill2metrictablespoon',
r'usgill2metricteaspoon',
r'usgill2ukfluidounce',
r'usgill2ukgallon',
r'usgill2ukgill',
r'usgill2ukpint',
r'usgill2ukquart',
r'usgill2uktablespoon',
r'usgill2ukteaspoon',
r'usgill2uscup',
r'usgill2usdrygallon',
r'usgill2usdrypint',
r'usgill2usdryquart',
r'usgill2usfluidounce',
r'usgill2usliquidgallon',
r'usgill2usliquidpint',
r'usgill2usliquidquart',
r'usgill2ustablespoon',
r'usgill2usteaspoon',
# US LIQUID GALLONS #
r'usliquidgallon2cancup',
r'usliquidgallon2cubicinch',
r'usliquidgallon2cubicmeter',
r'usliquidgallon2liter',
r'usliquidgallon2metriccup',
r'usliquidgallon2metrictablespoon',
r'usliquidgallon2metricteaspoon',
r'usliquidgallon2ukfluidounce',
r'usliquidgallon2ukgallon',
r'usliquidgallon2ukgill',
r'usliquidgallon2ukpint',
r'usliquidgallon2ukquart',
r'usliquidgallon2uktablespoon',
r'usliquidgallon2ukteaspoon',
r'usliquidgallon2uscup',
r'usliquidgallon2usdrygallon',
r'usliquidgallon2usdrypint',
r'usliquidgallon2usdryquart',
r'usliquidgallon2usfluidounce',
r'usliquidgallon2usgill',
r'usliquidgallon2usliquidpint',
r'usliquidgallon2usliquidquart',
r'usliquidgallon2ustablespoon',
r'usliquidgallon2usteaspoon',
# US LIQUID PINTS #
r'usliquidpint2cancup',
r'usliquidpint2cubicinch',
r'usliquidpint2cubicmeter',
r'usliquidpint2liter',
r'usliquidpint2metriccup',
r'usliquidpint2metrictablespoon',
r'usliquidpint2metricteaspoon',
r'usliquidpint2ukfluidounce',
r'usliquidpint2ukgallon',
r'usliquidpint2ukgill',
r'usliquidpint2ukpint',
r'usliquidpint2ukquart',
r'usliquidpint2uktablespoon',
r'usliquidpint2ukteaspoon',
r'usliquidpint2uscup',
r'usliquidpint2usdrygallon',
r'usliquidpint2usdrypint',
r'usliquidpint2usdryquart',
r'usliquidpint2usfluidounce',
r'usliquidpint2usgill',
r'usliquidpint2usliquidgallon',
r'usliquidpint2usliquidquart',
r'usliquidpint2ustablespoon',
r'usliquidpint2usteaspoon',
# US LIQUID QUARTS #
r'usliquidquart2cancup',
r'usliquidquart2cubicinch',
r'usliquidquart2cubicmeter',
r'usliquidquart2liter',
r'usliquidquart2metriccup',
r'usliquidquart2metrictablespoon',
r'usliquidquart2metricteaspoon',
r'usliquidquart2ukfluidounce',
r'usliquidquart2ukgallon',
r'usliquidquart2ukgill',
r'usliquidquart2ukpint',
r'usliquidquart2ukquart',
r'usliquidquart2uktablespoon',
r'usliquidquart2ukteaspoon',
r'usliquidquart2uscup',
r'usliquidquart2usdrygallon',
r'usliquidquart2usdrypint',
r'usliquidquart2usdryquart',
r'usliquidquart2usfluidounce',
r'usliquidquart2usgill',
r'usliquidquart2usliquidgallon',
r'usliquidquart2usliquidpint',
r'usliquidquart2ustablespoon',
r'usliquidquart2usteaspoon',
# US TABLESPOONS #
r'ustablespoon2cancup',
r'ustablespoon2cubicinch',
r'ustablespoon2cubicmeter',
r'ustablespoon2liter',
r'ustablespoon2metriccup',
r'ustablespoon2metrictablespoon',
r'ustablespoon2metricteaspoon',
r'ustablespoon2ukfluidounce',
r'ustablespoon2ukgallon',
r'ustablespoon2ukgill',
r'ustablespoon2ukpint',
r'ustablespoon2ukquart',
r'ustablespoon2uktablespoon',
r'ustablespoon2ukteaspoon',
r'ustablespoon2uscup',
r'ustablespoon2usdrygallon',
r'ustablespoon2usdrypint',
r'ustablespoon2usdryquart',
r'ustablespoon2usfluidounce',
r'ustablespoon2usgill',
r'ustablespoon2usliquidgallon',
r'ustablespoon2usliquidpint',
r'ustablespoon2usliquidquart',
r'ustablespoon2usteaspoon',
# US TEASPOONS #
r'usteaspoon2cancup',
r'usteaspoon2cubicinch',
r'usteaspoon2cubicmeter',
r'usteaspoon2liter',
r'usteaspoon2metriccup',
r'usteaspoon2metrictablespoon',
r'usteaspoon2metricteaspoon',
r'usteaspoon2ukfluidounce',
r'usteaspoon2ukgallon',
r'usteaspoon2ukgill',
r'usteaspoon2ukpint',
r'usteaspoon2ukquart',
r'usteaspoon2uktablespoon',
r'usteaspoon2ukteaspoon',
r'usteaspoon2uscup',
r'usteaspoon2usdrygallon',
r'usteaspoon2usdrypint',
r'usteaspoon2usdryquart',
r'usteaspoon2usfluidounce',
r'usteaspoon2usgill',
r'usteaspoon2usliquidgallon',
r'usteaspoon2usliquidpint',
r'usteaspoon2usliquidquart',
r'usteaspoon2ustablespoon'
]
# CUBIC INCHES #
def cancup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Cubic Inches."""
return round(_volume * 13.871, _round)
def cancup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Cubic Meters."""
return round(_volume * 0.00022730372323498659, _round)
def cancup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Liters."""
return round(_volume * 0.22730372323498658908, _round)
def cancup2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Cups."""
return round(_volume * 0.90922, _round)
def cancup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Tablespoons."""
return round(_volume * 15.154, _round)
def cancup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> Metric Teaspoons."""
return round(_volume * 45.461, _round)
def cancup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Fluid Ounces."""
return round(_volume * 8.0, _round)
def cancup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Gallons."""
return round(_volume * 0.050000, _round)
def cancup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Gills."""
return round(_volume * 1.6000, _round)
def cancup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Pints."""
return round(_volume * 0.40000, _round)
def cancup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Quarts."""
return round(_volume * 0.20000, _round)
def cancup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Tablespoons."""
return round(_volume * 16.0, _round)
def cancup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> UK Teaspoons."""
return round(_volume * 64.0, _round)
def cancup2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Cups."""
return round(_volume * 0.96076, _round)
def cancup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Gallons."""
return round(_volume * 0.051603, _round)
def cancup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Pints."""
return round(_volume * 0.41282, _round)
def cancup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Dry Quarts."""
return round(_volume * 0.20641, _round)
def cancup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Fluid Ounces."""
return round(_volume * 7.6861, _round)
def cancup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Gills."""
return round(_volume * 1.9215, _round)
def cancup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Gallons."""
return round(_volume * 0.060047, _round)
def cancup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Pints."""
return round(_volume * 0.48038, _round)
def cancup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Liquid Quarts."""
return round(_volume * 0.24019, _round)
def cancup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Tablespoons."""
return round(_volume * 15.372, _round)
def cancup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Canadian Cups -> US Teaspoons."""
return round(_volume * 46.116, _round)
# CUBIC INCHES #
def cubicinch2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Canadian Cups."""
return round(_volume * 0.072093, _round)
def cubicinch2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Cubic Meters."""
return round(_volume * 0.000016386995, _round)
def cubicinch2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Liters."""
return round(_volume * 0.016386995, _round)
def cubicinch2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Cups."""
return round(_volume * 0.065548, _round)
def cubicinch2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Tablespoons."""
return round(_volume * 1.0925, _round)
def cubicinch2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> Metric Teaspoons."""
return round(_volume * 3.2774, _round)
def cubicinch2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Fluid Ounces."""
return round(_volume * 0.57674, _round)
def cubicinch2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Gallons."""
return round(_volume * 0.0036047, _round)
def cubicinch2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Gills."""
return round(_volume * 0.11535, _round)
def cubicinch2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Pints."""
return round(_volume * 0.028837, _round)
def cubicinch2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Quarts."""
return round(_volume * 0.014419, _round)
def cubicinch2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Tablespoons."""
return round(_volume * 1.1535, _round)
def cubicinch2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> UK Teaspoons."""
return round(_volume * 4.6139, _round)
def cubicinch2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Cups."""
return round(_volume * 0.069264, _round)
def cubicinch2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Gallons."""
return round(_volume * 0.0037202, _round)
def cubicinch2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Pints."""
return round(_volume * 0.029762, _round)
def cubicinch2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Dry Quarts."""
return round(_volume * 0.014881, _round)
def cubicinch2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Fluid Ounces."""
return round(_volume * 0.55411, _round)
def cubicinch2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Gills."""
return round(_volume * 0.13853, _round)
def cubicinch2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Gallons."""
return round(_volume * 0.0043290, _round)
def cubicinch2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Pints."""
return round(_volume * 0.034632, _round)
def cubicinch2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Liquid Quarts."""
return round(_volume * 0.017316, _round)
def cubicinch2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Tablespoons."""
return round(_volume * 1.1082, _round)
def cubicinch2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Inches -> US Teaspoons."""
return round(_volume * 3.3247, _round)
# CUBIC METERS #
def cubicmeter2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Canadian Cups."""
return round(_volume * 4399.4, _round)
def cubicmeter2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Cubic Inches."""
return round(_volume * 61024.0, _round)
def cubicmeter2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Liters."""
return round(_volume * 1000.0, _round)
def cubicmeter2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Cups."""
return round(_volume * 4000.0, _round)
def cubicmeter2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Tablespoons."""
return round(_volume * 66667.0, _round)
def cubicmeter2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> Metric Teaspoons."""
return round(_volume * 200000.0, _round)
def cubicmeter2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Fluid Ounces."""
return round(_volume * 35195.0, _round)
def cubicmeter2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Gallons."""
return round(_volume * 219.97, _round)
def cubicmeter2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Gills."""
return round(_volume * 7039.0, _round)
def cubicmeter2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Pints."""
return round(_volume * 1759.8, _round)
def cubicmeter2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Quarts."""
return round(_volume * 879.88, _round)
def cubicmeter2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Tablespoons."""
return round(_volume * 70390.0, _round)
def cubicmeter2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> UK Teaspoons."""
return round(_volume * 281560.0, _round)
def cubicmeter2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Cups."""
return round(_volume * 4226.8, _round)
def cubicmeter2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Gallons."""
return round(_volume * 227.02, _round)
def cubicmeter2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Pints."""
return round(_volume * 1816.2, _round)
def cubicmeter2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Dry Quarts."""
return round(_volume * 908.08, _round)
def cubicmeter2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Fluid Ounces."""
return round(_volume * 33814, _round)
def cubicmeter2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Gills."""
return round(_volume * 8453.5, _round)
def cubicmeter2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Gallons."""
return round(_volume * 264.17, _round)
def cubicmeter2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Pints."""
return round(_volume * 2113.4, _round)
def cubicmeter2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Liquid Quarts."""
return round(_volume * 1056.7, _round)
def cubicmeter2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Tablespoons."""
return round(_volume * 67628.0, _round)
def cubicmeter2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Cubic Meters -> US Teaspoons."""
return round(_volume * 202880.0, _round)
# LITERS #
def liter2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Canadian Cups."""
return round(_volume * 4.3994, _round)
def liter2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Cubic Inches."""
return round(_volume * 61.024, _round)
def liter2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Cubic Meters."""
return round(_volume * 0.001, _round)
def liter2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Cups."""
return round(_volume * 4.0, _round)
def liter2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Tablespoons."""
return round(_volume * 66.667, _round)
def liter2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> Metric Teaspoons."""
return round(_volume * 200.0, _round)
def liter2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Fluid Ounces."""
return round(_volume * 35.195, _round)
def liter2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Gallons."""
return round(_volume * 0.21997, _round)
def liter2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Gills."""
return round(_volume * 7.039, _round)
def liter2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Pints."""
return round(_volume * 1.7598, _round)
def liter2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Quarts."""
return round(_volume * 0.87988, _round)
def liter2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Tablespoons."""
return round(_volume * 70.39, _round)
def liter2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> UK Teaspoons."""
return round(_volume * 281.560, _round)
def liter2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Cups."""
return round(_volume * 4.2268, _round)
def liter2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Gallons."""
return round(_volume * 0.22702, _round)
def liter2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Pints."""
return round(_volume * 1.8162, _round)
def liter2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Dry Quarts."""
return round(_volume * 0.90808, _round)
def liter2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Fluid Ounces."""
return round(_volume * 33.814, _round)
def liter2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Gills."""
return round(_volume * 8.4535, _round)
def liter2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Gallons."""
return round(_volume * 0.26417, _round)
def liter2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Pints."""
return round(_volume * 2.1134, _round)
def liter2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Liquid Quarts."""
return round(_volume * 1.0567, _round)
def liter2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Tablespoons."""
return round(_volume * 67.628, _round)
def liter2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Liters -> US Teaspoons."""
return round(_volume * 202.8800, _round)
# METRIC CUPS #
def metriccup2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Canadian Cups."""
return round(_volume * 1.0998, _round)
def metriccup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Cubic Inches."""
return round(_volume * 15.256, _round)
def metriccup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Cubic Meters."""
return round(_volume * 0.00025, _round)
def metriccup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Liters."""
return round(_volume * 0.250, _round)
def metriccup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Metric Tablespoons."""
return round(_volume * 16.667, _round)
def metriccup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> Metric Teaspoons."""
return round(_volume * 50.0, _round)
def metriccup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Fluid Ounces."""
return round(_volume * 8.7988, _round)
def metriccup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Gallons."""
return round(_volume * 0.054992, _round)
def metriccup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Gills."""
return round(_volume * 1.7598, _round)
def metriccup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Pints."""
return round(_volume * 0.43994, _round)
def metriccup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Quarts."""
return round(_volume * 0.21997, _round)
def metriccup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Tablespoons."""
return round(_volume * 17.598, _round)
def metriccup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> UK Teaspoons."""
return round(_volume * 70.390, _round)
def metriccup2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Cups."""
return round(_volume * 1.0567, _round)
def metriccup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Gallons."""
return round(_volume * 0.056755, _round)
def metriccup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Pints."""
return round(_volume * 0.45404, _round)
def metriccup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Dry Quarts."""
return round(_volume * 0.22702, _round)
def metriccup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Fluid Ounces."""
return round(_volume * 8.4535, _round)
def metriccup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Gills."""
return round(_volume * 2.1134, _round)
def metriccup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Gallons."""
return round(_volume * 0.066043, _round)
def metriccup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Pints."""
return round(_volume * 0.52834, _round)
def metriccup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Liquid Quarts."""
return round(_volume * 0.26417, _round)
def metriccup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Tablespoons."""
return round(_volume * 16.907, _round)
def metriccup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Cups -> US Teaspoons."""
return round(_volume * 50.721, _round)
# METRIC TABLESPOONS #
def metrictablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Canadian Cups."""
return round(_volume * 0.065991, _round)
def metrictablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Cubic Inches."""
return round(_volume * 0.91536, _round)
def metrictablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Cubic Meters."""
return round(_volume * 0.000014999925000375, _round)
def metrictablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Liters."""
return round(_volume * 0.01499992500037499813, _round)
def metrictablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Metric Cups."""
return round(_volume * 0.060, _round)
def metrictablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> Metric Teaspoons."""
return round(_volume * 3.0, _round)
def metrictablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.52793, _round)
def metrictablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Gallons."""
return round(_volume * 0.0032995, _round)
def metrictablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Gills."""
return round(_volume * 0.10559, _round)
def metrictablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Pints."""
return round(_volume * 0.026396, _round)
def metrictablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Quarts."""
return round(_volume * 0.013198, _round)
def metrictablespoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Tablespoons."""
return round(_volume * 1.0559, _round)
def metrictablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> UK Teaspoons."""
return round(_volume * 4.2234, _round)
def metrictablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Cups."""
return round(_volume * 0.063401, _round)
def metrictablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0034053, _round)
def metrictablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Pints."""
return round(_volume * 0.027242, _round)
def metrictablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Dry Quarts."""
return round(_volume * 0.013621, _round)
def metrictablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.50721, _round)
def metrictablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Gills."""
return round(_volume * 0.12680, _round)
def metrictablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0039626, _round)
def metrictablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Pints."""
return round(_volume * 0.031701, _round)
def metrictablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015850, _round)
def metrictablespoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Tablespoons."""
return round(_volume * 1.0144, _round)
def metrictablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Tablespoons -> US Teaspoons."""
return round(_volume * 3.0433, _round)
# METRIC TEASPOONS #
def metricteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Canadian Cups."""
return round(_volume * 0.021997, _round)
def metricteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Cubic Inches."""
return round(_volume * 0.30512, _round)
def metricteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Cubic Meters."""
return round(_volume * 0.000005, _round)
def metricteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Liters."""
return round(_volume * 0.005, _round)
def metricteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Metric Cups."""
return round(_volume * 0.020000, _round)
def metricteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.33333333, _round)
def metricteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.17598, _round)
def metricteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Gallons."""
return round(_volume * 0.0010998, _round)
def metricteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Gills."""
return round(_volume * 0.035195, _round)
def metricteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Pints."""
return round(_volume * 0.0087988, _round)
def metricteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Quarts."""
return round(_volume * 0.0043994, _round)
def metricteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Tablespoons."""
return round(_volume * 0.35195, _round)
def metricteaspoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> UK Teaspoons."""
return round(_volume * 1.4078, _round)
def metricteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Cups."""
return round(_volume * 0.021134, _round)
def metricteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Gallons."""
return round(_volume * 0.0011351, _round)
def metricteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Pints."""
return round(_volume * 0.0090808, _round)
def metricteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0045404, _round)
def metricteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.16907, _round)
def metricteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Gills."""
return round(_volume * 0.042268, _round)
def metricteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.0013209, _round)
def metricteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Pints."""
return round(_volume * 0.010567, _round)
def metricteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0052834, _round)
def metricteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Tablespoons."""
return round(_volume * 0.33814, _round)
def metricteaspoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""Metric Teaspoons -> US Teaspoons."""
return round(_volume * 1.0144, _round)
# UK FLUID OUNCES #
def ukfluidounce2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Canadian Cups."""
return round(_volume * 0.12500, _round)
def ukfluidounce2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Cubic Inches."""
return round(_volume * 1.7339, _round)
def ukfluidounce2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Cubic Meters."""
return round(_volume * 0.00002841312686461145, _round)
def ukfluidounce2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Liters."""
return round(_volume * 0.02841312686461145049, _round)
def ukfluidounce2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Cups."""
return round(_volume * 0.11365, _round)
def ukfluidounce2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Tablespoons."""
return round(_volume * 1.8942, _round)
def ukfluidounce2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> Metric Teaspoons."""
return round(_volume * 5.6826, _round)
def ukfluidounce2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Gallons."""
return round(_volume * 0.0062500, _round)
def ukfluidounce2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Gills."""
return round(_volume * 0.20000, _round)
def ukfluidounce2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Pints."""
return round(_volume * 0.050000, _round)
def ukfluidounce2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Quarts."""
return round(_volume * 0.025000, _round)
def ukfluidounce2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Tablespoons."""
return round(_volume * 2.0000, _round)
def ukfluidounce2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> UK Teaspoons."""
return round(_volume * 8.0000, _round)
def ukfluidounce2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Cups."""
return round(_volume * 0.12009, _round)
def ukfluidounce2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Gallons."""
return round(_volume * 0.0064504, _round)
def ukfluidounce2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Pints."""
return round(_volume * 0.051603, _round)
def ukfluidounce2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Dry Quarts."""
return round(_volume * 0.025801, _round)
def ukfluidounce2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Fluid Ounces."""
return round(_volume * 0.96076, _round)
def ukfluidounce2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Gills."""
return round(_volume * 0.24019, _round)
def ukfluidounce2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Gallons."""
return round(_volume * 0.0075059, _round)
def ukfluidounce2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Pints."""
return round(_volume * 0.060047, _round)
def ukfluidounce2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Liquid Quarts."""
return round(_volume * 0.030024, _round)
def ukfluidounce2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Tablespoons."""
return round(_volume * 1.9215, _round)
def ukfluidounce2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Fluid Ounces -> US Teaspoons."""
return round(_volume * 5.7646, _round)
# UK GALLONS #
def ukgallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Canadian Cups."""
return round(_volume * 20.000, _round)
def ukgallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Cubic Inches."""
return round(_volume * 277.42, _round)
def ukgallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Cubic Meters."""
return round(_volume * 0.00454607446469973178, _round)
def ukgallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Liters."""
return round(_volume * 4.54607446469973178161, _round)
def ukgallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Cups."""
return round(_volume * 18.184, _round)
def ukgallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Tablespoons."""
return round(_volume * 303.07, _round)
def ukgallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> Metric Teaspoons."""
return round(_volume * 909.22, _round)
def ukgallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Fluid Ounces."""
return round(_volume * 160.00, _round)
def ukgallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Gills."""
return round(_volume * 32.000, _round)
def ukgallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Pints."""
return round(_volume * 8.0000, _round)
def ukgallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Quarts."""
return round(_volume * 4.0000, _round)
def ukgallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Tablespoons."""
return round(_volume * 320.00, _round)
def ukgallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> UK Teaspoons."""
return round(_volume * 1280.0, _round)
def ukgallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Cups."""
return round(_volume * 19.215, _round)
def ukgallon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Gallons."""
return round(_volume * 1.0321, _round)
def ukgallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Pints."""
return round(_volume * 8.2565, _round)
def ukgallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Dry Quarts."""
return round(_volume * 4.1282, _round)
def ukgallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Fluid Ounces."""
return round(_volume * 153.72, _round)
def ukgallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Gills."""
return round(_volume * 38.430, _round)
def ukgallon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Gallons."""
return round(_volume * 1.2009, _round)
def ukgallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Pints."""
return round(_volume * 9.6076, _round)
def ukgallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Liquid Quarts."""
return round(_volume * 4.8038, _round)
def ukgallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Tablespoons."""
return round(_volume * 307.44, _round)
def ukgallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gallons -> US Teaspoons."""
return round(_volume * 922.33, _round)
# UK GILLS #
def ukgill2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Canadian Cups."""
return round(_volume * 0.62500, _round)
def ukgill2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Cubic Inches."""
return round(_volume * 8.6694, _round)
def ukgill2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Cubic Meters."""
return round(_volume * 0.00014206563432305725, _round)
def ukgill2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Liters."""
return round(_volume * 0.14206563432305725245, _round)
def ukgill2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Cups."""
return round(_volume * 0.56826, _round)
def ukgill2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Tablespoons."""
return round(_volume * 9.4710, _round)
def ukgill2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> Metric Teaspoons."""
return round(_volume * 28.413, _round)
def ukgill2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Fluid Ounces."""
return round(_volume * 5.0000, _round)
def ukgill2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Gallons."""
return round(_volume * 0.031250, _round)
def ukgill2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Pints."""
return round(_volume * 0.25000, _round)
def ukgill2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Quarts."""
return round(_volume * 0.12500, _round)
def ukgill2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Tablespoons."""
return round(_volume * 10.000, _round)
def ukgill2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> UK Teaspoons."""
return round(_volume * 40.000, _round)
def ukgill2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Cups."""
return round(_volume * 0.60047, _round)
def ukgill2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Gallons."""
return round(_volume * 0.032252, _round)
def ukgill2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Pints."""
return round(_volume * 0.25801, _round)
def ukgill2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Dry Quarts."""
return round(_volume * 0.12901, _round)
def ukgill2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Fluid Ounces."""
return round(_volume * 4.8038, _round)
def ukgill2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Gills."""
return round(_volume * 1.2009, _round)
def ukgill2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Gallons."""
return round(_volume * 0.037530, _round)
def ukgill2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Pints."""
return round(_volume * 0.30024, _round)
def ukgill2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Liquid Quarts."""
return round(_volume * 0.15012, _round)
def ukgill2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Tablespoons."""
return round(_volume * 9.6076, _round)
def ukgill2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Gills -> US Teaspoons."""
return round(_volume * 28.823, _round)
# UK PINTS #
def ukpint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Canadian Cups."""
return round(_volume * 2.5000, _round)
def ukpint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Cubic Inches."""
return round(_volume * 34.677, _round)
def ukpint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Cubic Meters."""
return round(_volume * 0.00056824639163541312, _round)
def ukpint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Liters."""
return round(_volume * 0.56824639163541311513, _round)
def ukpint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Cups."""
return round(_volume * 2.2730, _round)
def ukpint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Tablespoons."""
return round(_volume * 37.884, _round)
def ukpint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> Metric Teaspoons."""
return round(_volume * 113.65, _round)
def ukpint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Fluid Ounces."""
return round(_volume * 20.000, _round)
def ukpint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Gallons."""
return round(_volume * 0.12500, _round)
def ukpint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Gills."""
return round(_volume * 4.0000, _round)
def ukpint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Quarts."""
return round(_volume * 0.50000, _round)
def ukpint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Tablespoons."""
return round(_volume * 40.000, _round)
def ukpint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> UK Teaspoons."""
return round(_volume * 160.00, _round)
def ukpint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Cups."""
return round(_volume * 2.4019, _round)
def ukpint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Gallons."""
return round(_volume * 0.0011351, _round)
def ukpint2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Pints."""
return round(_volume * 1.0321, _round)
def ukpint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Dry Quarts."""
return round(_volume * 0.51603, _round)
def ukpint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Fluid Ounces."""
return round(_volume * 19.215, _round)
def ukpint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Gills."""
return round(_volume * 4.8038, _round)
def ukpint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Gallons."""
return round(_volume * 0.15012, _round)
def ukpint2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Pints."""
return round(_volume * 1.2009, _round)
def ukpint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Liquid Quarts."""
return round(_volume * 0.60047, _round)
def ukpint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Tablespoons."""
return round(_volume * 38.430, _round)
def ukpint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Pints -> US Teaspoons."""
return round(_volume * 115.29, _round)
# UK QUARTS #
def ukquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Canadian Cups."""
return round(_volume * 5.0000, _round)
def ukquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Cubic Inches."""
return round(_volume * 69.355, _round)
def ukquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Cubic Meters."""
return round(_volume * 0.00113651861617493295, _round)
def ukquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Liters."""
return round(_volume * 1.1365186161749329454, _round)
def ukquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Cups."""
return round(_volume * 4.5461, _round)
def ukquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Tablespoons."""
return round(_volume * 75.768, _round)
def ukquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> Metric Teaspoons."""
return round(_volume * 227.30, _round)
def ukquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Fluid Ounces."""
return round(_volume * 40.000, _round)
def ukquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Gallons."""
return round(_volume * 0.25000, _round)
def ukquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Gills."""
return round(_volume * 8.0000, _round)
def ukquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Pints."""
return round(_volume * 2.0000, _round)
def ukquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Tablespoons."""
return round(_volume * 80.000, _round)
def ukquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> UK Teaspoons."""
return round(_volume * 320.00, _round)
def ukquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Cups."""
return round(_volume * 4.8038, _round)
def ukquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Gallons."""
return round(_volume * 0.25801, _round)
def ukquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Pints."""
return round(_volume * 2.0641, _round)
def ukquart2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Dry Quarts."""
return round(_volume * 1.0321, _round)
def ukquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Fluid Ounces."""
return round(_volume * 38.430, _round)
def ukquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Gills."""
return round(_volume * 9.6076, _round)
def ukquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Gallons."""
return round(_volume * 0.30024, _round)
def ukquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Pints."""
return round(_volume * 2.4019, _round)
def ukquart2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Liquid Quarts."""
return round(_volume * 1.2009, _round)
def ukquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Tablespoons."""
return round(_volume * 76.861, _round)
def ukquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Quarts -> US Teaspoons."""
return round(_volume * 230.58, _round)
# UK TABLESPOONS #
def uktablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Canadian Cups."""
return round(_volume * 0.062500, _round)
def uktablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Cubic Inches."""
return round(_volume * 0.86694, _round)
def uktablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Cubic Meters."""
return round(_volume * 0.00001420656343230573, _round)
def uktablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Liters."""
return round(_volume * 0.01420656343230572525, _round)
def uktablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Cups."""
return round(_volume * 0.056826, _round)
def uktablespoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Tablespoons."""
return round(_volume * 0.94710, _round)
def uktablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> Metric Teaspoons."""
return round(_volume * 2.8413, _round)
def uktablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.50000, _round)
def uktablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Gallons."""
return round(_volume * 0.0031250, _round)
def uktablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Gills."""
return round(_volume * 0.10000, _round)
def uktablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Pints."""
return round(_volume * 0.025000, _round)
def uktablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Quarts."""
return round(_volume * 0.012500, _round)
def uktablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> UK Teaspoons."""
return round(_volume * 4.0000, _round)
def uktablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Cups."""
return round(_volume * 0.060048, _round)
def uktablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0032252, _round)
def uktablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Pints."""
return round(_volume * 0.025801, _round)
def uktablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Dry Quarts."""
return round(_volume * 0.012901, _round)
def uktablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.48038, _round)
def uktablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Gills."""
return round(_volume * 0.12010, _round)
def uktablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0037530, _round)
def uktablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Pints."""
return round(_volume * 0.030024, _round)
def uktablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015012, _round)
def uktablespoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Tablespoons."""
return round(_volume * 0.96076, _round)
def uktablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Tablespoons -> US Teaspoons."""
return round(_volume * 2.8823, _round)
# UK TEASPOONS #
def ukteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Canadian Cups."""
return round(_volume * 0.015625, _round)
def ukteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Cubic Inches."""
return round(_volume * 0.21673, _round)
def ukteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Cubic Meters."""
return round(_volume * 0.00000355164085807643, _round)
def ukteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Liters."""
return round(_volume * 0.00355164085807643131, _round)
def ukteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Cups."""
return round(_volume * 0.014207, _round)
def ukteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.23678, _round)
def ukteaspoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> Metric Teaspoons."""
return round(_volume * 0.71033, _round)
def ukteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.12500, _round)
def ukteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Gallons."""
return round(_volume * 0.00078125, _round)
def ukteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Gills."""
return round(_volume * 0.025000, _round)
def ukteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Pints."""
return round(_volume * 0.0062500, _round)
def ukteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Quarts."""
return round(_volume * 0.0031250, _round)
def ukteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> UK Tablespoons."""
return round(_volume * 0.25000, _round)
def ukteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Cups."""
return round(_volume * 0.015012, _round)
def ukteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Gallons."""
return round(_volume * 0.00080630, _round)
def ukteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Pints."""
return round(_volume * 0.0064504, _round)
def ukteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0032252, _round)
def ukteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.12010, _round)
def ukteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Gills."""
return round(_volume * 0.030024, _round)
def ukteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.00093824, _round)
def ukteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Pints."""
return round(_volume * 0.0075060, _round)
def ukteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0037530, _round)
def ukteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Tablespoons."""
return round(_volume * 0.24019, _round)
def ukteaspoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""UK Teaspoons -> US Teaspoons."""
return round(_volume * 0.72057, _round)
# US CUPS #
def uscup2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Canadian Cups."""
return round(_volume * 1.0408, _round)
def uscup2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Cubic Inches."""
return round(_volume * 14.438, _round)
def uscup2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Cubic Meters."""
return round(_volume * 0.0002365855966688748, _round)
def uscup2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Liters."""
return round(_volume * 0.2365855966688747989, _round)
def uscup2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Cups."""
return round(_volume * 0.94635, _round)
def uscup2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Tablespoons."""
return round(_volume * 15.773, _round)
def uscup2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> Metric Teaspoons."""
return round(_volume * 47.318, _round)
def uscup2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Fluid Ounces."""
return round(_volume * 8.3267, _round)
def uscup2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Gallons."""
return round(_volume * 0.052042, _round)
def uscup2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Gills."""
return round(_volume * 1.6653, _round)
def uscup2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Pints."""
return round(_volume * 0.41634, _round)
def uscup2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Quarts."""
return round(_volume * 0.20817, _round)
def uscup2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Tablespoons."""
return round(_volume * 16.653, _round)
def uscup2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> UK Teaspoons."""
return round(_volume * 66.614, _round)
def uscup2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Gallons."""
return round(_volume * 0.053710, _round)
def uscup2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Pints."""
return round(_volume * 0.42968, _round)
def uscup2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Dry Quarts."""
return round(_volume * 0.21484, _round)
def uscup2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Fluid Ounces."""
return round(_volume * 8.0000, _round)
def uscup2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Gills."""
return round(_volume * 2.0000, _round)
def uscup2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Gallons."""
return round(_volume * 0.062500, _round)
def uscup2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Pints."""
return round(_volume * 0.50000, _round)
def uscup2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Liquid Quarts."""
return round(_volume * 0.25000, _round)
def uscup2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Tablespoons."""
return round(_volume * 16.000, _round)
def uscup2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Cups -> US Teaspoons."""
return round(_volume * 48.000, _round)
# US DRY GALLONS #
def usdrygallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Canadian Cups."""
return round(_volume * 19.379, _round)
def usdrygallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Cubic Inches."""
return round(_volume * 268.80, _round)
def usdrygallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Cubic Meters."""
return round(_volume * 0.00440489824685049775, _round)
def usdrygallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Liters."""
return round(_volume * 4.4048982468504977535, _round)
def usdrygallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Cups."""
return round(_volume * 17.620, _round)
def usdrygallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Tablespoons."""
return round(_volume * 293.66, _round)
def usdrygallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> Metric Teaspoons."""
return round(_volume * 880.98, _round)
def usdrygallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Fluid Ounces."""
return round(_volume * 155.03, _round)
def usdrygallon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Gallons."""
return round(_volume * 0.96894, _round)
def usdrygallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Gills."""
return round(_volume * 31.006, _round)
def usdrygallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Pints."""
return round(_volume * 7.7515, _round)
def usdrygallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Quarts."""
return round(_volume * 3.8758, _round)
def usdrygallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Tablespoons."""
return round(_volume * 310.06, _round)
def usdrygallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> UK Teaspoons."""
return round(_volume * 1240.2, _round)
def usdrygallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Cups."""
return round(_volume * 18.618, _round)
def usdrygallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Dry Pints."""
return round(_volume * 8.0000, _round)
def usdrygallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Dry Quarts."""
return round(_volume * 4.0000, _round)
def usdrygallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Fluid Ounces."""
return round(_volume * 148.95, _round)
def usdrygallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Gills."""
return round(_volume * 37.237, _round)
def usdrygallon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Gallons."""
return round(_volume * 1.1636, _round)
def usdrygallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Pints."""
return round(_volume * 9.3092, _round)
def usdrygallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Liquid Quarts."""
return round(_volume * 4.6546, _round)
def usdrygallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Tablespoons."""
return round(_volume * 297.89, _round)
def usdrygallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Gallons -> US Teaspoons."""
return round(_volume * 893.68, _round)
# US DRY PINTS #
def usdrypint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Canadian Cups."""
return round(_volume * 2.4223, _round)
def usdrypint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Cubic Inches."""
return round(_volume * 33.600, _round)
def usdrypint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Cubic Meters."""
return round(_volume * 0.00055060015416804317, _round)
def usdrypint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Liters."""
return round(_volume * 0.55060015416804316705, _round)
def usdrypint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Cups."""
return round(_volume * 2.2024, _round)
def usdrypint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Tablespoons."""
return round(_volume * 36.707, _round)
def usdrypint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> Metric Teaspoons."""
return round(_volume * 110.12, _round)
def usdrypint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Fluid Ounces."""
return round(_volume * 19.379, _round)
def usdrypint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Gallons."""
return round(_volume * 0.12112, _round)
def usdrypint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Gills."""
return round(_volume * 3.8758, _round)
def usdrypint2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Pints."""
return round(_volume * 0.96894, _round)
def usdrypint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Quarts."""
return round(_volume * 0.48447, _round)
def usdrypint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Tablespoons."""
return round(_volume * 38.758, _round)
def usdrypint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> UK Teaspoons."""
return round(_volume * 155.03, _round)
def usdrypint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Cups."""
return round(_volume * 2.3273, _round)
def usdrypint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Dry Gallons."""
return round(_volume * 0.12500, _round)
def usdrypint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Dry Quarts."""
return round(_volume * 0.50000, _round)
def usdrypint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Fluid Ounces."""
return round(_volume * 18.618, _round)
def usdrypint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Gills."""
return round(_volume * 4.6546, _round)
def usdrypint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Gallons."""
return round(_volume * 0.14546, _round)
def usdrypint2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Pints."""
return round(_volume * 1.1636, _round)
def usdrypint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Liquid Quarts."""
return round(_volume * 0.58182, _round)
def usdrypint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Tablespoons."""
return round(_volume * 37.237, _round)
def usdrypint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Pints -> US Teaspoons."""
return round(_volume * 111.71, _round)
# US DRY QUARTS #
def usdryquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Canadian Cups."""
return round(_volume * 4.8447, _round)
def usdryquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Cubic Inches."""
return round(_volume * 67.201, _round)
def usdryquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Cubic Meters."""
return round(_volume * 0.00110122456171262444, _round)
def usdryquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Liters."""
return round(_volume * 1.10122456171262443838, _round)
def usdryquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Cups."""
return round(_volume * 4.4049, _round)
def usdryquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Tablespoons."""
return round(_volume * 73.415, _round)
def usdryquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> Metric Teaspoons."""
return round(_volume * 220.24, _round)
def usdryquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Fluid Ounces."""
return round(_volume * 38.758, _round)
def usdryquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Gallons."""
return round(_volume * 0.24223, _round)
def usdryquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Gills."""
return round(_volume * 7.7515, _round)
def usdryquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Pints."""
return round(_volume * 1.9379, _round)
def usdryquart2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Quarts."""
return round(_volume * 0.96894, _round)
def usdryquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Tablespoons."""
return round(_volume * 77.515, _round)
def usdryquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> UK Teaspoons."""
return round(_volume * 310.06, _round)
def usdryquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Cups."""
return round(_volume * 4.6546, _round)
def usdryquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Dry Gallons."""
return round(_volume * 0.25000, _round)
def usdryquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Dry Pints."""
return round(_volume * 2.0000, _round)
def usdryquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Fluid Ounces."""
return round(_volume * 37.237, _round)
def usdryquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Gills."""
return round(_volume * 9.3092, _round)
def usdryquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Gallons."""
return round(_volume * 0.29091, _round)
def usdryquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Pints."""
return round(_volume * 2.3273, _round)
def usdryquart2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Liquid Quarts."""
return round(_volume * 1.1636, _round)
def usdryquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Tablespoons."""
return round(_volume * 74.473, _round)
def usdryquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Dry Quarts -> US Teaspoons."""
return round(_volume * 223.42, _round)
# US FLUID OUNCES #
def usfluidounce2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Canadian Cups."""
return round(_volume * 0.13011, _round)
def usfluidounce2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Cubic Inches."""
return round(_volume * 1.8047, _round)
def usfluidounce2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Cubic Meters."""
return round(_volume * 0.00002957354941740108, _round)
def usfluidounce2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Liters."""
return round(_volume * 0.02957354941740107648, _round)
def usfluidounce2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Cups."""
return round(_volume * 0.11829, _round)
def usfluidounce2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Tablespoons."""
return round(_volume * 1.9716, _round)
def usfluidounce2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> Metric Teaspoons."""
return round(_volume * 5.9147, _round)
def usfluidounce2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Fluid Ounces."""
return round(_volume * 1.0408, _round)
def usfluidounce2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Gallons."""
return round(_volume * 0.0065053, _round)
def usfluidounce2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Gills."""
return round(_volume * 0.20817, _round)
def usfluidounce2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Pints."""
return round(_volume * 0.052042, _round)
def usfluidounce2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Quarts."""
return round(_volume * 0.026021, _round)
def usfluidounce2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Tablespoons."""
return round(_volume * 2.0817, _round)
def usfluidounce2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> UK Teaspoons."""
return round(_volume * 8.3267, _round)
def usfluidounce2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Cups."""
return round(_volume * 0.12500, _round)
def usfluidounce2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Gallons."""
return round(_volume * 0.0067138, _round)
def usfluidounce2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Pints."""
return round(_volume * 0.053710, _round)
def usfluidounce2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Dry Quarts."""
return round(_volume * 0.026855, _round)
def usfluidounce2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Gills."""
return round(_volume * 0.25000, _round)
def usfluidounce2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Gallons."""
return round(_volume * 0.0078125, _round)
def usfluidounce2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Pints."""
return round(_volume * 0.062500, _round)
def usfluidounce2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Liquid Quarts."""
return round(_volume * 0.031250, _round)
def usfluidounce2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Tablespoons."""
return round(_volume * 2.0000, _round)
def usfluidounce2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Fluid Ounces -> US Teaspoons."""
return round(_volume * 6.0000, _round)
# US GILLS #
def usgill2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Canadian Cups."""
return round(_volume * 0.52042, _round)
def usgill2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Cubic Inches."""
return round(_volume * 7.2187, _round)
def usgill2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Cubic Meters."""
return round(_volume * 0.00011829419766960431, _round)
def usgill2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Liters."""
return round(_volume * 0.11829419766960430591, _round)
def usgill2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Cups."""
return round(_volume * 0.47318, _round)
def usgill2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Tablespoons."""
return round(_volume * 7.8863, _round)
def usgill2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> Metric Teaspoons."""
return round(_volume * 23.659, _round)
def usgill2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Fluid Ounces."""
return round(_volume * 4.1634, _round)
def usgill2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Gallons."""
return round(_volume * 0.026021, _round)
def usgill2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Gills."""
return round(_volume * 0.83267, _round)
def usgill2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Pints."""
return round(_volume * 0.20817, _round)
def usgill2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Quarts."""
return round(_volume * 0.10408, _round)
def usgill2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Tablespoons."""
return round(_volume * 8.3267, _round)
def usgill2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> UK Teaspoons."""
return round(_volume * 33.307, _round)
def usgill2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Cups."""
return round(_volume * 0.50000, _round)
def usgill2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Gallons."""
return round(_volume * 0.026855, _round)
def usgill2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Pints."""
return round(_volume * 0.21484, _round)
def usgill2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Dry Quarts."""
return round(_volume * 0.10742, _round)
def usgill2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Fluid Ounces."""
return round(_volume * 4.0000, _round)
def usgill2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Gallons."""
return round(_volume * 0.031250, _round)
def usgill2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Pints."""
return round(_volume * 0.25000, _round)
def usgill2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Liquid Quarts."""
return round(_volume * 0.12500, _round)
def usgill2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Tablespoons."""
return round(_volume * 8.0000, _round)
def usgill2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Gills -> US Teaspoons."""
return round(_volume * 24.000, _round)
# US LIQUID GALLONS #
def usliquidgallon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Canadian Cups."""
return round(_volume * 16.653, _round)
def usliquidgallon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Cubic Inches."""
return round(_volume * 231.00, _round)
def usliquidgallon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Cubic Meters."""
return round(_volume * 0.00378544119317106409, _round)
def usliquidgallon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Liters."""
return round(_volume * 3.78544119317106408752, _round)
def usliquidgallon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Cups."""
return round(_volume * 15.142, _round)
def usliquidgallon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Tablespoons."""
return round(_volume * 252.36, _round)
def usliquidgallon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> Metric Teaspoons."""
return round(_volume * 757.08, _round)
def usliquidgallon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Fluid Ounces."""
return round(_volume * 133.23, _round)
def usliquidgallon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Gallons."""
return round(_volume * 0.83267, _round)
def usliquidgallon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Gills."""
return round(_volume * 26.646, _round)
def usliquidgallon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Pints."""
return round(_volume * 6.6614, _round)
def usliquidgallon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Quarts."""
return round(_volume * 3.3307, _round)
def usliquidgallon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Tablespoons."""
return round(_volume * 266.46, _round)
def usliquidgallon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> UK Teaspoons."""
return round(_volume * 1065.8, _round)
def usliquidgallon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Cups."""
return round(_volume * 16.000, _round)
def usliquidgallon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Gallons."""
return round(_volume * 0.85937, _round)
def usliquidgallon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Pints."""
return round(_volume * 6.8749, _round)
def usliquidgallon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Dry Quarts."""
return round(_volume * 3.4375, _round)
def usliquidgallon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Fluid Ounces."""
return round(_volume * 128.00, _round)
def usliquidgallon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Gills."""
return round(_volume * 32.000, _round)
def usliquidgallon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Liquid Pints."""
return round(_volume * 8.0000, _round)
def usliquidgallon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Liquid Quarts."""
return round(_volume * 4.0000, _round)
def usliquidgallon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Tablespoons."""
return round(_volume * 256.00, _round)
def usliquidgallon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Gallons -> US Teaspoons."""
return round(_volume * 768.00, _round)
# US LIQUID PINTS #
def usliquidpint2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Canadian Cups."""
return round(_volume * 2.0817, _round)
def usliquidpint2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Cubic Inches."""
return round(_volume * 28.875, _round)
def usliquidpint2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Cubic Meters."""
return round(_volume * 0.0004731711933377496, _round)
def usliquidpint2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Liters."""
return round(_volume * 0.4731711933377495978, _round)
def usliquidpint2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Cups."""
return round(_volume * 1.8927, _round)
def usliquidpint2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Tablespoons."""
return round(_volume * 31.545, _round)
def usliquidpint2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> Metric Teaspoons."""
return round(_volume * 94.635, _round)
def usliquidpint2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Fluid Ounces."""
return round(_volume * 16.653, _round)
def usliquidpint2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Gallons."""
return round(_volume * 0.10408, _round)
def usliquidpint2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Gills."""
return round(_volume * 3.3307, _round)
def usliquidpint2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Pints."""
return round(_volume * 0.83267, _round)
def usliquidpint2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Quarts."""
return round(_volume * 0.41634, _round)
def usliquidpint2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Tablespoons."""
return round(_volume * 33.307, _round)
def usliquidpint2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> UK Teaspoons."""
return round(_volume * 133.23, _round)
def usliquidpint2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Cups."""
return round(_volume * 2.0000, _round)
def usliquidpint2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Gallons."""
return round(_volume * 0.10742, _round)
def usliquidpint2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Pints."""
return round(_volume * 0.85937, _round)
def usliquidpint2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Dry Quarts."""
return round(_volume * 0.42968, _round)
def usliquidpint2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Fluid Ounces."""
return round(_volume * 16.000, _round)
def usliquidpint2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Gills."""
return round(_volume * 4.0000, _round)
def usliquidpint2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Liquid Gallons."""
return round(_volume * 0.12500, _round)
def usliquidpint2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Liquid Quarts."""
return round(_volume * 0.50000, _round)
def usliquidpint2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Tablespoons."""
return round(_volume * 32.000, _round)
def usliquidpint2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Pints -> US Teaspoons."""
return round(_volume * 96.000, _round)
# US LIQUID QUARTS #
def usliquidquart2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Canadian Cups."""
return round(_volume * 4.1634, _round)
def usliquidquart2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Cubic Inches."""
return round(_volume * 57.750, _round)
def usliquidquart2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Cubic Meters."""
return round(_volume * 0.0009463423866754992, _round)
def usliquidquart2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Liters."""
return round(_volume * 0.94634238667549919561, _round)
def usliquidquart2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Cups."""
return round(_volume * 3.7854, _round)
def usliquidquart2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Tablespoons."""
return round(_volume * 63.090, _round)
def usliquidquart2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> Metric Teaspoons."""
return round(_volume * 189.27, _round)
def usliquidquart2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Fluid Ounces."""
return round(_volume * 33.307, _round)
def usliquidquart2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Gallons."""
return round(_volume * 0.20817, _round)
def usliquidquart2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Gills."""
return round(_volume * 6.6614, _round)
def usliquidquart2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Pints."""
return round(_volume * 1.6653, _round)
def usliquidquart2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Quarts."""
return round(_volume * 0.83267, _round)
def usliquidquart2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Tablespoons."""
return round(_volume * 66.614, _round)
def usliquidquart2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> UK Teaspoons."""
return round(_volume * 266.46, _round)
def usliquidquart2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Cups."""
return round(_volume * 4.0000, _round)
def usliquidquart2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Gallons."""
return round(_volume * 0.21484, _round)
def usliquidquart2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Pints."""
return round(_volume * 1.7187, _round)
def usliquidquart2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Dry Quarts."""
return round(_volume * 0.85937, _round)
def usliquidquart2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Fluid Ounces."""
return round(_volume * 32.000, _round)
def usliquidquart2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Gills."""
return round(_volume * 8.0000, _round)
def usliquidquart2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Liquid Gallons."""
return round(_volume * 0.25000, _round)
def usliquidquart2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Liquid Pints."""
return round(_volume * 2.0000, _round)
def usliquidquart2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Tablespoons."""
return round(_volume * 64.000, _round)
def usliquidquart2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Liquid Quarts -> US Teaspoons."""
return round(_volume * 192.00, _round)
# US TABLESPOONS #
def ustablespoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Canadian Cups."""
return round(_volume * 0.065053, _round)
def ustablespoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Cubic Inches."""
return round(_volume * 0.90234, _round)
def ustablespoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Cubic Meters."""
return round(_volume * 0.00001478677470870054, _round)
def ustablespoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Liters."""
return round(_volume * 0.01478677470870053824, _round)
def ustablespoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Cups."""
return round(_volume * 0.059147, _round)
def ustablespoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Tablespoons."""
return round(_volume * 0.98578, _round)
def ustablespoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> Metric Teaspoons."""
return round(_volume * 2.9574, _round)
def ustablespoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Fluid Ounces."""
return round(_volume * 0.52042, _round)
def ustablespoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Gallons."""
return round(_volume * 0.0032526, _round)
def ustablespoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Gills."""
return round(_volume * 0.10408, _round)
def ustablespoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Pints."""
return round(_volume * 0.026021, _round)
def ustablespoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Quarts."""
return round(_volume * 0.013011, _round)
def ustablespoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Tablespoons."""
return round(_volume * 1.0408, _round)
def ustablespoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> UK Teaspoons."""
return round(_volume * 4.1634, _round)
def ustablespoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Cups."""
return round(_volume * 0.062500, _round)
def ustablespoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Gallons."""
return round(_volume * 0.0033569, _round)
def ustablespoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Pints."""
return round(_volume * 0.026855, _round)
def ustablespoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Dry Quarts."""
return round(_volume * 0.013428, _round)
def ustablespoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Fluid Ounces."""
return round(_volume * 0.50000, _round)
def ustablespoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Gills."""
return round(_volume * 0.12500, _round)
def ustablespoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Gallons."""
return round(_volume * 0.0039062, _round)
def ustablespoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Pints."""
return round(_volume * 0.031250, _round)
def ustablespoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Liquid Quarts."""
return round(_volume * 0.015625, _round)
def ustablespoon2usteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Tablespoons -> US Teaspoons."""
return round(_volume * 3.0000, _round)
# US TEASPOONS #
def usteaspoon2cancup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Canadian Cups."""
return round(_volume * 0.021684, _round)
def usteaspoon2cubicinch(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Cubic Inches."""
return round(_volume * 0.30078, _round)
def usteaspoon2cubicmeter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Cubic Meters."""
return round(_volume * 0.00000492902208201893, _round)
def usteaspoon2liter(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Liters."""
return round(_volume * 0.00492902208201892744, _round)
def usteaspoon2metriccup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Cups."""
return round(_volume * 0.019716, _round)
def usteaspoon2metrictablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Tablespoons."""
return round(_volume * 0.32859, _round)
def usteaspoon2metricteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> Metric Teaspoons."""
return round(_volume * 0.98578, _round)
def usteaspoon2ukfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Fluid Ounces."""
return round(_volume * 0.17347, _round)
def usteaspoon2ukgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Gallons."""
return round(_volume * 0.0010842, _round)
def usteaspoon2ukgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Gills."""
return round(_volume * 0.034695, _round)
def usteaspoon2ukpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Pints."""
return round(_volume * 0.0086737, _round)
def usteaspoon2ukquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Quarts."""
return round(_volume * 0.0043368, _round)
def usteaspoon2uktablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Tablespoons."""
return round(_volume * 0.34695, _round)
def usteaspoon2ukteaspoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> UK Teaspoons."""
return round(_volume * 1.3878, _round)
def usteaspoon2uscup(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Cups."""
return round(_volume * 0.020833, _round)
def usteaspoon2usdrygallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Gallons."""
return round(_volume * 0.0011190, _round)
def usteaspoon2usdrypint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Pints."""
return round(_volume * 0.0089517, _round)
def usteaspoon2usdryquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Dry Quarts."""
return round(_volume * 0.0044759, _round)
def usteaspoon2usfluidounce(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Fluid Ounces."""
return round(_volume * 0.16667, _round)
def usteaspoon2usgill(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Gills."""
return round(_volume * 0.041667, _round)
def usteaspoon2usliquidgallon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Gallons."""
return round(_volume * 0.0013021, _round)
def usteaspoon2usliquidpint(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Pints."""
return round(_volume * 0.010417, _round)
def usteaspoon2usliquidquart(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Liquid Quarts."""
return round(_volume * 0.0052083, _round)
def usteaspoon2ustablespoon(_volume: Union[float, int], _round: int = 3) -> float:
"""US Teaspoons -> US Tablespoons."""
return round(_volume * 0.33333, _round)
| lgpl-3.0 | 8,123,197,804,647,962,000 | 31.317719 | 150 | 0.65307 | false | 2.778228 | false | false | false |
slickqa/slickqaweb | slickqaweb/api/files.py | 1 | 7168 | __author__ = 'jcorbett'
from slickqaweb.app import app
from flask import request, Response
from bson import ObjectId
from slickqaweb.model.storedFile import StoredFile
from slickqaweb.model.fileChunk import FileChunk
from slickqaweb.model.serialize import deserialize_that
from .standardResponses import JsonResponse, read_request
from hashlib import md5
import re
import logging
from .apidocs import add_resource, accepts, returns, argument_doc, note
from mongoengine import ListField, EmbeddedDocumentField, ReferenceField, BinaryField
add_resource('/files', 'Upload, or Download files on slick.')
@app.route("/api/files/<file_id>")
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@returns(StoredFile)
def get_stored_file(file_id):
"""Get the "stored file" or the summary about the file."""
return JsonResponse(StoredFile.objects(id=ObjectId(file_id)).first())
@app.route("/api/files/<file_id>", methods=["PUT"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(StoredFile)
@returns(StoredFile)
def update_stored_file(file_id):
"""Update the properties of a stored file, you only have to include changed properties"""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
stored_file = deserialize_that(read_request(), stored_file)
stored_file.save()
return JsonResponse(stored_file)
@app.route("/api/files", methods=["POST"])
@accepts(StoredFile)
@returns(StoredFile)
@note("The chunkSize will be set by the server for you, even if you provide it. Make sure you supply a valid mimetype.")
def create_stored_file():
"""Create a new StoredFile object to store file content for."""
new_stored_file = deserialize_that(read_request(), StoredFile())
new_stored_file.chunkSize = 262144
new_stored_file.save()
return JsonResponse(new_stored_file)
@app.route("/api/files/<file_id>/content", methods=["POST"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(BinaryField(help_text="binary data of file"))
@returns(StoredFile)
@note("Use is not recommended unless your file is really small. Instead add individual chunks to the file.")
def set_file_content(file_id):
"""Upload all the content at once (for small files)."""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
data = request.data
stored_file.md5 = md5(data).hexdigest()
stored_file.length = len(data)
num_of_chunks = len(data) / 262144
if (len(data) % 262144) > 0:
num_of_chunks += 1
for i in range(num_of_chunks):
chunk = FileChunk()
chunk.files_id = stored_file.id
chunk.n = i
chunk.data = data[i * 262144:(i + 1) * 262144]
chunk.save()
stored_file.save()
return JsonResponse(stored_file)
@app.route("/api/files/<file_id>/addchunk", methods=["POST"])
@app.route("/api/results/<result_id>/files/<file_id>/addchunk", methods=["POST"])
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@accepts(BinaryField(help_text="binary data of the chunk."))
@returns(StoredFile)
def add_chunk_to_file(file_id, result_id=None):
"""Add content to a file (chunk by chunk)."""
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
num_of_chunks = len(FileChunk.objects(files_id=stored_file.id))
chunk = FileChunk()
chunk.files_id = stored_file.id
chunk.n = num_of_chunks
chunk.data = request.data
chunk.save()
stored_file.length += len(request.data)
stored_file.save()
return JsonResponse(stored_file)
#@app.route("/api/files/<file_id>/content/<filename>", methods=["HEAD"])
#def get_header_for_file(file_id, filename):
# logger = logging.getLogger('slickqaweb.api.files.get_header_for_file')
# stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
# if stored_file is None:
# return Response("File with id '{}' and name '{}' not found.".format(file_id, filename), mimetype="text/plain", status=404)
# logger.debug("Returning header information for file with id {} and name {}".format(file_id, filename))
@app.route("/api/files/<file_id>/content/<filename>")
@argument_doc('file_id', 'The id (string representation of the ObjectID) of the stored file.')
@argument_doc('filename', 'The filename of the stored file. This is actually ignored, but makes for nice looking URLs.')
@returns(BinaryField(help_text="The file content."))
@note("This sets the http header to the mimetype from the stored file, and streams the file to the requester.")
def get_file_content(file_id, filename):
"""Get the content of a file."""
logger = logging.getLogger('slickqaweb.api.files.get_file_content')
stored_file = StoredFile.objects(id=ObjectId(file_id)).first()
if stored_file is None:
return Response("File with id '{}' and name '{}' not found.".format(file_id, filename), mimetype="text/plain", status=404)
range_header = request.headers.get('Range', None)
response = None
if not range_header:
logger.info("Returning file in classic mode")
def write_chunks():
for chunk in FileChunk.objects(files_id=stored_file.id).order_by('+n'):
yield chunk.data
response = Response(write_chunks(), mimetype=stored_file.mimetype, direct_passthrough=True)
else:
logger.debug("Returning file with id {} and filename {} and md5sum {} in ranged mode.".format(file_id, filename, stored_file.md5))
byte1, byte2 = 0, (stored_file.length - 1)
m = re.search('(\d+)-(\d*)', range_header)
g = m.groups()
if g[0]:
byte1 = int(g[0])
if g[1]:
possible_byte2 = int(g[1])
if possible_byte2 < byte2:
byte2 = possible_byte2
data = []
start_chunk_number = byte1 / stored_file.chunkSize
end_chunk_number = byte2 / stored_file.chunkSize
if byte2 % stored_file.chunkSize > 0:
end_chunk_number += 1
start_index = byte1 % stored_file.chunkSize
end_index = byte2 % stored_file.chunkSize
logger.debug("Using range information {}-{}/{}, chunks {}:{}-{}:{}".format(byte1, byte2, stored_file.length - 1, start_chunk_number, start_index, end_chunk_number, end_index))
def write_chunks():
for chunk in FileChunk.objects(files_id=stored_file.id).order_by('+n'):
if chunk.n >= start_chunk_number and chunk.n <= end_chunk_number:
start = 0
end = stored_file.chunkSize
if chunk.n == start_chunk_number:
start = start_index
if chunk.n == end_chunk_number:
end = end_index
yield chunk.data[start:end]
response = Response(write_chunks(), 206, mimetype=stored_file.mimetype, direct_passthrough=True)
response.headers.add('Content-Range', 'bytes {0}-{1}/{2}'.format(byte1, byte2, stored_file.length))
response.headers.add('Accept-Ranges', 'bytes')
return response
| apache-2.0 | -2,193,309,757,777,340,400 | 43.8 | 183 | 0.667132 | false | 3.546759 | false | false | false |
r3kall/AnimeRecommenderSystem | animerecommendersystem/recommender_systems/CollaborativeFilteringRS.py | 1 | 8293 | """
This files offers a recommendation system based on collaborative filtering technique.
1) Let U be the user we want to give recommendations to, for each user U2 != U we need to compute distance(U, U2) (*)
and get the top K neighbors. These neighbors should have watched a lot of animes also watched by U,
giving to them similar rates.
2) Once we have these K neighbors, we compute an aggregate rate to the animes they watched
by using the rates given by them (excluding the ones already watched by U, obviously).
In other words, we try to return as recommendations the animes watched by most of the neighbors
and with an high rate by (almost) all of them.
(*)HOW DO WE COMPUTE THE DISTANCE BETWEEN U AND U2?
Idea: cosine similarity
In particolar, for each anime watched by both users, we should compute the product of rates.
"""
import math
from collections import defaultdict
from animerecommendersystem.utils import definitions
STD_NUM_NEIGHBORS = 5
STD_NUM_RECOMM = 10
AVG_NEAREST_DISTANCE = 0.50
RELAX_RATIO = 1.1
# Constants for vote prediction
MAX_PREDICT_RATE = 10.
MIN_PREDICT_RATE = 3.
class CollaborativeFilteringRS:
def __init__(self, users_anime_lists, num_neighbors=STD_NUM_NEIGHBORS,
num_recommendations=STD_NUM_RECOMM, approx=True):
self.users_anime_lists = users_anime_lists
self.num_neighbors = num_neighbors
self.num_recommendations = num_recommendations
self.approx = approx
def compute_distance(self, username1, username2):
# Take the list of animes for each user
user1_animes = self.users_anime_lists[username1]
user2_animes = self.users_anime_lists[username2]
distance_numerator = 0
square_sum_1 = 0
square_sum_2 = 0
# Create a set that contains animes watched by at least one of the user.
total_set_animes = set(user1_animes['list'].keys())
total_set_animes |= set(user2_animes['list'].keys())
for anime in total_set_animes:
watched1 = False
watched2 = False
user1_rate = 0
user2_rate = 0
if anime in user1_animes['list'].keys():
watched1 = True
user1_rate = user1_animes['list'][anime]['rate']
if user1_rate == 0:
user1_rate = self.estimate_rate(user1_animes, anime)
square_sum_1 += user1_rate * user1_rate
if anime in user2_animes['list'].keys():
watched2 = True
user2_rate = user2_animes['list'][anime]['rate']
if user2_rate == 0:
user2_rate = self.estimate_rate(user2_animes, anime)
square_sum_2 += user2_rate * user2_rate
# If both users' lists contain this anime, then we need to increase the similarity
if watched1 and watched2:
distance_numerator += user1_rate * user2_rate
# At the end, use the values collected so far to compute the distance between users.
distance_denominator = math.sqrt(square_sum_1) * math.sqrt(square_sum_2)
similarity = distance_numerator / distance_denominator
distance = 1 - similarity
return distance
@staticmethod
def estimate_rate(user_animes, anime):
neighbor_rate = user_animes['mean_rate']
if neighbor_rate == 0:
anime_state = user_animes['list'][anime]['curr_state']
if anime_state == definitions.COMPLETED:
neighbor_rate = definitions.COMPLETED_RATE
elif anime_state == definitions.WATCHING:
neighbor_rate = definitions.WATCHING_RATE
elif anime_state == definitions.DROPPED:
neighbor_rate = definitions.DROPPED_RATE
elif anime_state == definitions.PLANNED:
neighbor_rate = definitions.PLANNED_RATE
elif anime_state == definitions.ON_HOLD:
neighbor_rate = definitions.ON_HOLD_RATE
return neighbor_rate
def get_neighbors(self, user):
if self.approx is True:
return self.get_approx_neighbors(user)
else:
return self.get_exact_neighbors(user)
def get_approx_neighbors(self, user):
"""
Basic idea: compute distance between 'username''s list and all other users, and pick the nearest ones.
=> PROBLEM: TOO SLOW.
=> SOLUTION: no need to pick the nearest one, but some near users will be still ok.
"""
neighbors = defaultdict(float)
how_many_good = 0
for user2 in self.users_anime_lists.keys():
if user2 == user or self.users_anime_lists[user2].get('list') is None:
continue
distance = self.compute_distance(user, user2)
neighbors[user2] = distance
# If this user is close enough to our target, then we take him as a neighbor
if distance <= AVG_NEAREST_DISTANCE * RELAX_RATIO:
how_many_good += 1
if how_many_good == self.num_neighbors:
break
# Sort neighbors according to distance, and return them
sorted_neighbors = sorted(neighbors, key=neighbors.get, reverse=False)
# return a dict, so we have also the similarity as info
res = dict()
for neighbor in sorted_neighbors[0:self.num_neighbors]:
# similarity
res[neighbor] = 1 - neighbors[neighbor]
return res
def get_exact_neighbors(self, user):
distances_dict = defaultdict(float)
for user2 in self.users_anime_lists.keys():
if user2 == user or self.users_anime_lists[user2].get('list') is None:
continue
distance = self.compute_distance(user, user2)
distances_dict[user2] = distance
# Once we have all distances, sort the dict by value and return a list containing
# the usernames of the nearest ones.
sorted_neighbors = sorted(distances_dict, key=distances_dict.get, reverse=False)
return sorted_neighbors[0:self.num_neighbors]
def get_recommendations(self, user):
neighbors_dict = self.get_neighbors(user)
predictions_rates_dict = defaultdict(float)
predictions_rates_num_dict = dict()
predictions_rates_den_dict = dict()
user_animes = self.users_anime_lists[user]
for neighbor in neighbors_dict.keys():
neighbor_animes = self.users_anime_lists[neighbor]
for anime in neighbor_animes['list'].keys():
if anime not in user_animes['list'].keys():
neighbor_rate = neighbor_animes['list'][anime]['rate']
if neighbor_rate > 0:
predictions_rates_num_dict[anime] = predictions_rates_num_dict.get(anime, 0) + \
neighbors_dict[neighbor] * \
(neighbor_rate - self.users_anime_lists[neighbor]['mean_rate'])
predictions_rates_den_dict[anime] = predictions_rates_den_dict.get(anime, 0) + neighbors_dict[
neighbor]
for anime in predictions_rates_num_dict.keys():
if predictions_rates_den_dict[anime] == 0:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate']
else:
predictions_rates_dict[anime] = self.users_anime_lists[user]['mean_rate'] + \
(float(predictions_rates_num_dict[anime]) / float(
predictions_rates_den_dict[anime]))
if predictions_rates_dict[anime] < MIN_PREDICT_RATE:
predictions_rates_dict[anime] = MIN_PREDICT_RATE
elif predictions_rates_dict[anime] > MAX_PREDICT_RATE:
predictions_rates_dict[anime] = MAX_PREDICT_RATE
sorted_animes = sorted(predictions_rates_dict, key=predictions_rates_dict.get, reverse=True)
results = dict()
for anime in sorted_animes[0:self.num_recommendations]:
results[anime] = predictions_rates_dict[anime]
return results
| gpl-3.0 | -702,501,294,063,169,000 | 42.647368 | 123 | 0.606415 | false | 3.987019 | false | false | false |
icereval/scrapi | scrapi/harvesters/stepic.py | 1 | 2795 | """
Stepic.org harvester of MOOC-online courses for SHARE Notification Service
Example API query: https://stepic.org:443/api/lessons/100
"""
from __future__ import unicode_literals
import json
import pycountry
from dateutil.parser import parse
from scrapi import requests
from scrapi.base import JSONHarvester
from scrapi.linter.document import RawDocument
def process_owner(owners_id):
resp = requests.get("https://stepic.org/api/users/" + str(owners_id)).json()
try:
person = resp[u'users'][0]
except KeyError:
person = {u'first_name': '', u'last_name': ''}
owner = {
'name': " ".join([person[u'first_name'], person[u'last_name']]),
'givenName': person[u'first_name'],
'additionalName': '',
'familyName': person[u'last_name'],
'email': '',
'sameAs': [],
}
return [owner]
class StepicHarvester(JSONHarvester):
short_name = 'stepic'
long_name = 'Stepic.org Online Education Platform'
url = 'http://www.stepic.org'
count = 0
URL = 'https://stepic.org/api/lessons'
@property
def schema(self):
return {
'contributors': ('/owner', process_owner),
'uris': {
'canonicalUri': ('/id', lambda x: self.url + '/' + str(x))
},
'title': '/title',
'providerUpdatedDateTime': ('/update_date', lambda x: parse(x).isoformat()),
'description': '/title',
'languages': ('/language', lambda x: [pycountry.languages.get(alpha2=x).terminology])
}
def harvest(self, start_date=None, end_date=None):
# TODO - stepic has no means of querying by date, we should add handling for the
# start and end date once it does.
search_url = self.URL
records = self.get_records(search_url)
record_list = []
for record in records:
doc_id = record['id']
record_list.append(
RawDocument(
{
'doc': json.dumps(record),
'source': self.short_name,
'docID': ('stepic_doc' + str(doc_id)).decode('utf-8'),
'filetype': 'json'
}
)
)
return record_list
def get_records(self, search_url):
all_lessons = []
resp = requests.get(self.URL + '?page=last').json()
last_lesson_id = resp['lessons'][-1]['id']
for pk in range(last_lesson_id + 1):
lesson = requests.get(search_url + "/" + str(pk), expected=[200, 403, 404])
if lesson.status_code == 200:
lesson_list = lesson.json()['lessons'][0]
all_lessons.append(lesson_list)
return all_lessons
| apache-2.0 | -3,222,189,658,091,499,500 | 31.126437 | 97 | 0.546333 | false | 3.77193 | false | false | false |
Onager/plaso | plaso/analysis/interface.py | 1 | 2822 | # -*- coding: utf-8 -*-
"""This file contains the interface for analysis plugins."""
import abc
import calendar
import collections
import time
from plaso.analysis import definitions as analysis_definitions
from plaso.analysis import logger
from plaso.containers import events
from plaso.containers import reports
from plaso.lib import definitions
class AnalysisPlugin(object):
"""Class that defines the analysis plugin interface."""
# The name of the plugin. This is the name that is matched against when
# loading plugins, so it is important that this name is short, concise and
# explains the nature of the plugin easily. It also needs to be unique.
NAME = 'analysis_plugin'
def __init__(self):
"""Initializes an analysis plugin."""
super(AnalysisPlugin, self).__init__()
self._analysis_counter = collections.Counter()
self.plugin_type = analysis_definitions.PLUGIN_TYPE_REPORT
@property
def plugin_name(self):
"""str: name of the plugin."""
return self.NAME
def _CreateEventTag(self, event, labels):
"""Creates an event tag.
Args:
event (EventObject): event to tag.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
"""
event_identifier = event.GetIdentifier()
event_tag = events.EventTag()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Tagged event: {0:s} with labels: {1:s}'.format(
event_identifier_string, ', '.join(labels)))
return event_tag
# pylint: disable=unused-argument
def CompileReport(self, mediator):
"""Compiles a report of the analysis.
After the plugin has received every copy of an event to analyze this
function will be called so that the report can be assembled.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
"""
analysis_report = reports.AnalysisReport(plugin_name=self.NAME)
time_elements = time.gmtime()
time_compiled = calendar.timegm(time_elements)
analysis_report.time_compiled = (
time_compiled * definitions.MICROSECONDS_PER_SECOND)
analysis_report.analysis_counter = self._analysis_counter
return analysis_report
@abc.abstractmethod
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
| apache-2.0 | -2,152,688,887,689,937,000 | 29.673913 | 76 | 0.706591 | false | 4.13783 | false | false | false |
karras/gitlab-docsbot | setup.py | 1 | 2639 | # -*- coding: UTF-8 -*-
"""Setuptools package definition"""
from setuptools import setup
from setuptools import find_packages
import codecs
import os
import sys
version = sys.version_info[0]
if version > 2:
pass
else:
pass
__version__ = None
version_file = "autodocs/version.py"
with codecs.open(version_file, encoding="UTF-8") as f:
code = compile(f.read(), version_file, 'exec')
exec(code)
def find_data(packages, extensions):
"""Finds data files along with source.
:param packages: Look in these packages
:param extensions: Look for these extensions
"""
data = {}
for package in packages:
package_path = package.replace('.', '/')
for dirpath, _, filenames in os.walk(package_path):
for filename in filenames:
for extension in extensions:
if filename.endswith(".%s" % extension):
file_path = os.path.join(
dirpath,
filename
)
file_path = file_path[len(package) + 1:]
if package not in data:
data[package] = []
data[package].append(file_path)
return data
with codecs.open('README.md', 'r', encoding="UTF-8") as f:
README_TEXT = f.read()
setup(
name = "gitlab-autodocs",
version = __version__,
packages = find_packages(),
package_data=find_data(
find_packages(), ["py"]
),
data_files = [
('/etc', ['autodocs/config/gitlab-autodocs.yaml']),
('/lib/systemd/system', ['autodocs/config/gitlab-autodocs.service'])
],
entry_points = {
'console_scripts': [
'gitlab-autodocs = autodocs:main',
]
},
install_requires = [
"requests",
"pyyaml",
"python-gitlab"
],
author = "Adfinis SyGroup AG",
author_email = "https://adfinis-sygroup.ch/",
description = "GitLab CI Docs Bot",
long_description = README_TEXT,
keywords = "GitLab CI autodocs bot",
url = "https://github.com/karras/gitlab-docsbot",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Topic :: Software Development :: Build Tools"
]
)
| gpl-3.0 | 5,100,969,825,408,567,000 | 28 | 76 | 0.560818 | false | 4.072531 | false | false | false |
KarrLab/kinetic_datanator | datanator/data_source/metabolites_meta_collection.py | 1 | 14782 | from datanator_query_python.query import query_sabiork, query_xmdb
from datanator.util import chem_util
from datanator.util import file_util
from datanator.util import index_collection
import datanator.config.core
import pymongo
import re
from pymongo.collation import Collation, CollationStrength
class MetabolitesMeta(query_sabiork.QuerySabio):
''' meta_loc: database location to save the meta collection
'''
def __init__(self, cache_dirname=None, MongoDB=None, replicaSet=None, db=None,
verbose=False, max_entries=float('inf'), username = None,
password = None, authSource = 'admin', meta_loc = None):
self.cache_dirname = cache_dirname
self.verbose = verbose
self.MongoDB = MongoDB
self.replicaSet = replicaSet
self.max_entries = max_entries
self.username = username
self.password = password
self.authSource = authSource
self.meta_loc = meta_loc
super(MetabolitesMeta, self).__init__(cache_dirname=cache_dirname, MongoDB=MongoDB, replicaSet=replicaSet,
db=db, verbose=verbose, max_entries=max_entries, username = username,
password = password, authSource = authSource)
self.frequency = 50
self.chem_manager = chem_util.ChemUtil()
self.file_manager = file_util.FileUtil()
self.ymdb_query = query_xmdb.QueryXmdb(username=username, password=password, server=MongoDB, authSource=authSource,
database=db, collection_str='ymdb', readPreference='nearest')
self.ecmdb_query = query_xmdb.QueryXmdb(username=username, password=password, server=MongoDB, authSource=authSource,
database=db, collection_str='ecmdb', readPreference='nearest')
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.client, self.db, self.collection = self.con_db('metabolites_meta')
def load_content(self):
collection_name = 'metabolites_meta'
ecmdb_fields = ['m2m_id', 'inchi', 'synonyms.synonym']
self.fill_metabolite_fields(
fields=ecmdb_fields, collection_src='ecmdb', collection_des = collection_name)
ymdb_fields = ['ymdb_id', 'inchi', 'synonyms.synonym']
self.fill_metabolite_fields(
fields=ymdb_fields, collection_src='ymdb', collection_des = collection_name)
_, _, collection = self.con_db(collection_name)
k = 0
for doc in self.collection.find(filter={}, projection={'inchi':1}):
if k > self.max_entries:
break
kinlaw_id = self.get_kinlawid_by_inchi([doc['inchi']])
rxn_participants = self.find_reaction_participants(kinlaw_id)
collection.update_one({'inchi': doc['inchi']},
{'$set': {'kinlaw_id': kinlaw_id,
'reaction_participants': rxn_participants}},
upsert=False)
k += 1
# i = 0
# cursor = collection.find(filter = {}, projection = {'similar_compounds_corrected':1, 'similar_compounds': 1})
# for doc in cursor:
# if i % self.frequency == 0:
# print(i)
# replacement = []
# for corrected in doc['similar_compounds_corrected']:
# for k, v in corrected.items():
# dic = {}
# dic[k] = v
# replacement.append(dic)
# collection.update_one({'_id': doc['_id']},
# {'$set': {'similar_compounds': replacement}},
# upsert=False)
# i += 1
def replace_key_in_similar_compounds(self):
query = {}
projection = {'similar_compounds': 1}
_, _, col = self.con_db('metabolites_meta')
docs = col.find(filter=query, projection=projection)
for doc in docs:
result = []
_list = doc['similar_compounds']
for dic in _list:
old_key = list(dic.keys())[0]
try:
new_key = col.find_one(filter={'inchi': old_key},
projection={'InChI_Key':1})['InChI_Key']
result.append( {new_key: dic[old_key]})
except TypeError:
result.append( {'NoStructure': -1} )
col.update_one({'_id': doc['_id']},
{'$set': {'similar_compounds': result} })
def fill_metabolite_fields(self, fields=None, collection_src=None, collection_des = None):
'''Fill in values of fields of interest from
metabolite collection: ecmdb or ymdb
Args:
fileds: list of fields of interest
collection_src: collection in which query will be done
collection_des: collection in which result will be updated
'''
projection = {}
for field in fields:
projection[field] = 1
projection['_id'] = 0
_, _, col_src = self.con_db(collection_src)
_, _, col_des = self.con_db(collection_des)
cursor = col_src.find(filter={}, projection=projection)
i = 0
for doc in cursor:
if i == self.max_entries:
break
if i % self.frequency == 0:
print('Getting fields of interest from {} document in {}'.format(i, collection_src))
doc['InChI_Key'] = self.chem_manager.inchi_to_inchikey(doc['inchi'])
if isinstance(doc.get('synonyms'), list):
continue
try:
synonyms = doc.get('synonyms', None).get('synonym')
except AttributeError:
synonyms = doc.get('synonyms', None)
col_des.update_one({'inchi': doc['inchi']},
{ '$set': { fields[0]: doc[fields[0]],
fields[1]: doc[fields[1]],
'synonyms': synonyms,
'InChI_Key': doc['InChI_Key']}},
upsert=True)
i += 1
def fill_names(self):
"""Fill names of metabolites in 'name' field
"""
docs = self.collection.find({})
count = self.collection.count_documents({})
for i, doc in enumerate(docs):
name = ''
inchi_key = doc['InChI_Key']
if i == self.max_entries:
break
if i % 100 == 0 and self.verbose:
print('Adding name to document {} out of {}.'.format(i, count))
if doc.get('ymdb_id') is None:
name = self.ecmdb_query.get_name_by_inchikey(inchi_key)
else:
name = self.ymdb_query.get_name_by_inchikey(inchi_key)
self.collection.update_one({'_id': doc['_id']},
{'$set': {'name': name}}, upsert=False)
def fill_standard_id(self, skip=0):
"""Fill meta collection with chebi_id, pubmed_id,
and kegg_id.
Args:
skip (:obj:`int`): skip first n number of records.
"""
con_0 = {'chebi_id': {'$exists': False}}
con_1 = {'chebi_id': None}
query = {'$or': [con_0, con_1]}
docs = self.collection.find(query, skip=skip)
count = self.collection.count_documents(query)
for i, doc in enumerate(docs):
if i == self.max_entries:
break
if i % 100 == 0 and self.verbose:
print('Processing doc {} out of {}'.format(i+skip, count))
m2m_id = doc.get('m2m_id')
ymdb_id = doc.get('ymdb_id')
if ymdb_id == 'YMDB00890' or ymdb_id == 'YMDB00862':
continue
if ymdb_id is not None: # ymdb has richer data than ecmdb
doc_e = self.ymdb_query.get_standard_ids_by_id(ymdb_id)
if doc_e['synonyms']:
synonyms = doc_e['synonyms']['synonym']
else:
synonyms = None
self.collection.update_many({'ymdb_id': ymdb_id},
{'$set': {'chebi_id': doc_e['chebi_id'],
'hmdb_id': doc_e['hmdb_id'],
'kegg_id': doc_e['kegg_id'],
'description': doc_e['description'],
'chemical_formula': doc_e['chemical_formula'],
'average_molecular_weight': doc_e['average_molecular_weight'],
'cas_registry_number': doc_e['cas_registry_number'],
'smiles': doc_e['smiles'],
'cellular_locations': doc_e['cellular_locations'],
'pubchem_compound_id': doc_e['pubchem_compound_id'],
'chemspider_id': doc_e['chemspider_id'],
'biocyc_id': doc_e['biocyc_id'],
'pathways': doc_e['pathways'],
'property': doc_e['property'],
'name': doc_e['name'],
'synonyms': synonyms}}, upsert=False)
elif m2m_id is not None:
doc_y = self.ecmdb_query.get_standard_ids_by_id(m2m_id)
if doc_y['synonyms']:
synonyms = doc_y['synonyms']['synonym']
else:
synonyms = None
self.collection.update_many({'m2m_id': m2m_id},
{'$set': {'chebi_id': doc_y['chebi_id'],
'hmdb_id': doc_y['hmdb_id'],
'kegg_id': doc_y['kegg_id'],
'description': doc_y['description'],
'chemical_formula': doc_y['chemical_formula'],
'average_molecular_weight': doc_y['average_molecular_weight'],
'cas_registry_number': doc_y['cas_registry_number'],
'smiles': doc_y['smiles'],
'cellular_locations': doc_y['cellular_locations'],
'pubchem_compound_id': doc_y['pubchem_compound_id'],
'chemspider_id': doc_y['chemspider_id'],
'biocyc_id': doc_y['biocyc_id'],
'pathways': doc_y['pathways'],
'property': doc_y['property'],
'name': doc_y['name'],
'synonyms': synonyms}}, upsert=False)
else:
continue
def remove_dups(self, _key):
"""Remove entries with the same _key.
Args:
_key(:obj:`str`): Name of fields in which dups will be identified.
"""
num, docs = self.get_duplicates('metabolites_meta', _key)
return num, docs
def reset_cellular_locations(self, start=0):
"""Github (https://github.com/KarrLab/datanator_rest_api/issues/69)
"""
query = {'cellular_locations': {'$ne': None}}
count = self.collection.count_documents(query) - start
for i, doc in enumerate(self.collection.find(filter=query, skip=start,
projection={'m2m_id': 1, 'ymdb_id': 1,
'cellular_locations': 1})):
if i == self.max_entries:
break
if self.verbose and i % 100 == 0:
print('Processing doc {} out of {} ...'.format(i, count))
cell_locations = doc['cellular_locations']
obj = []
if doc.get('ymdb_id'):
for loc in cell_locations:
location = loc['cellular_location']['cellular_location']
obj.append({
'reference': ['YMDB'],
'cellular_location': location
})
else:
for loc in cell_locations:
location = loc['cellular_location']['cellular_location']
obj.append({
'reference': ['ECMDB'],
'cellular_location': location
})
self.collection.update_one({'_id': doc['_id']},
{'$set': {'cellular_locations': obj}},
upsert=False)
def main():
db = 'datanator'
meta_loc = 'datanator'
username = datanator.config.core.get_config()['datanator']['mongodb']['user']
password = datanator.config.core.get_config()['datanator']['mongodb']['password']
MongoDB = datanator.config.core.get_config()['datanator']['mongodb']['server']
manager = MetabolitesMeta(cache_dirname=None, MongoDB=MongoDB, db=db,
verbose=True, max_entries=float('inf'),
username = username, password = password, meta_loc = meta_loc)
# # manager.load_content()
# collection_name = 'metabolites_meta'
# manager.fill_metabolite_fields(fields=['m2m_id', 'inchi', 'synonyms.synonym'],
# collection_src='ecmdb', collection_des = collection_name)
# manager.fill_metabolite_fields(fields=['ymdb_id', 'inchi', 'synonyms.synonym'],
# collection_src='ymdb',
# collection_des = collection_name)
# manager.fill_names()
# manager.fill_standard_id(skip=0)
# num, _ = manager.remove_dups('InChI_Key')
# print(num)
manager.reset_cellular_locations()
if __name__ == '__main__':
main()
| mit | -7,996,290,644,230,414,000 | 48.273333 | 124 | 0.468069 | false | 4.283396 | true | false | false |
metomi/rose | metomi/rosie/graph.py | 1 | 9686 | # Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Plot suite ancestry."""
import textwrap
import time
import pygraphviz
import metomi.rose.metadata_graph
import metomi.rose.opt_parse
import metomi.rose.reporter
import metomi.rosie.suite_id
import metomi.rosie.ws_client
import metomi.rosie.ws_client_cli
class NoConnectionsEvent(metomi.rose.reporter.Event):
"""An event raised if the graph has no edges or nodes.
event.args[0] is the filter id string.
"""
KIND = metomi.rose.reporter.Reporter.KIND_ERR
def __str__(self):
return "%s: no copy relationships to other suites" % self.args[0]
class PrintSuiteDetails(metomi.rose.reporter.Event):
"""An event to print out suite details when writing to CLI"""
KIND = metomi.rose.reporter.Reporter.KIND_OUT
def __str__(self):
template = " %s"
argslist = [self.args[0]]
if len(self.args) > 1:
for arg in self.args[1]:
template += ", %s"
argslist.append(arg)
return template % tuple(argslist)
def get_suite_data(prefix, properties=None):
"""Retrieve a dictionary containing the contents of RosieWS
Adds in any extra requested properties
"""
if properties is None:
properties = []
ws_client = metomi.rosie.ws_client.RosieWSClient(
prefixes=[prefix],
event_handler=metomi.rose.reporter.Reporter()
)
suite_data = ws_client.search(prefix, all_revs=1)[0][0]
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
suite_id = metomi.rosie.suite_id.SuiteId.from_idx_branch_revision(
dict_row["idx"],
dict_row["branch"],
dict_row["revision"]
)
dict_row["suite"] = suite_id.to_string_with_version()
if "local" in properties:
dict_row["local"] = suite_id.get_status()
if "date" in properties:
dict_row["date"] = time.strftime(
metomi.rosie.ws_client_cli.DATE_TIME_FORMAT,
time.gmtime(dict_row.get("date"))
)
return suite_data
def calculate_edges(graph, suite_data, filter_id=None, properties=None,
max_distance=None):
"""Get all connected suites for a prefix, optionally filtered."""
if properties is None:
properties = []
node_rosie_properties = {}
edges = []
forward_edges = {}
back_edges = {}
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
idx = dict_row["idx"]
node_rosie_properties[idx] = []
for prop in properties:
node_rosie_properties[idx].append(dict_row.get(prop))
from_idx = dict_row.get("from_idx")
if from_idx is None:
continue
edges.append((from_idx, idx))
forward_edges.setdefault(from_idx, [])
forward_edges[from_idx].append(idx)
back_edges.setdefault(idx, [])
back_edges[idx].append(from_idx)
if filter_id is None:
# Plot all the edges we've found.
for edge in sorted(edges):
node0, node1 = edge
add_node(graph, node0, node_rosie_properties.get(node0))
add_node(graph, node1, node_rosie_properties.get(node1))
graph.add_edge(edge[0], edge[1])
else:
reporter = metomi.rose.reporter.Reporter()
# Only plot the connections involving filter_id.
node_stack = []
node_stack = [(filter_id, 0)]
add_node(graph, filter_id, node_rosie_properties.get(filter_id),
fillcolor="lightgrey", style="filled")
ok_nodes = set([])
while node_stack:
node, distance = node_stack.pop()
if max_distance is not None and distance > max_distance:
continue
ok_nodes.add(node)
for neighbour_node in (forward_edges.get(node, []) +
back_edges.get(node, [])):
if neighbour_node not in ok_nodes:
node_stack.append((neighbour_node, distance + 1))
if len(ok_nodes) == 1:
# There are no related suites.
reporter(NoConnectionsEvent(filter_id))
for edge in sorted(edges):
node0, node1 = edge
if node0 in ok_nodes and node1 in ok_nodes:
add_node(graph, node0, node_rosie_properties.get(node0))
add_node(graph, node1, node_rosie_properties.get(node1))
graph.add_edge(node0, node1)
def add_node(graph, node, node_label_properties, **kwargs):
"""Add a node with a particular label."""
label_lines = [node]
if node_label_properties is not None:
for property_value in node_label_properties:
label_lines.extend(textwrap.wrap(str(property_value)))
label_text = "\\n".join(label_lines) # \n must be escaped for graphviz.
kwargs.update({"label": label_text})
graph.add_node(node, **kwargs)
def make_graph(suite_data, filter_id, properties, prefix, max_distance=None):
"""Construct the pygraphviz graph."""
graph = pygraphviz.AGraph(directed=True)
graph.graph_attr["rankdir"] = "LR"
if filter_id:
graph.graph_attr["name"] = filter_id + " copy tree"
else:
graph.graph_attr["name"] = prefix + " copy tree"
calculate_edges(graph, suite_data, filter_id, properties,
max_distance=max_distance)
return graph
def output_graph(graph, filename=None, debug_mode=False):
"""Draw the graph to filename (or temporary file if None)."""
metomi.rose.metadata_graph.output_graph(graph, debug_mode=debug_mode,
filename=filename)
def print_graph(suite_data, filter_id, properties=None, max_distance=None):
"""Dump out list of graph entries relating to a suite"""
if properties is None:
properties = []
reporter = metomi.rose.reporter.Reporter()
ancestry = {}
# Process suite_data to get ancestry tree
for dict_row in sorted(suite_data, key=lambda _: _["revision"]):
idx = dict_row["idx"]
from_idx = dict_row.get("from_idx")
if idx not in ancestry:
ancestry[idx] = {'parent': None, 'children': []}
if from_idx:
ancestry[idx]['parent'] = from_idx
for prop in properties:
ancestry[idx][prop] = dict_row.get(prop)
if from_idx in ancestry:
ancestry[from_idx]['children'].append(idx)
else:
ancestry[from_idx] = {'parent': None, 'children': [idx]}
# Print out info
parent_id = ancestry[filter_id]['parent']
if parent_id:
reporter(PrintSuiteDetails(
parent_id, [ancestry[parent_id][p] for p in properties]),
prefix="[parent]")
else:
reporter(PrintSuiteDetails(None), prefix="[parent]")
children = ancestry[filter_id]['children']
generation = 1
# Print out each generation of child suites
while children:
next_children = []
for child in children:
reporter(PrintSuiteDetails(child,
[ancestry[child][p] for p in properties]),
prefix="[child%s]" % generation)
# If a child has children add to list of next generation children
if ancestry[child]['children']:
next_children += ancestry[child]['children']
if max_distance and generation >= max_distance:
break
generation += 1
children = next_children
def main():
"""Provide the CLI interface."""
opt_parser = metomi.rose.opt_parse.RoseOptionParser()
opt_parser.add_my_options("distance",
"output_file",
"prefix",
"property",
"text")
opts, args = opt_parser.parse_args()
filter_id = None
if args:
filter_id = args[0]
prefix = metomi.rosie.suite_id.SuiteId(id_text=filter_id).prefix
if opts.prefix:
opt_parser.error("No need to specify --prefix when specifying ID")
elif opts.prefix:
prefix = opts.prefix
else:
prefix = metomi.rosie.suite_id.SuiteId.get_prefix_default()
if opts.distance and not args:
opt_parser.error("distance option requires an ID")
if opts.text and not args:
opt_parser.error("print option requires an ID")
suite_data = get_suite_data(prefix, opts.property)
if opts.text:
print_graph(suite_data, filter_id, opts.property,
max_distance=opts.distance)
else:
graph = make_graph(suite_data, filter_id, opts.property, prefix,
max_distance=opts.distance)
output_graph(graph, filename=opts.output_file,
debug_mode=opts.debug_mode)
if __name__ == "__main__":
main()
| gpl-3.0 | -901,207,645,010,932,700 | 33.469751 | 79 | 0.596118 | false | 3.825434 | false | false | false |
herrnst/script.xbmc.lcdproc | resources/lib/lcdproc.py | 1 | 21691 | '''
XBMC LCDproc addon
Copyright (C) 2012-2018 Team Kodi
Copyright (C) 2012-2018 Daniel 'herrnst' Scheller
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import telnetlib
import time
import xbmc
from socket import *
from .settings import *
from .lcdbase import *
from .lcdproc_extra_imon import *
from .lcdproc_extra_mdm166a import *
from .infolabels import *
MAX_ROWS = 20
MAX_BIGDIGITS = 20
INIT_RETRY_INTERVAL = 2
INIT_RETRY_INTERVAL_MAX = 60
class LCDProc(LcdBase):
def __init__(self, settings):
self.m_bStop = True
self.m_lastInitAttempt = 0
self.m_initRetryInterval = INIT_RETRY_INTERVAL
self.m_used = True
self.tn = telnetlib.Telnet()
self.tnsocket = None
self.m_timeLastSockAction = time.time()
self.m_timeSocketIdleTimeout = 2
self.m_strLineText = [None]*MAX_ROWS
self.m_strLineType = [None]*MAX_ROWS
self.m_bstrLineIcon = [None]*MAX_ROWS
self.m_strDigits = [None]*MAX_BIGDIGITS
self.m_iProgressBarWidth = 0
self.m_iProgressBarLine = -1
self.m_bstrIconName = b"BLOCK_FILLED"
self.m_iBigDigits = int(8) # 12:45:78 / colons count as digit
self.m_iOffset = 1
self.m_bstrSetLineCmds = b""
self.m_cExtraIcons = None
LcdBase.__init__(self, settings)
def SendCommand(self, strCmd, bCheckRet):
countcmds = strCmd.count(b'\n')
sendcmd = strCmd
ret = True
# Single command without lf
if countcmds < 1:
countcmds = 1
sendcmd += b"\n"
try:
# Send to server via raw socket to prevent telnetlib tampering with
# certain chars (especially 0xFF -> telnet IAC)
self.tnsocket.sendall(sendcmd)
except:
# Something bad happened, abort
log(LOGERROR, "SendCommand: Telnet exception - send")
return False
# Update last socketaction timestamp
self.m_timeLastSockAction = time.time()
# Repeat for number of found commands
for i in range(1, (countcmds + 1)):
# Read in (multiple) responses
while True:
try:
# Read server reply
reply = self.tn.read_until(b"\n",3)
except:
# (Re)read failed, abort
log(LOGERROR, "SendCommand: Telnet exception - reread")
return False
# Skip these messages
if reply[:6] == b'listen':
continue
elif reply[:6] == b'ignore':
continue
elif reply[:3] == b'key':
continue
elif reply[:9] == b'menuevent':
continue
# Response seems interesting, so stop here
break
if not bCheckRet:
continue # no return checking desired, so be fine
if strCmd == b'noop' and reply == b'noop complete\n':
continue # noop has special reply
if reply == b'success\n':
continue
ret = False
# Leave information something undesired happened
if ret is False:
log(LOGWARNING, "Reply to '%s' was '%s'" % (strCmd.decode(self.m_strLCDEncoding), reply.decode(self.m_strLCDEncoding)))
return ret
def SetupScreen(self):
# Add screen first
if not self.SendCommand(b"screen_add xbmc", True):
return False
# Set screen priority
if not self.SendCommand(b"screen_set xbmc -priority info", True):
return False
# Turn off heartbeat if desired
if not self.m_Settings.getHeartBeat():
if not self.SendCommand(b"screen_set xbmc -heartbeat off", True):
return False
# Initialize command list var
strInitCommandList = b""
# Setup widgets (scrollers and hbars first)
for i in range(1,int(self.m_iRows)+1):
# Text widgets
strInitCommandList += b"widget_add xbmc lineScroller%i scroller\n" % (i)
# Progress bars
strInitCommandList += b"widget_add xbmc lineProgress%i hbar\n" % (i)
# Reset bars to zero
strInitCommandList += b"widget_set xbmc lineProgress%i 0 0 0\n" % (i)
self.m_strLineText[i-1] = ""
self.m_strLineType[i-1] = ""
# Setup icons last
for i in range(1,int(self.m_iRows)+1):
# Icons
strInitCommandList += b"widget_add xbmc lineIcon%i icon\n" % (i)
# Default icon
strInitCommandList += b"widget_set xbmc lineIcon%i 0 0 BLOCK_FILLED\n" % (i)
self.m_bstrLineIcon[i-1] = b""
for i in range(1,int(self.m_iBigDigits + 1)):
# Big Digit
strInitCommandList += b"widget_add xbmc lineBigDigit%i num\n" % (i)
# Set Digit
strInitCommandList += b"widget_set xbmc lineBigDigit%i 0 0\n" % (i)
self.m_strDigits[i] = b""
if not self.SendCommand(strInitCommandList, True):
return False
return True
def Initialize(self):
connected = False
if not self.m_used:
return False#nothing to do
#don't try to initialize too often
now = time.time()
if (now - self.m_lastInitAttempt) < self.m_initRetryInterval:
return False
self.m_lastInitAttempt = now
if self.Connect():
if LcdBase.Initialize(self):
# reset the retry interval after a successful connect
self.m_initRetryInterval = INIT_RETRY_INTERVAL
self.m_bStop = False
connected = True
else:
log(LOGERROR, "Connection successful but LCD.xml has errors, aborting connect")
if not connected:
# preventively close socket
self.CloseSocket()
# give up after INIT_RETRY_INTERVAL_MAX (60) seconds
if self.m_initRetryInterval > INIT_RETRY_INTERVAL_MAX:
self.m_used = False
log(LOGERROR,"Connect failed. Giving up. Please fix any connection problems and restart the addon.")
else:
self.m_initRetryInterval = self.m_initRetryInterval * 2
log(LOGERROR,"Connect failed. Retry in %d seconds." % self.m_initRetryInterval)
return connected
def DetermineExtraSupport(self):
rematch_imon = "SoundGraph iMON(.*)LCD"
rematch_mdm166a = "Targa(.*)mdm166a"
rematch_imonvfd = "Soundgraph(.*)VFD"
bUseExtraIcons = self.m_Settings.getUseExtraElements()
# Never cause script failure/interruption by this! This is totally optional!
try:
# Retrieve driver name for additional functionality
self.tn.write(b"info\n")
reply = self.tn.read_until(b"\n",3).strip().decode("ascii")
# When the LCDd driver doesn't supply a valid string, inform and return
if reply == "":
log(LOGINFO, "Empty driver information reply")
return
log(LOGINFO, "Driver information reply: " + reply)
if re.match(rematch_imon, reply):
log(LOGINFO, "SoundGraph iMON LCD detected")
if bUseExtraIcons:
self.m_cExtraIcons = LCDproc_extra_imon()
# override bigdigits counter, the imonlcd driver handles bigdigits
# different: digits count for two columns instead of three
self.m_iBigDigits = 7
elif re.match(rematch_mdm166a, reply):
log(LOGINFO, "Futaba/Targa USB mdm166a VFD detected")
if bUseExtraIcons:
self.m_cExtraIcons = LCDproc_extra_mdm166a()
elif re.match(rematch_imonvfd, reply):
log(LOGINFO, "SoundGraph iMON IR/VFD detected")
if self.m_cExtraIcons is not None:
self.m_cExtraIcons.Initialize()
except:
pass
def Connect(self):
self.CloseSocket()
try:
ip = self.m_Settings.getHostIp()
port = self.m_Settings.getHostPort()
log(LOGDEBUG,"Open " + str(ip) + ":" + str(port))
self.tn.open(ip, port)
# Start a new session
self.tn.write(b"hello\n")
# Receive LCDproc data to determine row and column information
reply = self.tn.read_until(b"\n",3).decode("ascii")
log(LOGDEBUG,"Reply: " + reply)
# parse reply by regex
lcdinfo = re.match("^connect .+ protocol ([0-9\.]+) lcd wid (\d+) hgt (\d+) cellwid (\d+) cellhgt (\d+)$", reply)
# if regex didn't match, LCDproc is incompatible or something's odd
if lcdinfo is None:
return False
# protocol version must currently either be 0.3 or 0.4
if float(lcdinfo.group(1)) not in [0.3, 0.4]:
log(LOGERROR, "Only LCDproc protocols 0.3 and 0.4 supported (got " + lcdinfo.group(1) +")")
return False
# set up class vars
self.m_iColumns = int(lcdinfo.group(2))
self.m_iRows = int(lcdinfo.group(3))
self.m_iCellWidth = int(lcdinfo.group(4))
self.m_iCellHeight = int(lcdinfo.group(5))
# tell users what's going on
log(LOGINFO, "Connected to LCDd at %s:%s, Protocol version %s - Geometry %sx%s characters (%sx%s pixels, %sx%s pixels per character)" % (str(ip), str(port), float(lcdinfo.group(1)), str(self.m_iColumns), str(self.m_iRows), str(self.m_iColumns * self.m_iCellWidth), str(self.m_iRows * self.m_iCellHeight), str(self.m_iCellWidth), str(self.m_iCellHeight)))
# Set up BigNum values based on display geometry
if self.m_iColumns < 13:
self.m_iBigDigits = 0 # No clock
elif self.m_iColumns < 17:
self.m_iBigDigits = 5 # HH:MM
elif self.m_iColumns < 20:
self.m_iBigDigits = 7 # H:MM:SS on play, HH:MM on clock
else:
self.m_iBigDigits = 8 # HH:MM:SS
# Check LCDproc if we can enable any extras or override values
# (might override e.g. m_iBigDigits!)
self.DetermineExtraSupport()
except:
log(LOGERROR,"Connect: Caught exception, aborting.")
return False
# retrieve raw socket object
self.tnsocket = self.tn.get_socket()
if self.tnsocket is None:
log(LOGERROR, "Retrieval of socket object failed!")
return False
if not self.SetupScreen():
log(LOGERROR, "Screen setup failed!")
return False
return True
def CloseSocket(self):
if self.tnsocket:
# no pyexceptions, please, we're disconnecting anyway
try:
# if we served extra elements, (try to) reset them
if self.m_cExtraIcons is not None:
if not self.SendCommand(self.m_cExtraIcons.GetClearAllCmd(), True):
log(LOGERROR, "CloseSocket(): Cannot clear extra icons")
# do gracefully disconnect (send directly as we won't get any response on this)
self.tn.write(b"bye\n")
# and close socket afterwards
self.tn.close()
except:
# exception caught on this, so what? :)
pass
# delete/cleanup extra support instance
del self.m_cExtraIcons
self.m_cExtraIcons = None
self.tnsocket = None
del self.tn
self.tn = telnetlib.Telnet()
def IsConnected(self):
if not self.tnsocket:
return False
# Ping only every SocketIdleTimeout seconds
if (self.m_timeLastSockAction + self.m_timeSocketIdleTimeout) > time.time():
return True
if not self.SendCommand(b"noop", True):
log(LOGERROR, "noop failed in IsConnected(), aborting!")
return False
return True
def SetBackLight(self, iLight):
if not self.tnsocket:
return
log(LOGDEBUG, "Switch Backlight to: " + str(iLight))
# Build command
if iLight == 0:
cmd = b"screen_set xbmc -backlight off\n"
elif iLight > 0:
cmd = b"screen_set xbmc -backlight on\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "SetBackLight(): Cannot change backlight state")
self.CloseSocket()
def SetContrast(self, iContrast):
#TODO: Not sure if you can control contrast from client
return
def Stop(self):
self.CloseSocket()
self.m_bStop = True
def Suspend(self):
if self.m_bStop or not self.tnsocket:
return
# Build command to suspend screen
cmd = b"screen_set xbmc -priority hidden\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "Suspend(): Cannot suspend")
self.CloseSocket()
def Resume(self):
if self.m_bStop or not self.tnsocket:
return
# Build command to resume screen
cmd = b"screen_set xbmc -priority info\n"
# Send to server
if not self.SendCommand(cmd, True):
log(LOGERROR, "Resume(): Cannot resume")
self.CloseSocket()
def GetColumns(self):
return int(self.m_iColumns)
def GetBigDigitTime(self, mode):
ret = ""
if self.m_InfoLabels.IsPlayerPlaying():
if not (mode == LCD_MODE.LCD_MODE_SCREENSAVER and self.m_InfoLabels.IsPlayerPaused()):
ret = self.m_InfoLabels.GetPlayerTime()[-self.m_iBigDigits:]
if ret == "": # no usable timestring, e.g. not playing anything
strSysTime = self.m_InfoLabels.GetSystemTime()
if self.m_iBigDigits >= 8: # return h:m:s
ret = strSysTime
elif self.m_iBigDigits >= 5: # return h:m when display too small
ret = strSysTime[:5]
return ret
def SetBigDigits(self, strTimeString, bForceUpdate):
iOffset = 1
iDigitCount = 1
iStringOffset = 0
strRealTimeString = ""
if strTimeString == "" or strTimeString == None:
return
iStringLength = int(len(strTimeString))
if self.m_bCenterBigDigits:
iColons = strTimeString.count(":")
iWidth = 3 * (iStringLength - iColons) + iColons
iOffset = 1 + max(self.m_iColumns - iWidth, 0) / 2
if iStringLength > self.m_iBigDigits:
iStringOffset = len(strTimeString) - self.m_iBigDigits
iOffset = 1;
if self.m_iOffset != iOffset:
# on offset change force redraw
bForceUpdate = True
self.m_iOffset = iOffset
for i in range(int(iStringOffset), int(iStringLength)):
if self.m_strDigits[iDigitCount] != strTimeString[i] or bForceUpdate:
self.m_strDigits[iDigitCount] = strTimeString[i]
if strTimeString[i] == ":":
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i %i 10\n" % (iDigitCount, iOffset)
elif strTimeString[i].isdigit():
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i %i %s\n" % (iDigitCount, iOffset, strTimeString[i].encode(self.m_strLCDEncoding))
else:
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (iDigitCount)
if strTimeString[i] == ":":
iOffset += 1
else:
iOffset += 3
iDigitCount += 1
while iDigitCount <= self.m_iBigDigits:
if self.m_strDigits[iDigitCount] != "" or bForceUpdate:
self.m_strDigits[iDigitCount] = ""
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (iDigitCount)
iDigitCount += 1
def SetProgressBar(self, percent, pxWidth):
self.m_iProgressBarWidth = int(float(percent) * pxWidth)
return self.m_iProgressBarWidth
def SetPlayingStateIcon(self):
bPlaying = self.m_InfoLabels.IsPlayerPlaying()
bPaused = self.m_InfoLabels.IsPlayerPaused()
bForwarding = self.m_InfoLabels.IsPlayerForwarding()
bRewinding = self.m_InfoLabels.IsPlayerRewinding()
self.m_bstrIconName = b"STOP"
if bForwarding:
self.m_bstrIconName = b"FF"
elif bRewinding:
self.m_bstrIconName = b"FR"
elif bPaused:
self.m_bstrIconName = b"PAUSE"
elif bPlaying:
self.m_bstrIconName = b"PLAY"
def GetRows(self):
return int(self.m_iRows)
def ClearBigDigits(self, fullredraw = True):
for i in range(1,int(self.m_iBigDigits + 1)):
# Clear Digit
if fullredraw:
self.m_bstrSetLineCmds += b"widget_set xbmc lineBigDigit%i 0 0\n" % (i)
self.m_strDigits[i] = ""
# on full redraw, make sure all widget get redrawn by resetting their type
if fullredraw:
for i in range(0, int(self.GetRows())):
self.m_strLineType[i] = ""
self.m_strLineText[i] = ""
self.m_bstrLineIcon[i] = b""
def ClearLine(self, iLine):
self.m_bstrSetLineCmds += b"widget_set xbmc lineIcon%i 0 0 BLOCK_FILLED\n" % (iLine)
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i 0 0 0\n" % (iLine)
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"\"\n" % (iLine, iLine, self.m_iColumns, iLine)
def SetLine(self, mode, iLine, strLine, dictDescriptor, bForce):
if self.m_bStop or not self.tnsocket:
return
if iLine < 0 or iLine >= int(self.m_iRows):
return
plTime = self.m_InfoLabels.GetPlayerTime()
plDuration = self.m_InfoLabels.GetPlayerDuration()
ln = iLine + 1
bExtraForce = False
drawLineText = False
if self.m_strLineType[iLine] != dictDescriptor['type']:
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.ClearDisplay()
else:
if self.m_strLineType[iLine] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.ClearBigDigits()
else:
self.ClearLine(int(iLine + 1))
self.m_strLineType[iLine] = dictDescriptor['type']
bExtraForce = True
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESS and dictDescriptor['text'] != "":
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"%s\"\n" % (ln, ln, self.m_iColumns, ln, dictDescriptor['text'].encode(self.m_strLCDEncoding))
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME and dictDescriptor['text'] != "":
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i 1 %i %i %i m 1 \"%s\"\n" % (ln, ln, self.m_iColumns, ln, dictDescriptor['text'].encode(self.m_strLCDEncoding))
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
strLineLong = self.GetBigDigitTime(mode)
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME:
strLineLong = plTime + self.m_bProgressbarBlank * (self.m_iColumns - len(plTime) - len(plDuration)) + plDuration
else:
strLineLong = strLine
strLineLong.strip()
iMaxLineLen = dictDescriptor['endx'] - (int(dictDescriptor['startx']) - 1)
iScrollSpeed = self.m_Settings.getScrollDelay()
bstrScrollMode = self.m_Settings.getLCDprocScrollMode().encode(self.m_strLCDEncoding)
if len(strLineLong) > iMaxLineLen: # if the string doesn't fit the display...
if iScrollSpeed != 0: # add separator when scrolling enabled
if bstrScrollMode == b"m": # and scrollmode is marquee
strLineLong += self.m_strScrollSeparator
else: # or cut off
strLineLong = strLineLong[:iMaxLineLen]
iScrollSpeed = 1
iStartX = dictDescriptor['startx']
# check if update is required
if strLineLong != self.m_strLineText[iLine] or bForce:
# bigscreen
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_BIGSCREEN:
self.SetBigDigits(strLineLong, bExtraForce)
# progressbar line
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESS:
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i %i %i %i\n" % (ln, iStartX, ln, self.m_iProgressBarWidth)
# progressbar line with time
elif dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_PROGRESSTIME:
drawLineText = True
pLenFract = float(self.m_iColumns - int(len(plDuration) + len(plTime))) / self.m_iColumns
pTimeLen = int(self.m_iProgressBarWidth * pLenFract)
self.m_bstrSetLineCmds += b"widget_set xbmc lineProgress%i %i %i %i\n" % (ln, iStartX + len(plTime), ln, pTimeLen)
# everything else (text, icontext)
else:
drawLineText = True
if len(strLineLong) < iMaxLineLen and dictDescriptor['align'] != LCD_LINEALIGN.LCD_LINEALIGN_LEFT:
iSpaces = iMaxLineLen - len(strLineLong)
if dictDescriptor['align'] == LCD_LINEALIGN.LCD_LINEALIGN_RIGHT:
iStartX += iSpaces
elif dictDescriptor['align'] == LCD_LINEALIGN.LCD_LINEALIGN_CENTER:
iStartX += int(iSpaces / 2)
if drawLineText:
self.m_bstrSetLineCmds += b"widget_set xbmc lineScroller%i %i %i %i %i %s %i \"%s\"\n" % (ln, iStartX, ln, self.m_iColumns, ln, bstrScrollMode, iScrollSpeed, re.escape(strLineLong.encode(self.m_strLCDEncoding, errors="replace")))
# cache contents
self.m_strLineText[iLine] = strLineLong
if dictDescriptor['type'] == LCD_LINETYPE.LCD_LINETYPE_ICONTEXT:
if self.m_bstrLineIcon[iLine] != self.m_bstrIconName or bExtraForce:
self.m_bstrLineIcon[iLine] = self.m_bstrIconName
self.m_bstrSetLineCmds += b"widget_set xbmc lineIcon%i 1 %i %s\n" % (ln, ln, self.m_bstrIconName)
def ClearDisplay(self):
log(LOGDEBUG, "Clearing display contents")
# clear line buffer first
self.FlushLines()
# set all widgets to empty stuff and/or offscreen
for i in range(1,int(self.m_iRows)+1):
self.ClearLine(i)
# add commands to clear big digits
self.ClearBigDigits()
# send to display
self.FlushLines()
def FlushLines(self):
if len(self.m_bstrSetLineCmds) > 0:
# Send complete command package
self.SendCommand(self.m_bstrSetLineCmds, False)
self.m_bstrSetLineCmds = b""
| gpl-2.0 | 4,010,940,431,878,455,000 | 32.681677 | 360 | 0.647411 | false | 3.329905 | false | false | false |
deepmind/deep-verify | deep_verify/src/auto_verifier.py | 1 | 9808 | # coding=utf-8
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatic construction of verifiable layers from a Sonnet module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deep_verify.src.layers import layers
import interval_bound_propagation as ibp
import sonnet as snt
class NotVerifiableError(Exception):
"""Module's graph contains features that do not map to verification layers."""
class VerifiableLayerBuilder(object):
"""Constructs verifiable layers from a Sonnet module."""
def __init__(self, network):
"""Constructor.
Args:
network: `NetworkBuilder` containing network with propagated bounds.
"""
super(VerifiableLayerBuilder, self).__init__()
self._network = network
def build_layers(self):
"""Builds the verifiable layers.
Returns:
List of `SingleVerifiableLayer` for the module.
Raises:
NotVerifiableError: on invalid layer arrangement.
"""
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(self._network.output_module))
if (not isinstance(backstop_node, ibp.ModelInputWrapper) or
self._network.fanout_of(backstop_node) != known_fanout):
raise NotVerifiableError('Invalid connectivity')
if reshape:
raise NotVerifiableError('Cannot end with a reshape operation')
return self._fuse(verifiable_layers)
def _build_layers_rec(self, node, known_fanout=1, batchnorm_node=None):
"""Builds verifiable layers leading up to the given layer output.
The list is constructed by navigating the layers in reverse order,
stopping either when the module's original inputs are reached,
or (for within a ResNet block) when a layer is encountered that has
outputs not processed by this navigation.
Args:
node: Layer output, up to which to build verifiable layers.
known_fanout: Number of immediate outputs of `layer_tensor` that have
already been processed by the caller.
This is typically 1, but sub-classes may invoke with 2 (or possibly
greater) where the network contains branches.
batchnorm_node: The BatchNorm's ConnectedSubgraph object if
`layer_tensor` is the input to a BatchNorm layer, otherwise None.
Returns:
backstop_node: Node, typically the `ibp.ModelInputWrapper`, at which we
stopped backtracking.
known_fanout: Number of immediate outputs of `input_tensor` that were
processed in this call.
This is typically 1, but overrides may return 2 (or possibly greater)
in the presence of branched architectures.
verifiable_layers: List of `SingleVerifiableLayer` whose final element's
output is `outputs`.
reshape: Whether the final element of `verifiable_layers` is followed by
a reshape operation.
Raises:
NotVerifiableError: on invalid layer arrangement.
"""
if (isinstance(node, ibp.ModelInputWrapper) or
self._network.fanout_of(node) != known_fanout):
# Reached the inputs (or start of the enclosing ResNet block).
# No more layers to construct.
if batchnorm_node:
raise NotVerifiableError('Cannot begin with batchnorm')
return node, known_fanout, [], False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'identity'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
return self._build_layers_rec(input_node, batchnorm_node=batchnorm_node)
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'avg_pool'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the AvgPool layer.
if batchnorm_node:
raise NotVerifiableError('AvgPool cannot have batchnorm')
if node.parameters['padding'] == 'SAME':
raise ValueError('"SAME" padding is not supported.')
verifiable_layers.append(layers.AvgPool(
input_node,
node,
kernel_shape=node.parameters['ksize'][1:-1],
strides=node.parameters['strides'][1:-1],
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'reduce_mean'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the AvgPool layer.
if batchnorm_node:
raise NotVerifiableError('AvgPool cannot have batchnorm')
verifiable_layers.append(layers.AvgPool(
input_node,
node,
kernel_shape=None,
strides=None,
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'max_pool'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the MaxPool layer.
if batchnorm_node:
raise NotVerifiableError('MaxPool cannot have batchnorm')
if node.parameters['padding'] == 'SAME':
raise ValueError('"SAME" padding is not supported.')
verifiable_layers.append(layers.MaxPool(
input_node,
node,
kernel_shape=node.parameters['ksize'][1:-1],
strides=node.parameters['strides'][1:-1],
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif (isinstance(node, ibp.IncreasingMonotonicWrapper) and
node.module.__name__ == 'reduce_max'):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
input_tensor, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the MaxPool layer.
if batchnorm_node:
raise NotVerifiableError('MaxPool cannot have batchnorm')
verifiable_layers.append(layers.MaxPool(
input_node,
node,
kernel_shape=None,
strides=None,
reshape=reshape))
return input_tensor, known_fanout, verifiable_layers, False
elif isinstance(node.module, snt.BatchNorm):
# Construct the previous layer with batchnorm.
if batchnorm_node:
raise NotVerifiableError('Cannot have consecutive batchnorms')
input_node, = self._network.dependencies(node)
return self._build_layers_rec(input_node, batchnorm_node=node)
elif isinstance(node.module, snt.BatchReshape):
# Recursively build all preceding layers.
input_node, = self._network.dependencies(node)
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
if batchnorm_node:
raise NotVerifiableError('Reshape cannot have batchnorm')
return backstop_node, known_fanout, verifiable_layers, True
else:
# Recursively build all preceding layers.
input_nodes = self._network.dependencies(node)
if len(input_nodes) != 1:
raise NotVerifiableError('Unary operation expected')
input_node, = input_nodes
backstop_node, known_fanout, verifiable_layers, reshape = (
self._build_layers_rec(input_node))
# Construct the layer.
verifiable_layers.append(layers.create_verifiable_layer(
input_node,
batchnorm_node or node,
node.module,
batch_norm=(batchnorm_node.module if batchnorm_node else None),
reshape=reshape,
parameters=(node.parameters
if isinstance(node, ibp.IncreasingMonotonicWrapper)
else None),
))
return backstop_node, known_fanout, verifiable_layers, False
def _fuse(self, verifiable_layers):
"""Performs fusion of certain layer pairs."""
fused_layers = []
idx = 0
while idx < len(verifiable_layers):
if (idx+2 <= len(verifiable_layers) and
isinstance(verifiable_layers[idx], layers.MaxPool) and
isinstance(verifiable_layers[idx+1], layers.Activation) and
verifiable_layers[idx+1].activation == 'relu'):
# Fuse maxpool with relu.
original = verifiable_layers[idx]
fused_layers.append(layers.MaxPool(original.input_node,
original.output_node,
kernel_shape=original.kernel_shape,
strides=original.strides,
with_relu=True,
reshape=original.reshape))
idx += 2
else:
fused_layers.append(verifiable_layers[idx])
idx += 1
return fused_layers
| apache-2.0 | 5,044,877,332,549,764,000 | 38.869919 | 80 | 0.662622 | false | 4.105483 | false | false | false |
SafeW3rd/Ciphers | simpleSubHacker.py | 1 | 7029 | # Simple Substitution Cipher Hacker
# http://inventwithpython.com/hacking (BSD Licensed)
import os, re, copy, pprint, pyperclip, simpleSubCipher, makeWordPatterns
if not os.path.exists('wordPatterns.py'):
makeWordPatterns.main() # create the wordPatterns.py file
import wordPatterns
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
nonLettersOrSpacePattern = re.compile('[^A-Z\s]')
def main():
message = 'Sy l nlx sr pyyacao l ylwj eiswi upar lulsxrj isr sxrjsxwjr, ia esmm rwctjsxsza sj wmpramh, lxo txmarr jia aqsoaxwa sr pqaceiamnsxu, ia esmm caytra jp famsaqa sj. Sy, px jia pjiac ilxo, ia sr pyyacao rpnajisxu eiswi lyypcor l calrpx ypc lwjsxu sx lwwpcolxwa jp isr sxrjsxwjr, ia esmm lwwabj sj aqax px jia rmsuijarj aqsoaxwa. Jia pcsusx py nhjir sr agbmlsxao sx jisr elh. -Facjclxo Ctrramm'
# Determine the possible valid ciphertext translations.
print('Hacking...')
letterMapping = hackSimpleSub(message)
# Display the results to the user.
print('Mapping:')
pprint.pprint(letterMapping)
print()
print('Original ciphertext:')
print(message)
print()
print('Copying hacked message to clipboard:')
hackedMessage = decryptWithCipherletterMapping(message, letterMapping)
pyperclip.copy(hackedMessage)
print(hackedMessage)
def getBlankCipherletterMapping():
# Returns a dictionary value that is a blank cipherletter mapping.
return {'A': [], 'B': [], 'C': [], 'D': [], 'E': [], 'F': [], 'G': [], 'H': [], 'I': [], 'J': [], 'K': [], 'L': [], 'M': [], 'N': [], 'O': [], 'P': [], 'Q': [], 'R': [], 'S': [], 'T': [], 'U': [], 'V': [], 'W': [], 'X': [], 'Y': [], 'Z': []}
def addLettersToMapping(letterMapping, cipherword, candidate):
# The letterMapping parameter is a "cipherletter mapping" dictionary
# value that the return value of this function starts as a copy of.
# The cipherword parameter is a string value of the ciphertext word.
# The candidate parameter is a possible English word that the
# cipherword could decrypt to.
# This function adds the letters of the candidate as potential
# decryption letters for the cipherletters in the cipherletter
# mapping.
letterMapping = copy.deepcopy(letterMapping)
for i in range(len(cipherword)):
if candidate[i] not in letterMapping[cipherword[i]]:
letterMapping[cipherword[i]].append(candidate[i])
return letterMapping
def intersectMappings(mapA, mapB):
# To intersect two maps, create a blank map, and then add only the
# potential decryption letters if they exist in BOTH maps.
intersectedMapping = getBlankCipherletterMapping()
for letter in LETTERS:
# An empty list means "any letter is possible". In this case just
# copy the other map entirely.
if mapA[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapB[letter])
elif mapB[letter] == []:
intersectedMapping[letter] = copy.deepcopy(mapA[letter])
else:
# If a letter in mapA[letter] exists in mapB[letter], add
# that letter to intersectedMapping[letter].
for mappedLetter in mapA[letter]:
if mappedLetter in mapB[letter]:
intersectedMapping[letter].append(mappedLetter)
return intersectedMapping
def removeSolvedLettersFromMapping(letterMapping):
# Cipher letters in the mapping that map to only one letter are
# "solved" and can be removed from the other letters.
# For example, if 'A' maps to potential letters ['M', 'N'], and 'B'
# maps to ['N'], then we know that 'B' must map to 'N', so we can
# remove 'N' from the list of what 'A' could map to. So 'A' then maps
# to ['M']. Note that now that 'A' maps to only one letter, we can
# remove 'M' from the list of letters for every other
# letter. (This is why there is a loop that keeps reducing the map.)
letterMapping = copy.deepcopy(letterMapping)
loopAgain = True
while loopAgain:
# First assume that we will not loop again:
loopAgain = False
# solvedLetters will be a list of uppercase letters that have one
# and only one possible mapping in letterMapping
solvedLetters = []
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
solvedLetters.append(letterMapping[cipherletter][0])
# If a letter is solved, than it cannot possibly be a potential
# decryption letter for a different ciphertext letter, so we
# should remove it from those other lists.
for cipherletter in LETTERS:
for s in solvedLetters:
if len(letterMapping[cipherletter]) != 1 and s in letterMapping[cipherletter]:
letterMapping[cipherletter].remove(s)
if len(letterMapping[cipherletter]) == 1:
# A new letter is now solved, so loop again.
loopAgain = True
return letterMapping
def hackSimpleSub(message):
intersectedMap = getBlankCipherletterMapping()
cipherwordList = nonLettersOrSpacePattern.sub('', message.upper()).split()
for cipherword in cipherwordList:
# Get a new cipherletter mapping for each ciphertext word.
newMap = getBlankCipherletterMapping()
wordPattern = makeWordPatterns.getWordPattern(cipherword)
if wordPattern not in wordPatterns.allPatterns:
continue # This word was not in our dictionary, so continue.
# Add the letters of each candidate to the mapping.
for candidate in wordPatterns.allPatterns[wordPattern]:
newMap = addLettersToMapping(newMap, cipherword, candidate)
# Intersect the new mapping with the existing intersected mapping.
intersectedMap = intersectMappings(intersectedMap, newMap)
# Remove any solved letters from the other lists.
return removeSolvedLettersFromMapping(intersectedMap)
def decryptWithCipherletterMapping(ciphertext, letterMapping):
# Return a string of the ciphertext decrypted with the letter mapping,
# with any ambiguous decrypted letters replaced with an _ underscore.
# First create a simple sub key from the letterMapping mapping.
key = ['x'] * len(LETTERS)
for cipherletter in LETTERS:
if len(letterMapping[cipherletter]) == 1:
# If there's only one letter, add it to the key.
keyIndex = LETTERS.find(letterMapping[cipherletter][0])
key[keyIndex] = cipherletter
else:
ciphertext = ciphertext.replace(cipherletter.lower(), '_')
ciphertext = ciphertext.replace(cipherletter.upper(), '_')
key = ''.join(key)
# With the key we've created, decrypt the ciphertext.
return simpleSubCipher.decryptMessage(key, ciphertext)
if __name__ == '__main__':
main() | mit | -4,164,585,501,999,395,000 | 43.070513 | 405 | 0.654289 | false | 3.998294 | false | false | false |
fle-internal/content-curation | contentcuration/contentcuration/management/commands/exportchannel.py | 1 | 27865 | import collections
import itertools
import json
import logging as logmodule
import os
import re
import sys
import tempfile
import uuid
import zipfile
from itertools import chain
from django.conf import settings
from django.core.files import File
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction
from django.db.models import Count
from django.db.models import Q
from django.db.models import Sum
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from kolibri.content.utils.search import fuzz
from kolibri_content import models as kolibrimodels
from kolibri_content.router import get_active_content_database
from kolibri_content.router import using_content_database
from le_utils.constants import content_kinds
from le_utils.constants import exercises
from le_utils.constants import file_formats
from le_utils.constants import format_presets
from le_utils.constants import roles
from contentcuration import models as ccmodels
from contentcuration.statistics import record_publish_stats
from contentcuration.utils.files import create_thumbnail_from_base64
from contentcuration.utils.files import get_thumbnail_encoding
from contentcuration.utils.parser import extract_value
from contentcuration.utils.parser import load_json_string
logmodule.basicConfig()
logging = logmodule.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf8')
PERSEUS_IMG_DIR = exercises.IMG_PLACEHOLDER + "/images"
THUMBNAIL_DIMENSION = 128
MIN_SCHEMA_VERSION = "1"
class EarlyExit(BaseException):
def __init__(self, message, db_path):
self.message = message
self.db_path = db_path
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('channel_id', type=str)
parser.add_argument('--force', action='store_true', dest='force', default=False)
parser.add_argument('--user_id', dest='user_id', default=None)
parser.add_argument('--force-exercises', action='store_true', dest='force-exercises', default=False)
# optional argument to send an email to the user when done with exporting channel
parser.add_argument('--email', action='store_true', default=False)
def handle(self, *args, **options):
# license_id = options['license_id']
channel_id = options['channel_id']
force = options['force']
send_email = options['email']
user_id = options['user_id']
force_exercises = options['force-exercises']
channel = ccmodels.Channel.objects.get(pk=channel_id)
# license = ccmodels.License.objects.get(pk=license_id)
try:
create_content_database(channel_id, force, user_id, force_exercises)
increment_channel_version(channel)
mark_all_nodes_as_changed(channel)
add_tokens_to_channel(channel)
fill_published_fields(channel)
# Attributes not getting set for some reason, so just save it here
channel.main_tree.publishing = False
channel.main_tree.changed = False
channel.main_tree.published = True
channel.main_tree.save()
if send_email:
send_emails(channel, user_id)
# use SQLite backup API to put DB into archives folder.
# Then we can use the empty db name to have SQLite use a temporary DB (https://www.sqlite.org/inmemorydb.html)
record_publish_stats(channel)
except EarlyExit as e:
logging.warning("Exited early due to {message}.".format(message=e.message))
self.stdout.write("You can find your database in {path}".format(path=e.db_path))
# No matter what, make sure publishing is set to False once the run is done
finally:
channel.main_tree.publishing = False
channel.main_tree.save()
def send_emails(channel, user_id):
subject = render_to_string('registration/custom_email_subject.txt', {'subject': _('Kolibri Studio Channel Published')})
if user_id:
user = ccmodels.User.objects.get(pk=user_id)
message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': user})
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
else:
# Email all users about updates to channel
for user in itertools.chain(channel.editors.all(), channel.viewers.all()):
message = render_to_string('registration/channel_published_email.txt', {'channel': channel, 'user': user})
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL, )
def create_content_database(channel_id, force, user_id, force_exercises):
channel = ccmodels.Channel.objects.get(pk=channel_id)
# increment the channel version
if not force:
raise_if_nodes_are_all_unchanged(channel)
fh, tempdb = tempfile.mkstemp(suffix=".sqlite3")
with using_content_database(tempdb):
channel.main_tree.publishing = True
channel.main_tree.save()
prepare_export_database(tempdb)
map_channel_to_kolibri_channel(channel)
map_content_nodes(channel.main_tree, channel.language, channel.id, channel.name, user_id=user_id, force_exercises=force_exercises)
map_prerequisites(channel.main_tree)
save_export_database(channel_id)
def create_kolibri_license_object(ccnode):
use_license_description = not ccnode.license.is_custom
return kolibrimodels.License.objects.get_or_create(
license_name=ccnode.license.license_name,
license_description=ccnode.license.license_description if use_license_description else ccnode.license_description
)
def increment_channel_version(channel):
channel.version += 1
channel.last_published = timezone.now()
channel.save()
def assign_license_to_contentcuration_nodes(channel, license):
channel.main_tree.get_family().update(license_id=license.pk)
def map_content_nodes(root_node, default_language, channel_id, channel_name, user_id=None, force_exercises=False):
# make sure we process nodes higher up in the tree first, or else when we
# make mappings the parent nodes might not be there
node_queue = collections.deque()
node_queue.append(root_node)
def queue_get_return_none_when_empty():
try:
return node_queue.popleft()
except IndexError:
return None
# kolibri_license = kolibrimodels.License.objects.get(license_name=license.license_name)
with transaction.atomic():
with ccmodels.ContentNode.objects.delay_mptt_updates():
for node in iter(queue_get_return_none_when_empty, None):
logging.debug("Mapping node with id {id}".format(
id=node.pk))
if node.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists():
children = (node.children.all())
node_queue.extend(children)
kolibrinode = create_bare_contentnode(node, default_language, channel_id, channel_name)
if node.kind.kind == content_kinds.EXERCISE:
exercise_data = process_assessment_metadata(node, kolibrinode)
if force_exercises or node.changed or not node.files.filter(preset_id=format_presets.EXERCISE).exists():
create_perseus_exercise(node, kolibrinode, exercise_data, user_id=user_id)
create_associated_file_objects(kolibrinode, node)
map_tags_to_node(kolibrinode, node)
def create_bare_contentnode(ccnode, default_language, channel_id, channel_name):
logging.debug("Creating a Kolibri contentnode for instance id {}".format(
ccnode.node_id))
kolibri_license = None
if ccnode.license is not None:
kolibri_license = create_kolibri_license_object(ccnode)[0]
language = None
if ccnode.language or default_language:
language, _new = get_or_create_language(ccnode.language or default_language)
kolibrinode, is_new = kolibrimodels.ContentNode.objects.update_or_create(
pk=ccnode.node_id,
defaults={
'kind': ccnode.kind.kind,
'title': ccnode.title if ccnode.parent else channel_name,
'content_id': ccnode.content_id,
'channel_id': channel_id,
'author': ccnode.author or "",
'description': ccnode.description,
'sort_order': ccnode.sort_order,
'license_owner': ccnode.copyright_holder or "",
'license': kolibri_license,
'available': ccnode.get_descendants(include_self=True).exclude(kind_id=content_kinds.TOPIC).exists(), # Hide empty topics
'stemmed_metaphone': ' '.join(fuzz(ccnode.title + ' ' + ccnode.description)),
'lang': language,
'license_name': kolibri_license.license_name if kolibri_license is not None else None,
'license_description': kolibri_license.license_description if kolibri_license is not None else None,
'coach_content': ccnode.role_visibility == roles.COACH,
}
)
if ccnode.parent:
logging.debug("Associating {child} with parent {parent}".format(
child=kolibrinode.pk,
parent=ccnode.parent.node_id
))
kolibrinode.parent = kolibrimodels.ContentNode.objects.get(pk=ccnode.parent.node_id)
kolibrinode.save()
logging.debug("Created Kolibri ContentNode with node id {}".format(ccnode.node_id))
logging.debug("Kolibri node count: {}".format(kolibrimodels.ContentNode.objects.all().count()))
return kolibrinode
def get_or_create_language(language):
return kolibrimodels.Language.objects.get_or_create(
id=language.pk,
lang_code=language.lang_code,
lang_subcode=language.lang_subcode,
lang_name=language.lang_name if hasattr(language, 'lang_name') else language.native_name,
lang_direction=language.lang_direction
)
def create_associated_thumbnail(ccnode, ccfilemodel):
"""
Gets the appropriate thumbnail for export (uses or generates a base64 encoding)
Args:
ccnode (<ContentNode>): node to derive thumbnail from (if encoding is provided)
ccfilemodel (<File>): file to get thumbnail from if no encoding is available
Returns <File> model of encoded, resized thumbnail
"""
encoding = None
try:
encoding = ccnode.thumbnail_encoding and load_json_string(ccnode.thumbnail_encoding).get('base64')
except ValueError:
logging.error("ERROR: node thumbnail is not in correct format ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
return
# Save the encoding if it doesn't already have an encoding
if not encoding:
try:
encoding = get_thumbnail_encoding(str(ccfilemodel))
except IOError:
# ImageMagick may raise an IOError if the file is not a thumbnail. Catch that then just return early.
logging.error("ERROR: cannot identify the thumbnail ({}: {})".format(ccnode.id, ccnode.thumbnail_encoding))
return
ccnode.thumbnail_encoding = json.dumps({
"base64": encoding,
"points": [],
"zoom": 0,
})
ccnode.save()
return create_thumbnail_from_base64(
encoding,
uploaded_by=ccfilemodel.uploaded_by,
file_format_id=ccfilemodel.file_format_id,
preset_id=ccfilemodel.preset_id
)
def create_associated_file_objects(kolibrinode, ccnode):
logging.debug("Creating LocalFile and File objects for Node {}".format(kolibrinode.id))
for ccfilemodel in ccnode.files.exclude(Q(preset_id=format_presets.EXERCISE_IMAGE) | Q(preset_id=format_presets.EXERCISE_GRAPHIE)):
preset = ccfilemodel.preset
fformat = ccfilemodel.file_format
if ccfilemodel.language:
get_or_create_language(ccfilemodel.language)
if preset.thumbnail:
ccfilemodel = create_associated_thumbnail(ccnode, ccfilemodel) or ccfilemodel
kolibrilocalfilemodel, new = kolibrimodels.LocalFile.objects.get_or_create(
pk=ccfilemodel.checksum,
defaults={
'extension': fformat.extension,
'file_size': ccfilemodel.file_size,
}
)
kolibrimodels.File.objects.create(
pk=ccfilemodel.pk,
checksum=ccfilemodel.checksum,
extension=fformat.extension,
available=True, # TODO: Set this to False, once we have availability stamping implemented in Kolibri
file_size=ccfilemodel.file_size,
contentnode=kolibrinode,
preset=preset.pk,
supplementary=preset.supplementary,
lang_id=ccfilemodel.language and ccfilemodel.language.pk,
thumbnail=preset.thumbnail,
priority=preset.order,
local_file=kolibrilocalfilemodel,
)
def create_perseus_exercise(ccnode, kolibrinode, exercise_data, user_id=None):
logging.debug("Creating Perseus Exercise for Node {}".format(ccnode.title))
filename = "{0}.{ext}".format(ccnode.title, ext=file_formats.PERSEUS)
temppath = None
try:
with tempfile.NamedTemporaryFile(suffix="zip", delete=False) as tempf:
temppath = tempf.name
create_perseus_zip(ccnode, exercise_data, tempf)
file_size = tempf.tell()
tempf.flush()
ccnode.files.filter(preset_id=format_presets.EXERCISE).delete()
assessment_file_obj = ccmodels.File.objects.create(
file_on_disk=File(open(temppath, 'r'), name=filename),
contentnode=ccnode,
file_format_id=file_formats.PERSEUS,
preset_id=format_presets.EXERCISE,
original_filename=filename,
file_size=file_size,
uploaded_by_id=user_id,
)
logging.debug("Created exercise for {0} with checksum {1}".format(ccnode.title, assessment_file_obj.checksum))
finally:
temppath and os.unlink(temppath)
def process_assessment_metadata(ccnode, kolibrinode):
# Get mastery model information, set to default if none provided
assessment_items = ccnode.assessment_items.all().order_by('order')
exercise_data = json.loads(ccnode.extra_fields) if ccnode.extra_fields else {}
randomize = exercise_data.get('randomize') if exercise_data.get('randomize') is not None else True
assessment_item_ids = [a.assessment_id for a in assessment_items]
mastery_model = {'type': exercise_data.get('mastery_model') or exercises.M_OF_N}
if mastery_model['type'] == exercises.M_OF_N:
mastery_model.update({'n': exercise_data.get('n') or min(5, assessment_items.count()) or 1})
mastery_model.update({'m': exercise_data.get('m') or min(5, assessment_items.count()) or 1})
elif mastery_model['type'] == exercises.DO_ALL:
mastery_model.update({'n': assessment_items.count() or 1, 'm': assessment_items.count() or 1})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_2:
mastery_model.update({'n': 2, 'm': 2})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_3:
mastery_model.update({'n': 3, 'm': 3})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_5:
mastery_model.update({'n': 5, 'm': 5})
elif mastery_model['type'] == exercises.NUM_CORRECT_IN_A_ROW_10:
mastery_model.update({'n': 10, 'm': 10})
exercise_data.update({
'mastery_model': exercises.M_OF_N,
'legacy_mastery_model': mastery_model['type'],
'randomize': randomize,
'n': mastery_model.get('n'),
'm': mastery_model.get('m'),
'all_assessment_items': assessment_item_ids,
'assessment_mapping': {a.assessment_id: a.type if a.type != 'true_false' else exercises.SINGLE_SELECTION.decode('utf-8') for a in assessment_items},
})
kolibrimodels.AssessmentMetaData.objects.create(
id=uuid.uuid4(),
contentnode=kolibrinode,
assessment_item_ids=json.dumps(assessment_item_ids),
number_of_assessments=assessment_items.count(),
mastery_model=json.dumps(mastery_model),
randomize=randomize,
is_manipulable=ccnode.kind_id == content_kinds.EXERCISE,
)
return exercise_data
def create_perseus_zip(ccnode, exercise_data, write_to_path):
with zipfile.ZipFile(write_to_path, "w") as zf:
try:
exercise_context = {
'exercise': json.dumps(exercise_data, sort_keys=True, indent=4)
}
exercise_result = render_to_string('perseus/exercise.json', exercise_context)
write_to_zipfile("exercise.json", exercise_result, zf)
for question in ccnode.assessment_items.prefetch_related('files').all().order_by('order'):
try:
for image in question.files.filter(preset_id=format_presets.EXERCISE_IMAGE).order_by('checksum'):
image_name = "images/{}.{}".format(image.checksum, image.file_format_id)
if image_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
write_to_zipfile(image_name, content.read(), zf)
for image in question.files.filter(preset_id=format_presets.EXERCISE_GRAPHIE).order_by('checksum'):
svg_name = "images/{0}.svg".format(image.original_filename)
json_name = "images/{0}-data.json".format(image.original_filename)
if svg_name not in zf.namelist() or json_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(image.checksum, str(image)), 'rb') as content:
content = content.read()
content = content.split(exercises.GRAPHIE_DELIMITER)
write_to_zipfile(svg_name, content[0], zf)
write_to_zipfile(json_name, content[1], zf)
write_assessment_item(question, zf)
except Exception as e:
logging.error("Publishing error: {}".format(str(e)))
finally:
zf.close()
def write_to_zipfile(filename, content, zf):
info = zipfile.ZipInfo(filename, date_time=(2013, 3, 14, 1, 59, 26))
info.comment = "Perseus file generated during export process".encode()
info.compress_type = zipfile.ZIP_STORED
info.create_system = 0
zf.writestr(info, content)
def write_assessment_item(assessment_item, zf):
if assessment_item.type == exercises.MULTIPLE_SELECTION:
template = 'perseus/multiple_selection.json'
elif assessment_item.type == exercises.SINGLE_SELECTION or assessment_item.type == 'true_false':
template = 'perseus/multiple_selection.json'
elif assessment_item.type == exercises.INPUT_QUESTION:
template = 'perseus/input_question.json'
elif assessment_item.type == exercises.PERSEUS_QUESTION:
template = 'perseus/perseus_question.json'
else:
raise TypeError("Unrecognized question type on item {}".format(assessment_item.assessment_id))
question = process_formulas(assessment_item.question)
question, question_images = process_image_strings(question, zf)
answer_data = json.loads(assessment_item.answers)
for answer in answer_data:
if assessment_item.type == exercises.INPUT_QUESTION:
answer['answer'] = extract_value(answer['answer'])
else:
answer['answer'] = answer['answer'].replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
answer['answer'] = process_formulas(answer['answer'])
# In case perseus doesn't support =wxh syntax, use below code
answer['answer'], answer_images = process_image_strings(answer['answer'], zf)
answer.update({'images': answer_images})
answer_data = list(filter(lambda a: a['answer'] or a['answer'] == 0, answer_data)) # Filter out empty answers, but not 0
hint_data = json.loads(assessment_item.hints)
for hint in hint_data:
hint['hint'] = process_formulas(hint['hint'])
hint['hint'], hint_images = process_image_strings(hint['hint'], zf)
hint.update({'images': hint_images})
context = {
'question': question,
'question_images': question_images,
'answers': sorted(answer_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
'multiple_select': assessment_item.type == exercises.MULTIPLE_SELECTION,
'raw_data': assessment_item.raw_data.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR),
'hints': sorted(hint_data, lambda x, y: cmp(x.get('order'), y.get('order'))),
'randomize': assessment_item.randomize,
}
result = render_to_string(template, context).encode('utf-8', "ignore")
write_to_zipfile("{0}.json".format(assessment_item.assessment_id), result, zf)
def process_formulas(content):
for match in re.finditer(ur'\$(\$.+\$)\$', content):
content = content.replace(match.group(0), match.group(1))
return content
def process_image_strings(content, zf):
image_list = []
content = content.replace(exercises.CONTENT_STORAGE_PLACEHOLDER, PERSEUS_IMG_DIR)
for match in re.finditer(ur'!\[(?:[^\]]*)]\(([^\)]+)\)', content):
img_match = re.search(ur'(.+/images/[^\s]+)(?:\s=([0-9\.]+)x([0-9\.]+))*', match.group(1))
if img_match:
# Add any image files that haven't been written to the zipfile
filename = img_match.group(1).split('/')[-1]
checksum, ext = os.path.splitext(filename)
image_name = "images/{}.{}".format(checksum, ext[1:])
if image_name not in zf.namelist():
with storage.open(ccmodels.generate_object_storage_name(checksum, filename), 'rb') as imgfile:
write_to_zipfile(image_name, imgfile.read(), zf)
# Add resizing data
if img_match.group(2) and img_match.group(3):
image_data = {'name': img_match.group(1)}
image_data.update({'width': float(img_match.group(2))})
image_data.update({'height': float(img_match.group(3))})
image_list.append(image_data)
content = content.replace(match.group(1), img_match.group(1))
return content, image_list
def map_prerequisites(root_node):
for n in ccmodels.PrerequisiteContentRelationship.objects.filter(prerequisite__tree_id=root_node.tree_id)\
.values('prerequisite__node_id', 'target_node__node_id'):
target_node = kolibrimodels.ContentNode.objects.get(pk=n['target_node__node_id'])
target_node.has_prerequisite.add(n['prerequisite__node_id'])
def map_channel_to_kolibri_channel(channel):
logging.debug("Generating the channel metadata.")
channel.icon_encoding = convert_channel_thumbnail(channel)
channel.save()
kolibri_channel = kolibrimodels.ChannelMetadata.objects.create(
id=channel.id,
name=channel.name,
description=channel.description,
version=channel.version + 1, # Need to save as version being published, not current version
thumbnail=channel.icon_encoding,
root_pk=channel.main_tree.node_id,
root_id=channel.main_tree.node_id,
min_schema_version=MIN_SCHEMA_VERSION, # Need to modify Kolibri so we can import this without importing models
)
logging.info("Generated the channel metadata.")
return kolibri_channel
def convert_channel_thumbnail(channel):
""" encode_thumbnail: gets base64 encoding of thumbnail
Args:
thumbnail (str): file path or url to channel's thumbnail
Returns: base64 encoding of thumbnail
"""
if not channel.thumbnail or channel.thumbnail == '' or 'static' in channel.thumbnail:
return ""
if channel.thumbnail_encoding:
try:
thumbnail_data = channel.thumbnail_encoding
if thumbnail_data.get("base64"):
return thumbnail_data["base64"]
except ValueError:
logging.error("ERROR: channel thumbnail is not in correct format ({}: {})".format(channel.id, channel.thumbnail_encoding))
return get_thumbnail_encoding(channel.thumbnail)
def map_tags_to_node(kolibrinode, ccnode):
""" map_tags_to_node: assigns tags to nodes (creates fk relationship)
Args:
kolibrinode (kolibri.models.ContentNode): node to map tag to
ccnode (contentcuration.models.ContentNode): node with tags to map
Returns: None
"""
tags_to_add = []
for tag in ccnode.tags.all():
t, _new = kolibrimodels.ContentTag.objects.get_or_create(pk=tag.pk, tag_name=tag.tag_name)
tags_to_add.append(t)
kolibrinode.tags = tags_to_add
kolibrinode.save()
def prepare_export_database(tempdb):
call_command("flush", "--noinput", database=get_active_content_database()) # clears the db!
call_command("migrate",
"content",
run_syncdb=True,
database=get_active_content_database(),
noinput=True)
logging.info("Prepared the export database.")
def raise_if_nodes_are_all_unchanged(channel):
logging.debug("Checking if we have any changed nodes.")
changed_models = channel.main_tree.get_family().filter(changed=True)
if changed_models.count() == 0:
logging.debug("No nodes have been changed!")
raise EarlyExit(message="No models changed!", db_path=None)
logging.info("Some nodes are changed.")
def mark_all_nodes_as_changed(channel):
logging.debug("Marking all nodes as changed.")
channel.main_tree.get_family().update(changed=False, published=True)
logging.info("Marked all nodes as changed.")
def save_export_database(channel_id):
logging.debug("Saving export database")
current_export_db_location = get_active_content_database()
target_export_db_location = os.path.join(settings.DB_ROOT, "{id}.sqlite3".format(id=channel_id))
with open(current_export_db_location) as currentf:
storage.save(target_export_db_location, currentf)
logging.info("Successfully copied to {}".format(target_export_db_location))
def add_tokens_to_channel(channel):
if not channel.secret_tokens.filter(is_primary=True).exists():
logging.info("Generating tokens for the channel.")
channel.make_token()
def fill_published_fields(channel):
published_nodes = channel.main_tree.get_descendants().filter(published=True).prefetch_related('files')
channel.total_resource_count = published_nodes.exclude(kind_id=content_kinds.TOPIC).count()
channel.published_kind_count = json.dumps(list(published_nodes.values('kind_id').annotate(count=Count('kind_id')).order_by('kind_id')))
channel.published_size = published_nodes.values('files__checksum', 'files__file_size').distinct(
).aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0
node_languages = published_nodes.exclude(language=None).values_list('language', flat=True)
file_languages = published_nodes.values_list('files__language', flat=True)
language_list = list(set(chain(node_languages, file_languages)))
for lang in language_list:
if lang:
channel.included_languages.add(lang)
channel.save()
| mit | -1,944,273,147,826,961,200 | 42.335925 | 156 | 0.656271 | false | 3.744289 | false | false | false |
ntt-sic/nova | nova/compute/api.py | 1 | 161884 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import notifier
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(notifier.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
started at least once the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = notifier.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
req_ram = max_count * instance_type['memory_mb']
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if len(k) == 0:
msg = _("Metadata property key blank")
LOG.warn(msg)
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
LOG.warn(msg)
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""
Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks):
"""
Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
self.network_api.validate_networks(context, requested_networks)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.InstanceTypeMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.InstanceTypeDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.InstanceTypeDiskTooSmall()
def _check_and_transform_bdm(self, base_options, min_count, max_count,
block_device_mapping, legacy_bdm):
if legacy_bdm:
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, base_options.get('image_ref', ''),
root_device_name)
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.InstanceTypeNotFound(
instance_type_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
self._check_requested_networks(context, requested_networks)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = keypair_obj.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
return base_options
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = instance_obj.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if legacy_bdm and bdm.get('device_name') != 'vda':
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_service.show(context, image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return {}
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_bdm_image_metadata(context,
block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options = self._validate_and_build_base_options(context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id)
block_device_mapping = self._check_and_transform_bdm(
base_options, min_count, max_count,
block_device_mapping, legacy_bdm)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
# NOTE (ndipanov): For now assume that image mapping is legacy
image_bdm = block_device.from_legacy_mapping(
image_properties.get('block_device_mapping', []),
None, instance['root_device_name'])
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping + image_bdm)
for mapping in (image_mapping, image_bdm, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
#NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
self.db.instance_destroy(context, instance['uuid'])
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
"""
Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1 and utils.is_neutron():
self._check_multiple_instances_neutron_ports(requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance['disable_terminate']:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context, instance['uuid']))
reservations = None
if context.is_admin and context.project_id != instance['project_id']:
project_id = instance['project_id']
else:
project_id = context.project_id
if context.user_id != instance['user_id']:
user_id = instance['user_id']
else:
user_id = context.user_id
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
new_type_id = instance.instance_type_id
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
instance,
new_type_id,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
except exception.ObjectActionError:
instance.refresh()
if instance['vm_state'] == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = service_obj.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
mig_cls = migration_obj.Migration
migration = None
for status in ('finished', 'confirming'):
try:
migration = mig_cls.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
downsize_reservations = self._reserve_quota_delta(context,
deltas)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, downsize_reservations,
cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
project_id, user_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
Migration = migration_obj.Migration
try:
migration = Migration.get_by_instance_and_status(
context.elevated(), old_instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
new_instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.InstanceTypeNotFound:
LOG.warning(_("instance type %d not found"),
old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
instance_memory_mb = old_inst_type['memory_mb']
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
system_meta = self.db.instance_system_metadata_get(context,
instance_uuid)
# cleanup volumes
for bdm in bdms:
if bdm['volume_id']:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm['volume_id'],
connector)
self.volume_api.detach(elevated, bdm['volume_id'])
if bdm['delete_on_termination']:
self.volume_api.delete(context, bdm['volume_id'])
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
self.db.block_device_mapping_destroy(context, bdm['id'])
cb(context, instance, bdms, local=True)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=system_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
instance_type = flavors.extract_flavor(instance)
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance['host']:
instance = self.update(context, instance,
task_state=task_states.RESTORING,
expected_task_state=None,
deleted_at=None)
self.compute_rpcapi.restore_instance(context, instance)
else:
self.update(context,
instance,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=None,
deleted_at=None)
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@wrap_check_policy
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
#NOTE(bcwaldon): no policy check here since it should be rolled in to
# search_opts in get_all
def get_active_by_window(self, context, begin, end=None, project_id=None):
"""Get instances that were continuously active over a window."""
return self.db.instance_get_active_by_window_joined(context, begin,
end, project_id)
#NOTE(bcwaldon): this doesn't really belong in this class
def get_instance_type(self, context, instance_type_id):
"""Get an instance type by instance type id."""
return flavors.get_flavor(instance_type_id, ctxt=context)
def get(self, context, instance_id, want_objects=False):
"""Get a single instance with the given instance_id."""
# NOTE(ameade): we still need to support integer ids for ec2
expected_attrs = ['metadata', 'system_metadata',
'security_groups', 'info_cache']
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
if 'all_tenants' in search_opts:
check_policy(context, "get_all_tenants", target)
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
instance_type = flavors.get_flavor_by_flavor_id(
flavor_id)
filters['instance_type_id'] = instance_type['id']
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, basestring):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir,
limit=limit,
marker=marker)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_snapshot(self, context, instance, name, extra_properties=None,
image_id=None):
"""Live Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: A dict containing image metadata
"""
if image_id:
# The image entry has already been created, so just pull the
# metadata.
image_meta = self.image_service.show(context, image_id)
else:
image_meta = self._create_image(context, instance, name,
'snapshot', extra_properties=extra_properties)
instance = self.update(context, instance,
task_state=task_states.IMAGE_LIVE_SNAPSHOT,
expected_task_state=None)
self.compute_rpcapi.live_snapshot_instance(context, instance=instance,
image_id=image_meta['id'])
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
"""
props_copy = dict(extra_properties, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=None)
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(expected_task_state=None)
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param backup_type: daily | weekly
:param extra_properties: dict of extra image properties to include
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = self.get_instance_bdms(context, instance)
mapping = []
for bdm in bdms:
if bdm['no_device']:
continue
volume_id = bdm.get('volume_id')
if volume_id:
# create snapshot based on volume_id
volume = self.volume_api.get(context, volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
bdm['snapshot_id'] = snapshot['id']
bdm['volume_id'] = None
mapping.append(bdm)
for m in block_device.mappings_prepend_dev(properties.get('mappings',
[])):
virtual_name = m['virtual']
if virtual_name in ('ami', 'root'):
continue
assert block_device.is_swap_or_ephemeral(virtual_name)
device_name = m['device']
if device_name in [b['device_name'] for b in mapping
if not b.get('no_device', False)]:
continue
# NOTE(yamahata): swap and ephemeral devices are specified in
# AMI, but disabled for this instance by user.
# So disable those device by no_device.
mapping.append({'device_name': device_name, 'no_device': True})
if mapping:
properties['block_device_mapping'] = mapping
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED,
vm_states.ERROR],
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] in [vm_states.STOPPED,
vm_states.PAUSED,
vm_states.SUSPENDED,
vm_states.ERROR])):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] == task_states.REBOOTING) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING])
elevated = context.elevated()
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance['image_ref'] or ''
files_to_inject = kwargs.pop('files_to_inject', [])
metadata = kwargs.get('metadata', {})
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
instance_type = flavors.extract_flavor(instance)
self._checks_for_create_and_rebuild(context, image_id, image,
instance_type, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""
Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that
# if the system_metadata for this instance is updated
# after we do the get and before we update.. those other
# updates will be lost. Since this problem exists in a lot
# of other places, I think it should be addressed in a DB
# layer overhaul.
sys_metadata = self.db.instance_system_metadata_get(context,
instance['uuid'])
orig_sys_metadata = dict(sys_metadata)
# Remove the old keys
for key in sys_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del sys_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, instance_type)
sys_metadata.update(new_sys_metadata)
self.db.instance_system_metadata_update(context,
instance['uuid'], sys_metadata, True)
return orig_sys_metadata
instance = self.update(context, instance,
task_state=task_states.REBUILDING,
expected_task_state=None,
# Unfortunately we need to set image_ref early,
# so API users can see it.
image_ref=image_href, kernel_id=kernel_id or "",
ramdisk_id=ramdisk_id or "",
progress=0, **kwargs)
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device.legacy_mapping(
self.db.block_device_mapping_get_all_by_instance(
context,
instance['uuid']))
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
reservations = self._reserve_quota_delta(context, deltas)
instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=None)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
reservations = self._reserve_quota_delta(context, deltas)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable and reservations:
QUOTAS.commit(context, reservations)
reservations = []
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
reservations)
@staticmethod
def _resize_quota_delta(context, new_instance_type,
old_instance_type, sense, compare):
"""
Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_instance_type: the target instance type
:param old_instance_type: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_instance_type[resource] -
old_instance_type[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
@staticmethod
def _upsize_quota_delta(context, new_instance_type, old_instance_type):
"""
Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""
Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_instance_type = flavors.get_flavor(
migration_ref['old_instance_type_id'])
new_instance_type = flavors.get_flavor(
migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""
Calculate deltas required to adjust quota for an instance downsize.
"""
old_instance_type = flavors.extract_flavor(instance,
'old_')
new_instance_type = flavors.extract_flavor(instance,
'new_')
return API._resize_quota_delta(context, new_instance_type,
old_instance_type, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, project_id=None):
if not deltas:
return
return QUOTAS.reserve(context, project_id=project_id, **deltas)
@staticmethod
def _resize_cells_support(context, reservations, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
if reservations:
# With cells, the best we can do right now is commit the
# reservations immediately...
QUOTAS.commit(context, reservations,
project_id=instance.project_id)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
# FIXME(sirp): both of these should raise InstanceTypeNotFound instead
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id:
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
reservations = self._reserve_quota_delta(context, deltas,
project_id=instance[
'project_id'])
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
usages = exc.kwargs['usages']
overs = exc.kwargs['overs']
headroom = dict((res, quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=None)
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
# Here when flavor_id is None, the process is considered as migrate.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, reservations, instance,
current_instance_type,
new_instance_type)
reservations = []
self._record_action_start(context, instance, instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type, reservations=reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=None)
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
bdms = self.get_instance_bdms(context, instance, legacy=False)
for bdm in bdms:
if bdm['volume_id']:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.check_attached(context, volume)
# TODO(ndipanov): This check can be generalized as a decorator to
# check for valid combinations of src and dests - for now check
# if it's booted from volume only
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=None)
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=None)
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
file_contents=file_contents)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def attach_volume(self, context, instance, volume_id, device=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
except Exception:
with excutils.save_and_reraise_exception():
self.db.block_device_mapping_destroy_by_instance_and_device(
context, instance['uuid'], device)
return device
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance. This method is separated to make
it easier for cells version to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=None)
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
instance['metadata'] = {}
notifications.send_update(context, instance, instance)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = self.get_instance_metadata(context, instance)
if delete:
_metadata = metadata
else:
_metadata = orig.copy()
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
metadata = self.db.instance_metadata_update(context, instance['uuid'],
_metadata, True)
instance['metadata'] = metadata
notifications.send_update(context, instance, instance)
diff = utils.diff_dict(orig, _metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def get_instance_bdms(self, context, instance, legacy=True):
"""Get all bdm tables for specified instance."""
bdms = self.db.block_device_mapping_get_all_by_instance(context,
instance['uuid'])
if legacy:
return block_device.legacy_mapping(bdms)
return bdms
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance['image_ref']:
return True
if bdms is None:
bdms = self.get_instance_bdms(context, instance, legacy=False)
root_bdm = block_device.get_root_bdm(bdms)
if root_bdm and root_bdm.get('destination_type') == 'volume':
return True
return False
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=None)
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = service_obj.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance = self.update(context, instance, expected_task_state=None,
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
return self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return migration_obj.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm['instance'],
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = self.db.block_device_mapping_get_by_volume_id(context,
volume_id, ['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm['instance'],
volume_id, snapshot_id, delete_info)
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = service_obj.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_power_action(context, action=action,
host=host_name)
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
return self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
attributes and matching values. Ie, to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = service_obj.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return service_obj.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = service_obj.Service.get_by_args(context, host_name,
binary)
service.update(params_to_update)
service.save()
return service
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return instance_action.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return instance_action.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return instance_action.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
aggregate = self._reformat_aggregate_info(aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
return self._reformat_aggregate_info(aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = aggregate_obj.AggregateList.get_all(context)
return [self._reformat_aggregate_info(agg) for agg in aggregates]
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.update_metadata(metadata)
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason='not empty')
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = notifier.get_notifier(service='api')
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
reason=_('Keypair name must be between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'import.end', key_name)
return keypair
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'create.end', key_name)
return keypair, private_key
@exception.wrap_exception(notifier=notifier.get_notifier(service='api'))
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return keypair_obj.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""
Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""
Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
group_ref = self.db.security_group_update(context,
security_group['id'],
group)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
# Get reservations
try:
reservations = QUOTAS.reserve(context, security_groups=-1)
except Exception:
reservations = None
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations)
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
| apache-2.0 | -2,824,583,000,643,304,000 | 42.505509 | 79 | 0.569476 | false | 4.530378 | true | false | false |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/google/cloud/gapic/errorreporting/v1beta1/error_group_service_client.py | 1 | 10469 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/devtools/clouderrorreporting/v1beta1/error_group_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.devtools.clouderrorreporting.v1beta1 ErrorGroupService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.errorreporting.v1beta1 import enums
from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import common_pb2
from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import error_group_service_pb2
class ErrorGroupServiceClient(object):
"""Service for retrieving and updating individual error groups."""
SERVICE_ADDRESS = 'clouderrorreporting.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
_GROUP_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/groups/{group}')
@classmethod
def group_path(cls, project, group):
"""Returns a fully-qualified group resource name string."""
return cls._GROUP_PATH_TEMPLATE.render({
'project': project,
'group': group,
})
@classmethod
def match_project_from_group_name(cls, group_name):
"""Parses the project from a group resource.
Args:
group_name (string): A fully-qualified path representing a group
resource.
Returns:
A string representing the project.
"""
return cls._GROUP_PATH_TEMPLATE.match(group_name).get('project')
@classmethod
def match_group_from_group_name(cls, group_name):
"""Parses the group from a group resource.
Args:
group_name (string): A fully-qualified path representing a group
resource.
Returns:
A string representing the group.
"""
return cls._GROUP_PATH_TEMPLATE.match(group_name).get('group')
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A ErrorGroupServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-error-reporting-v1beta1', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'error_group_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.devtools.clouderrorreporting.v1beta1.ErrorGroupService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.error_group_service_stub = config.create_stub(
error_group_service_pb2.ErrorGroupServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._get_group = api_callable.create_api_call(
self.error_group_service_stub.GetGroup,
settings=defaults['get_group'])
self._update_group = api_callable.create_api_call(
self.error_group_service_stub.UpdateGroup,
settings=defaults['update_group'])
# Service calls
def get_group(self, group_name, options=None):
"""
Get the specified group.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_group_service_client
>>> api = error_group_service_client.ErrorGroupServiceClient()
>>> group_name = api.group_path('[PROJECT]', '[GROUP]')
>>> response = api.get_group(group_name)
Args:
group_name (string): [Required] The group resource name. Written as
<code>projects/<var>projectID</var>/groups/<var>group_name</var></code>.
Call
<a href=\"/error-reporting/reference/rest/v1beta1/projects.groupStats/list\">
<code>groupStats.list</code></a> to return a list of groups belonging to
this project.
Example: <code>projects/my-project-123/groups/my-group</code>
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = error_group_service_pb2.GetGroupRequest(
group_name=group_name)
return self._get_group(request, options)
def update_group(self, group, options=None):
"""
Replace the data for the specified group.
Fails if the group does not exist.
Example:
>>> from google.cloud.gapic.errorreporting.v1beta1 import error_group_service_client
>>> from google.cloud.proto.devtools.clouderrorreporting.v1beta1 import common_pb2
>>> api = error_group_service_client.ErrorGroupServiceClient()
>>> group = common_pb2.ErrorGroup()
>>> response = api.update_group(group)
Args:
group (:class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup`): [Required] The group which replaces the resource on the server.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.devtools.clouderrorreporting.v1beta1.common_pb2.ErrorGroup` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = error_group_service_pb2.UpdateGroupRequest(group=group)
return self._update_group(request, options)
| mit | 8,283,711,628,382,273,000 | 40.543651 | 169 | 0.647244 | false | 4.358451 | true | false | false |
kmoocdev2/edx-platform | openedx/features/course_experience/views/course_outline.py | 1 | 5344 | """
Views to show a course outline.
"""
import re
import datetime
from completion import waffle as completion_waffle
from django.contrib.auth.models import User
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from waffle.models import Switch
from web_fragments.fragment import Fragment
from courseware.courses import get_course_overview_with_access
from openedx.core.djangoapps.plugin_api.views import EdxFragmentView
from student.models import CourseEnrollment
from util.milestones_helpers import get_course_content_milestones
from xmodule.modulestore.django import modulestore
from ..utils import get_course_outline_block_tree, get_resume_block
DEFAULT_COMPLETION_TRACKING_START = datetime.datetime(2018, 1, 24, tzinfo=UTC)
class CourseOutlineFragmentView(EdxFragmentView):
"""
Course outline fragment to be shown in the unified course view.
"""
def render_to_fragment(self, request, course_id=None, page_context=None, **kwargs):
"""
Renders the course outline as a fragment.
"""
course_key = CourseKey.from_string(course_id)
course_overview = get_course_overview_with_access(request.user, 'load', course_key, check_if_enrolled=True)
course = modulestore().get_course(course_key)
course_block_tree = get_course_outline_block_tree(request, course_id)
if not course_block_tree:
return None
context = {
'csrf': csrf(request)['csrf_token'],
'course': course_overview,
'due_date_display_format': course.due_date_display_format,
'blocks': course_block_tree
}
resume_block = get_resume_block(course_block_tree)
if not resume_block:
self.mark_first_unit_to_resume(course_block_tree)
xblock_display_names = self.create_xblock_id_and_name_dict(course_block_tree)
gated_content = self.get_content_milestones(request, course_key)
context['gated_content'] = gated_content
context['xblock_display_names'] = xblock_display_names
html = render_to_string('course_experience/course-outline-fragment.html', context)
return Fragment(html)
def create_xblock_id_and_name_dict(self, course_block_tree, xblock_display_names=None):
"""
Creates a dictionary mapping xblock IDs to their names, using a course block tree.
"""
if xblock_display_names is None:
xblock_display_names = {}
if course_block_tree.get('id'):
xblock_display_names[course_block_tree['id']] = course_block_tree['display_name']
if course_block_tree.get('children'):
for child in course_block_tree['children']:
self.create_xblock_id_and_name_dict(child, xblock_display_names)
return xblock_display_names
def get_content_milestones(self, request, course_key):
"""
Returns dict of subsections with prerequisites and whether the prerequisite has been completed or not
"""
def _get_key_of_prerequisite(namespace):
return re.sub('.gating', '', namespace)
all_course_milestones = get_course_content_milestones(course_key)
uncompleted_prereqs = {
milestone['content_id']
for milestone in get_course_content_milestones(course_key, user_id=request.user.id)
}
gated_content = {
milestone['content_id']: {
'completed_prereqs': milestone['content_id'] not in uncompleted_prereqs,
'prerequisite': _get_key_of_prerequisite(milestone['namespace'])
}
for milestone in all_course_milestones
}
return gated_content
def user_enrolled_after_completion_collection(self, user, course_key):
"""
Checks that the user has enrolled in the course after 01/24/2018, the date that
the completion API began data collection. If the user has enrolled in the course
before this date, they may see incomplete collection data. This is a temporary
check until all active enrollments are created after the date.
"""
user = User.objects.get(username=user)
try:
user_enrollment = CourseEnrollment.objects.get(
user=user,
course_id=course_key,
is_active=True
)
return user_enrollment.created > self._completion_data_collection_start()
except CourseEnrollment.DoesNotExist:
return False
def _completion_data_collection_start(self):
"""
Returns the date that the ENABLE_COMPLETION_TRACKING waffle switch was enabled.
"""
# pylint: disable=protected-access
switch_name = completion_waffle.waffle()._namespaced_name(completion_waffle.ENABLE_COMPLETION_TRACKING)
try:
return Switch.objects.get(name=switch_name).created
except Switch.DoesNotExist:
return DEFAULT_COMPLETION_TRACKING_START
def mark_first_unit_to_resume(self, block_node):
children = block_node.get('children')
if children:
children[0]['resume_block'] = True
self.mark_first_unit_to_resume(children[0])
| agpl-3.0 | -8,452,376,268,452,737,000 | 38.007299 | 115 | 0.661302 | false | 4.104455 | false | false | false |
FilipeMaia/arrayfire-python | arrayfire/array.py | 1 | 30063 | #######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
arrayfire.Array class and helper functions.
"""
import inspect
from .library import *
from .util import *
from .util import _is_number
from .bcast import _bcast_var
from .base import *
from .index import *
from .index import _Index4
def _create_array(buf, numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_array(ct.pointer(out_arr), ct.c_void_p(buf),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def _create_empty_array(numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_handle(ct.pointer(out_arr),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def constant_array(val, d0, d1=None, d2=None, d3=None, dtype=Dtype.f32):
"""
Internal function to create a C array. Should not be used externall.
"""
if not isinstance(dtype, ct.c_int):
if isinstance(dtype, int):
dtype = ct.c_int(dtype)
elif isinstance(dtype, Dtype):
dtype = ct.c_int(dtype.value)
else:
raise TypeError("Invalid dtype")
out = ct.c_void_p(0)
dims = dim4(d0, d1, d2, d3)
if isinstance(val, complex):
c_real = ct.c_double(val.real)
c_imag = ct.c_double(val.imag)
if (dtype.value != Dtype.c32.value and dtype.value != Dtype.c64.value):
dtype = Dtype.c32.value
safe_call(backend.get().af_constant_complex(ct.pointer(out), c_real, c_imag,
4, ct.pointer(dims), dtype))
elif dtype.value == Dtype.s64.value:
c_val = ct.c_longlong(val.real)
safe_call(backend.get().af_constant_long(ct.pointer(out), c_val, 4, ct.pointer(dims)))
elif dtype.value == Dtype.u64.value:
c_val = ct.c_ulonglong(val.real)
safe_call(backend.get().af_constant_ulong(ct.pointer(out), c_val, 4, ct.pointer(dims)))
else:
c_val = ct.c_double(val)
safe_call(backend.get().af_constant(ct.pointer(out), c_val, 4, ct.pointer(dims), dtype))
return out
def _binary_func(lhs, rhs, c_func):
out = Array()
other = rhs
if (_is_number(rhs)):
ldims = dim4_to_tuple(lhs.dims())
rty = implicit_dtype(rhs, lhs.type())
other = Array()
other.arr = constant_array(rhs, ldims[0], ldims[1], ldims[2], ldims[3], rty.value)
elif not isinstance(rhs, Array):
raise TypeError("Invalid parameter to binary function")
safe_call(c_func(ct.pointer(out.arr), lhs.arr, other.arr, _bcast_var.get()))
return out
def _binary_funcr(lhs, rhs, c_func):
out = Array()
other = lhs
if (_is_number(lhs)):
rdims = dim4_to_tuple(rhs.dims())
lty = implicit_dtype(lhs, rhs.type())
other = Array()
other.arr = constant_array(lhs, rdims[0], rdims[1], rdims[2], rdims[3], lty.value)
elif not isinstance(lhs, Array):
raise TypeError("Invalid parameter to binary function")
c_func(ct.pointer(out.arr), other.arr, rhs.arr, _bcast_var.get())
return out
def _ctype_to_lists(ctype_arr, dim, shape, offset=0):
if (dim == 0):
return list(ctype_arr[offset : offset + shape[0]])
else:
dim_len = shape[dim]
res = [[]] * dim_len
for n in range(dim_len):
res[n] = _ctype_to_lists(ctype_arr, dim - 1, shape, offset)
offset += shape[0]
return res
def _slice_to_length(key, dim):
tkey = [key.start, key.stop, key.step]
if tkey[0] is None:
tkey[0] = 0
elif tkey[0] < 0:
tkey[0] = dim - tkey[0]
if tkey[1] is None:
tkey[1] = dim
elif tkey[1] < 0:
tkey[1] = dim - tkey[1]
if tkey[2] is None:
tkey[2] = 1
return int(((tkey[1] - tkey[0] - 1) / tkey[2]) + 1)
def _get_info(dims, buf_len):
elements = 1
numdims = len(dims)
idims = [1]*4
for i in range(numdims):
elements *= dims[i]
idims[i] = dims[i]
if (elements == 0):
if (buf_len != 0):
idims = [buf_len, 1, 1, 1]
numdims = 1
else:
raise RuntimeError("Invalid size")
return numdims, idims
def _get_indices(key):
S = Index(slice(None))
inds = _Index4(S, S, S, S)
if isinstance(key, tuple):
n_idx = len(key)
for n in range(n_idx):
inds[n] = Index(key[n])
else:
inds[0] = Index(key)
return inds
def _get_assign_dims(key, idims):
dims = [1]*4
for n in range(len(idims)):
dims[n] = idims[n]
if _is_number(key):
dims[0] = 1
return dims
elif isinstance(key, slice):
dims[0] = _slice_to_length(key, idims[0])
return dims
elif isinstance(key, ParallelRange):
dims[0] = _slice_to_length(key.S, idims[0])
return dims
elif isinstance(key, BaseArray):
# If the array is boolean take only the number of nonzeros
if(key.dtype() is Dtype.b8):
dims[0] = int(sum(key))
else:
dims[0] = key.elements()
return dims
elif isinstance(key, tuple):
n_inds = len(key)
for n in range(n_inds):
if (_is_number(key[n])):
dims[n] = 1
elif (isinstance(key[n], BaseArray)):
# If the array is boolean take only the number of nonzeros
if(key[n].dtype() is Dtype.b8):
dims[n] = int(sum(key[n]))
else:
dims[n] = key[n].elements()
elif (isinstance(key[n], slice)):
dims[n] = _slice_to_length(key[n], idims[n])
elif (isinstance(key[n], ParallelRange)):
dims[n] = _slice_to_length(key[n].S, idims[n])
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
return dims
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
def transpose(a, conj=False):
"""
Perform the transpose on an input.
Parameters
-----------
a : af.Array
Multi dimensional arrayfire array.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to applied for complex inputs.
Returns
--------
out : af.Array
Containing the tranpose of `a` for all batches.
"""
out = Array()
safe_call(backend.get().af_transpose(ct.pointer(out.arr), a.arr, conj))
return out
def transpose_inplace(a, conj=False):
"""
Perform inplace transpose on an input.
Parameters
-----------
a : af.Array
- Multi dimensional arrayfire array.
- Contains transposed values on exit.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to applied for complex inputs.
Note
-------
Input `a` needs to be a square matrix or a batch of square matrices.
"""
safe_call(backend.get().af_transpose_inplace(a.arr, conj))
class Array(BaseArray):
"""
A multi dimensional array container.
Parameters
----------
src : optional: array.array, list or C buffer. default: None.
- When `src` is `array.array` or `list`, the data is copied to create the Array()
- When `src` is None, an empty buffer is created.
dims : optional: tuple of ints. default: (0,)
- When using the default values of `dims`, the dims are caclulated as `len(src)`
dtype: optional: str or arrayfire.Dtype. default: None.
- if str, must be one of the following:
- 'f' for float
- 'd' for double
- 'b' for bool
- 'B' for unsigned char
- 'i' for signed 32 bit integer
- 'I' for unsigned 32 bit integer
- 'l' for signed 64 bit integer
- 'L' for unsigned 64 bit integer
- 'F' for 32 bit complex number
- 'D' for 64 bit complex number
- if arrayfire.Dtype, must be one of the following:
- Dtype.f32 for float
- Dtype.f64 for double
- Dtype.b8 for bool
- Dtype.u8 for unsigned char
- Dtype.s32 for signed 32 bit integer
- Dtype.u32 for unsigned 32 bit integer
- Dtype.s64 for signed 64 bit integer
- Dtype.u64 for unsigned 64 bit integer
- Dtype.c32 for 32 bit complex number
- Dtype.c64 for 64 bit complex number
- if None, Dtype.f32 is assumed
Attributes
-----------
arr: ctypes.c_void_p
ctypes variable containing af_array from arrayfire library.
Examples
--------
Creating an af.Array() from array.array()
>>> import arrayfire as af
>>> import array
>>> a = array.array('f', (1, 2, 3, 4))
>>> b = af.Array(a, (2,2))
>>> af.display(b)
[2 2 1 1]
1.0000 3.0000
2.0000 4.0000
Creating an af.Array() from a list
>>> import arrayfire as af
>>> import array
>>> a = [1, 2, 3, 4]
>>> b = af.Array(a)
>>> af.display(b)
[4 1 1 1]
1.0000
2.0000
3.0000
4.0000
Creating an af.Array() from numpy.array()
>>> import numpy as np
>>> import arrayfire as af
>>> a = np.random.random((2,2))
>>> a
array([[ 0.33042524, 0.36135449],
[ 0.86748649, 0.42199135]])
>>> b = af.Array(a.ctypes.data, a.shape, a.dtype.char)
>>> af.display(b)
[2 2 1 1]
0.3304 0.8675
0.3614 0.4220
Note
-----
- The class is currently limited to 4 dimensions.
- arrayfire.Array() uses column major format.
- numpy uses row major format by default which can cause issues during conversion
"""
def __init__(self, src=None, dims=(0,), dtype=None):
super(Array, self).__init__()
buf=None
buf_len=0
if dtype is not None:
if isinstance(dtype, str):
type_char = dtype
else:
type_char = to_typecode[dtype.value]
else:
type_char = None
_type_char='f'
backend.lock()
if src is not None:
if (isinstance(src, Array)):
safe_call(backend.get().af_retain_array(ct.pointer(self.arr), src.arr))
return
host = __import__("array")
if isinstance(src, host.array):
buf,buf_len = src.buffer_info()
_type_char = src.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, list):
tmp = host.array('f', src)
buf,buf_len = tmp.buffer_info()
_type_char = tmp.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, int) or isinstance(src, ct.c_void_p):
buf = src
numdims, idims = _get_info(dims, buf_len)
elements = 1
for dim in idims:
elements *= dim
if (elements == 0):
raise RuntimeError("Expected dims when src is data pointer")
if (type_char is None):
raise TypeError("Expected type_char when src is data pointer")
_type_char = type_char
else:
raise TypeError("src is an object of unsupported class")
if (type_char is not None and
type_char != _type_char):
raise TypeError("Can not create array of requested type from input data type")
self.arr = _create_array(buf, numdims, idims, to_dtype[_type_char])
else:
if type_char is None:
type_char = 'f'
numdims = len(dims)
idims = [1] * 4
for n in range(numdims):
idims[n] = dims[n]
self.arr = _create_empty_array(numdims, idims, to_dtype[type_char])
def copy(self):
"""
Performs a deep copy of the array.
Returns
-------
out: af.Array()
An identical copy of self.
"""
out = Array()
safe_call(backend.get().af_copy_array(ct.pointer(out.arr), self.arr))
return out
def __del__(self):
"""
Release the C array when going out of scope
"""
if self.arr.value:
backend.get().af_release_array(self.arr)
def device_ptr(self):
"""
Return the device pointer held by the array.
Returns
------
ptr : int
Contains location of the device pointer
Note
----
- This can be used to integrate with custom C code and / or PyCUDA or PyOpenCL.
- No mem copy is peformed, this function returns the raw device pointer.
"""
ptr = ct.c_void_p(0)
backend.get().af_get_device_ptr(ct.pointer(ptr), self.arr)
return ptr.value
def elements(self):
"""
Return the number of elements in the array.
"""
num = ct.c_ulonglong(0)
safe_call(backend.get().af_get_elements(ct.pointer(num), self.arr))
return num.value
def dtype(self):
"""
Return the data type as a arrayfire.Dtype enum value.
"""
dty = ct.c_int(Dtype.f32.value)
safe_call(backend.get().af_get_type(ct.pointer(dty), self.arr))
return to_dtype[typecodes[dty.value]]
def type(self):
"""
Return the data type as an int.
"""
return self.dtype().value
def dims(self):
"""
Return the shape of the array as a tuple.
"""
d0 = ct.c_longlong(0)
d1 = ct.c_longlong(0)
d2 = ct.c_longlong(0)
d3 = ct.c_longlong(0)
safe_call(backend.get().af_get_dims(ct.pointer(d0), ct.pointer(d1),
ct.pointer(d2), ct.pointer(d3), self.arr))
dims = (d0.value,d1.value,d2.value,d3.value)
return dims[:self.numdims()]
def numdims(self):
"""
Return the number of dimensions of the array.
"""
nd = ct.c_uint(0)
safe_call(backend.get().af_get_numdims(ct.pointer(nd), self.arr))
return nd.value
def is_empty(self):
"""
Check if the array is empty i.e. it has no elements.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_empty(ct.pointer(res), self.arr))
return res.value
def is_scalar(self):
"""
Check if the array is scalar i.e. it has only one element.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_scalar(ct.pointer(res), self.arr))
return res.value
def is_row(self):
"""
Check if the array is a row i.e. it has a shape of (1, cols).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_row(ct.pointer(res), self.arr))
return res.value
def is_column(self):
"""
Check if the array is a column i.e. it has a shape of (rows, 1).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_column(ct.pointer(res), self.arr))
return res.value
def is_vector(self):
"""
Check if the array is a vector i.e. it has a shape of one of the following:
- (rows, 1)
- (1, cols)
- (1, 1, vols)
- (1, 1, 1, batch)
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_vector(ct.pointer(res), self.arr))
return res.value
def is_complex(self):
"""
Check if the array is of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_complex(ct.pointer(res), self.arr))
return res.value
def is_real(self):
"""
Check if the array is not of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_real(ct.pointer(res), self.arr))
return res.value
def is_double(self):
"""
Check if the array is of double precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_double(ct.pointer(res), self.arr))
return res.value
def is_single(self):
"""
Check if the array is of single precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_single(ct.pointer(res), self.arr))
return res.value
def is_real_floating(self):
"""
Check if the array is real and of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_realfloating(ct.pointer(res), self.arr))
return res.value
def is_floating(self):
"""
Check if the array is of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_floating(ct.pointer(res), self.arr))
return res.value
def is_integer(self):
"""
Check if the array is of integer type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_integer(ct.pointer(res), self.arr))
return res.value
def is_bool(self):
"""
Check if the array is of type b8.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_bool(ct.pointer(res), self.arr))
return res.value
def __add__(self, other):
"""
Return self + other.
"""
return _binary_func(self, other, backend.get().af_add)
def __iadd__(self, other):
"""
Perform self += other.
"""
self = _binary_func(self, other, backend.get().af_add)
return self
def __radd__(self, other):
"""
Return other + self.
"""
return _binary_funcr(other, self, backend.get().af_add)
def __sub__(self, other):
"""
Return self - other.
"""
return _binary_func(self, other, backend.get().af_sub)
def __isub__(self, other):
"""
Perform self -= other.
"""
self = _binary_func(self, other, backend.get().af_sub)
return self
def __rsub__(self, other):
"""
Return other - self.
"""
return _binary_funcr(other, self, backend.get().af_sub)
def __mul__(self, other):
"""
Return self * other.
"""
return _binary_func(self, other, backend.get().af_mul)
def __imul__(self, other):
"""
Perform self *= other.
"""
self = _binary_func(self, other, backend.get().af_mul)
return self
def __rmul__(self, other):
"""
Return other * self.
"""
return _binary_funcr(other, self, backend.get().af_mul)
def __truediv__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __itruediv__(self, other):
"""
Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rtruediv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __div__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __idiv__(self, other):
"""
Perform other / self.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rdiv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __mod__(self, other):
"""
Return self % other.
"""
return _binary_func(self, other, backend.get().af_mod)
def __imod__(self, other):
"""
Perform self %= other.
"""
self = _binary_func(self, other, backend.get().af_mod)
return self
def __rmod__(self, other):
"""
Return other % self.
"""
return _binary_funcr(other, self, backend.get().af_mod)
def __pow__(self, other):
"""
Return self ** other.
"""
return _binary_func(self, other, backend.get().af_pow)
def __ipow__(self, other):
"""
Perform self **= other.
"""
self = _binary_func(self, other, backend.get().af_pow)
return self
def __rpow__(self, other):
"""
Return other ** self.
"""
return _binary_funcr(other, self, backend.get().af_pow)
def __lt__(self, other):
"""
Return self < other.
"""
return _binary_func(self, other, backend.get().af_lt)
def __gt__(self, other):
"""
Return self > other.
"""
return _binary_func(self, other, backend.get().af_gt)
def __le__(self, other):
"""
Return self <= other.
"""
return _binary_func(self, other, backend.get().af_le)
def __ge__(self, other):
"""
Return self >= other.
"""
return _binary_func(self, other, backend.get().af_ge)
def __eq__(self, other):
"""
Return self == other.
"""
return _binary_func(self, other, backend.get().af_eq)
def __ne__(self, other):
"""
Return self != other.
"""
return _binary_func(self, other, backend.get().af_neq)
def __and__(self, other):
"""
Return self & other.
"""
return _binary_func(self, other, backend.get().af_bitand)
def __iand__(self, other):
"""
Perform self &= other.
"""
self = _binary_func(self, other, backend.get().af_bitand)
return self
def __or__(self, other):
"""
Return self | other.
"""
return _binary_func(self, other, backend.get().af_bitor)
def __ior__(self, other):
"""
Perform self |= other.
"""
self = _binary_func(self, other, backend.get().af_bitor)
return self
def __xor__(self, other):
"""
Return self ^ other.
"""
return _binary_func(self, other, backend.get().af_bitxor)
def __ixor__(self, other):
"""
Perform self ^= other.
"""
self = _binary_func(self, other, backend.get().af_bitxor)
return self
def __lshift__(self, other):
"""
Return self << other.
"""
return _binary_func(self, other, backend.get().af_bitshiftl)
def __ilshift__(self, other):
"""
Perform self <<= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftl)
return self
def __rshift__(self, other):
"""
Return self >> other.
"""
return _binary_func(self, other, backend.get().af_bitshiftr)
def __irshift__(self, other):
"""
Perform self >>= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftr)
return self
def __neg__(self):
"""
Return -self
"""
return 0 - self
def __pos__(self):
"""
Return +self
"""
return self
def __invert__(self):
"""
Return ~self
"""
return self == 0
def __nonzero__(self):
return self != 0
# TODO:
# def __abs__(self):
# return self
def __getitem__(self, key):
"""
Return self[key]
Note
----
Ellipsis not supported as key
"""
try:
out = Array()
n_dims = self.numdims()
inds = _get_indices(key)
safe_call(backend.get().af_index_gen(ct.pointer(out.arr),
self.arr, ct.c_longlong(n_dims), inds.pointer))
return out
except RuntimeError as e:
raise IndexError(str(e))
def __setitem__(self, key, val):
"""
Perform self[key] = val
Note
----
Ellipsis not supported as key
"""
try:
n_dims = self.numdims()
if (_is_number(val)):
tdims = _get_assign_dims(key, self.dims())
other_arr = constant_array(val, tdims[0], tdims[1], tdims[2], tdims[3], self.type())
del_other = True
else:
other_arr = val.arr
del_other = False
out_arr = ct.c_void_p(0)
inds = _get_indices(key)
safe_call(backend.get().af_assign_gen(ct.pointer(out_arr),
self.arr, ct.c_longlong(n_dims), inds.pointer,
other_arr))
safe_call(backend.get().af_release_array(self.arr))
if del_other:
safe_call(backend.get().af_release_array(other_arr))
self.arr = out_arr
except RuntimeError as e:
raise IndexError(str(e))
def to_ctype(self, row_major=False, return_shape=False):
"""
Return the data as a ctype C array after copying to host memory
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: The ctypes array of the appropriate type and length.
else :
(res, dims): tuple of the ctypes array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_ctype on empty array")
tmp = transpose(self) if row_major else self
ctype_type = to_c_type[self.type()] * self.elements()
res = ctype_type()
safe_call(backend.get().af_get_data_ptr(ct.pointer(res), self.arr))
if (return_shape):
return res, self.dims()
else:
return res
def to_array(self, row_major=False, return_shape=False):
"""
Return the data as array.array
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: array.array of the appropriate type and length.
else :
(res, dims): array.array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_array on empty array")
res = self.to_ctype(row_major, return_shape)
host = __import__("array")
h_type = to_typecode[self.type()]
if (return_shape):
return host.array(h_type, res[0]), res[1]
else:
return host.array(h_type, res)
def to_list(self, row_major=False):
"""
Return the data as list
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: list of the appropriate type and length.
else :
(res, dims): list and the shape of the array
"""
ct_array, shape = self.to_ctype(row_major, True)
return _ctype_to_lists(ct_array, len(shape) - 1, shape)
def __repr__(self):
"""
Displays the meta data of the arrayfire array.
Note
----
Use arrayfire.display(a) to display the contents of the array.
"""
# Having __repr__ directly print things is a bad idea
# Placeholder for when af_array_to_string is available
# safe_call(backend.get().af_array_to_string...
return 'Type: arrayfire.Array()\nShape: %s\nType char: %s' % \
(self.dims(), to_typecode[self.type()])
def __array__(self):
"""
Constructs a numpy.array from arrayfire.Array
"""
import numpy as np
res = np.empty(self.dims(), dtype=np.dtype(to_typecode[self.type()]), order='F')
safe_call(backend.get().af_get_data_ptr(ct.c_void_p(res.ctypes.data), self.arr))
return res
def display(a):
"""
Displays the contents of an array.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array
"""
expr = inspect.stack()[1][-2]
try:
if (expr is not None):
st = expr[0].find('(') + 1
en = expr[0].rfind(')')
print('%s' % expr[0][st:en])
safe_call(backend.get().af_print_array(a.arr))
except:
safe_call(backend.get().af_print_array(a.arr))
from .algorithm import sum
| bsd-3-clause | -5,315,669,486,786,941,000 | 27.30791 | 100 | 0.520939 | false | 3.725279 | false | false | false |
ethansshaw/stellavitrum | ScienceFairProcess.py | 1 | 10349 | #!/usr/bin/env python
"""
Written by Ethan Shaw
"""
from astropy.io import fits
import sys, png, math, os
colors = ['red', 'green', 'blue']
# Build x_axis_len rows, each containing y_axis_len columns
# access with PNG_data[row][column]
def buildMatrix(x_axis_len, y_axis_len, greyscale=True):
# set up empty list (matrix) to hold pixels
PNG_data = []
for row in range(0, x_axis_len):
PNG_data.append([])
#start out with an empty list, then put another list in it so it looks like [[]]
#gives the value of x_axis_len empty lists inside the list PNG_data
for column in range (0, y_axis_len):
if ( greyscale ):
PNG_data[row].append(0)
#this is the grayscale value
else:
#Red,Green,Blue values
PNG_data[row].append(0)
PNG_data[row].append(0)
PNG_data[row].append(0)
return PNG_data
#Function defines ONLY color
def setPixel(PNG_data, red, green, blue, row, column):
PNG_data[row][column*3] = red
PNG_data[row][column*3 + 1] = green
PNG_data[row][column*3 + 2] = blue
def getPixelRange(PNG_data, x_axis_len, y_axis_len):
# determine the PNG_data range for scaling purposes
pixel_max = 0
pixel_min = pow(2,16)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
pixel_max = max(pixel_max, PNG_data[row][column])
pixel_min = min(pixel_min, PNG_data[row][column])
print "Pixel max: {0:.20f}, Pixel min: {0:.20f}".format(pixel_max, pixel_min)
return (pixel_max, pixel_min)
def getRawDataFromFile(file, color):
#this reads the file and structures into useable format
hdulist = fits.open(file)
entry = hdulist[0]
bits_per_pixel = entry.header['BITPIX']
number_axis = entry.header['NAXIS']
x_axis_len = entry.header['NAXIS2']
y_axis_len = entry.header['NAXIS1']
print "Data dimensions: (%d x %d) - %d axes, %d bpp" % (x_axis_len, y_axis_len, number_axis, bits_per_pixel)
# data is a bunch of columns, each containing one row
data = entry.data
pixelData = buildMatrix(x_axis_len, y_axis_len, greyscale=False)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
try:
image_value = data[row][column]
red, green, blue = ( 0,0,0 )
if ( color == 'red' ):
red = image_value
elif ( color == 'green' ):
green = image_value
elif ( color == 'blue' ):
blue = image_value
setPixel(pixelData, red, green, blue, row, column)
except Exception as e:
print "Error accessing (%d, %d) : %s" % (row, column, e)
raise SystemExit
return pixelData
def combineTwoDataSets(dataSet1, dataSet2):
print "Combining two data sets"
# step 1, make a new data set the size of the two
x_axis_len = len(dataSet1)
y_axis_len = len(dataSet1[0])
combinedData = buildMatrix(x_axis_len, y_axis_len)
# step 2, step over each pixel in the sets and ADD to the combined pixel value
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
combinedData[row][column] = dataSet1[row][column] + dataSet2[row][column]
# step 3, return the combined data set
return combinedData
def writePNGFile(PNGData, output_directory, dataset_name):
filename = '%s/out_data_%s.png' % ( output_directory, dataset_name)
f = open(filename, 'wb') # binary mode is important
w = png.Writer(len(PNGData[0])/3, len(PNGData), greyscale=False,alpha=False, bitdepth=16)
w.write(f, PNGData)
print "Image written to file %s" % filename
def linearScale(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 - 1) / pixel_range
#This gives us a linearly scaled value between 0 (black) and 2^16 (white)
val = int(round(value * ratio))
return val
def logarithmicScalePixel(value, min_value, max_value):
try:
val = abs(math.log(value))
# for min and max we use 0, 100 for now
return linearScalePixel(val, 0, 100)
except Exception as e:
return 0
def linearScalePixel(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 -1 ) / pixel_range
#This gives us a linearly scaled value between 0 (black) and 2^16 (white)
val = int(round(value * ratio))
if ( val < 0 or val > 65535 ):
print "value %d (orig: %f was outside range %.e, %.e" % ( val, value, min_value, max_value )
raise SystemExit
return val
def scaleDataSet(scalingFunction, dataSet):
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
pixel_max, pixel_min = getPixelRange(dataSet, x_axis_len, y_axis_len)
print "Max: %f, Min: %f" % (pixel_max, pixel_min)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataSet[row][column] = scalingFunction(dataSet[row][column], pixel_min, pixel_max)
return dataSet
def linearScaleDataSet(dataSet):
return scaleDataSet(linearScalePixel, dataSet)
def logScaleDataSet(dataSet):
return scaleDataSet(logarithmicScalePixel, dataSet)
def zeroOutliersInDataSet(dataSet, interQuartileScaleFactor=1.5):
(firstQuartile, median, thirdQuartile, interQuartile) = getQuartileValues(dataSet)
minAllowedValue = max(0, firstQuartile - (interQuartileScaleFactor * interQuartile))
maxAllowedValue = thirdQuartile + (interQuartileScaleFactor * interQuartile)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataValue = dataSet[row][column]
if (dataValue < minAllowedValue or dataValue > maxAllowedValue):
dataSet[row][column] = 0
return dataSet
def histogramData(dataSet, output_directory, dataset_folder="data"):
pixel_max, pixel_min = getPixelRange(dataSet, len(dataSet), len(dataSet[0]))
histogram = {}
number_of_groups = 10
group_size = (pixel_max - pixel_min) / (number_of_groups *1.0)
for i in range(0, number_of_groups):
histogram[int(i*group_size)] = 0
histogramKeys = histogram.keys()
histogramKeys.sort()
histogramKeys.reverse()
for x in range(0, len(dataSet)):
for y in range(0, len(dataSet[0])):
pixel = dataSet[x][y]
for key in histogramKeys:
if pixel < key:
histogram[key] = int(histogram[key] + 1)
continue
histogramKeys.reverse()
output_path = "%s/%s_histogram.csv" % (output_directory, dataset_folder)
outf = open(output_path, "w")
for key in histogramKeys:
kname = "Bucket %d" % key
outf.write("%s,%d\n" % (kname, histogram[key]))
outf.close()
print "Histogram written to file %s" % output_path
def getMean(dataSet):
sum = 0.0
count = 0
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > 0:
sum = sum + dataSet[row][column]
count = count + 1
return sum/count
def getMedian(dataSet):
dataList = []
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if (dataSet[row][column] > 0):
dataList.append(dataSet[row][column])
dataList.sort()
middleNumber = len(dataList)/2
return dataList[middleNumber]
def getQuartileValues(dataSet):
median = getMedian(dataSet)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
valuesLessThanMedian = []
valuesGreaterThanMedian = []
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > median:
valuesGreaterThanMedian.append(dataSet[row][column])
else:
valuesLessThanMedian.append(dataSet[row][column])
valuesGreaterThanMedian.sort()
valuesLessThanMedian.sort()
firstQuartile = valuesLessThanMedian[len(valuesLessThanMedian)/2]
thirdQuartile = valuesGreaterThanMedian[len(valuesGreaterThanMedian)/2]
interQuartile = thirdQuartile - firstQuartile
print "Quartiles: ", firstQuartile, median, thirdQuartile, interQuartile
return (firstQuartile, median, thirdQuartile, interQuartile)
def getMode(dataSet):
dataPoints = {}
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
point = dataSet[row][column]
if (point > 0):
if dataPoints.has_key(point):
dataPoints[point] = dataPoints[point] + 1
else:
dataPoints[point] = 1
maxCount = 0
maxValue = None
for (value, count) in dataPoints.items():
if count > maxCount:
maxCount = count
maxValue = value
print "%f was the max value and occurred %d times" % (maxValue, maxCount)
return maxValue
def outputToCSVFile(filename, dataSet):
outf = open(filename, 'w')
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
line = ""
for column in range (0, y_axis_len):
line = "%s%.7e," % (line, dataSet[row][column])
line = line + "\n"
outf.write(line)
outf.close()
print "Wrote to %s" % filename
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s <file1> <file2> ..." % sys.argv[0]
raise SystemExit
files = sys.argv[1:]
i = 0
PNGDataSets = []
#rData = getRawDataFromFile(files[0], "red")
#writePNGFile(rData, "red")
#raise SystemExit
full_path1 = os.path.abspath(files[0])
folder_path = os.path.split(full_path1)[0]
dataset_folder = os.path.basename(folder_path)
for file in files:
dataSet = getRawDataFromFile(file, colors[i])
i = i + 1
dataSetNormalized = zeroOutliersInDataSet(dataSet)
PNGDataSets.append(dataSetNormalized)
combinedSet = None
for dataSet in PNGDataSets:
if (combinedSet == None):
combinedSet = dataSet
else:
combinedSet = combineTwoDataSets(combinedSet, dataSet)
parent_directory = os.path.split(os.path.abspath(sys.argv[0]))[0]
output_directory = os.path.join(parent_directory, "Results")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print "Created directory %s" % output_directory
else:
print "Output directory %s exists" % output_directory
# now linear scale the outlier set
scaledSet = linearScaleDataSet(combinedSet)
histogramData(scaledSet, output_directory, dataset_folder)
#raise SystemExit
filename = "%s/dataset_%s.csv" % (output_directory, dataset_folder)
outputToCSVFile(filename, scaledSet)
writePNGFile(scaledSet, output_directory, dataset_folder) #old was writePNGFile(combinedSet, "combined")
print "Process complete" | mit | 1,193,926,610,286,185,000 | 30.081081 | 109 | 0.689439 | false | 2.839232 | false | false | false |
sbelskie/symplicity | Symplicity/local_settings.py | 1 | 2695 | """
Django settings for Symplicity project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'symptom_tracker',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Symplicity.urls'
WSGI_APPLICATION = 'Symplicity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'symplicity',
'USER':'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
| apache-2.0 | 5,726,592,050,074,065,000 | 24.683168 | 77 | 0.687199 | false | 3.294621 | false | false | false |
oldm/OldMan | oldman/schema/hydra.py | 1 | 1520 | from uuid import uuid4
from rdflib import URIRef, RDF, RDFS
from oldman.vocabulary import OLDM_CORRESPONDING_CLASS
class HydraSchemaAdapter(object):
"""Updates some Hydra patterns in the schema graph:
- hydra:Link: create a hydra:Class, subclass of the link range that support the same operations
"""
def update_schema_graph(self, graph):
graph = graph.skolemize()
graph = self._update_links(graph)
return graph
@staticmethod
def _update_links(graph):
links = list(graph.subjects(RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Link")))
for link_property in links:
new_class_iri = URIRef(u"http://localhost/.well-known/genid/link_class/%s" % uuid4())
graph.add((new_class_iri, RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Class")))
graph.add((link_property, URIRef(OLDM_CORRESPONDING_CLASS), new_class_iri))
# Ranges --> upper classes
ranges = list(graph.objects(link_property, RDFS.range))
for range in ranges:
graph.add((new_class_iri, RDFS.subClassOf, range))
# supported Operations
supported_operation_property = URIRef(u"http://www.w3.org/ns/hydra/core#supportedOperation")
operations = list(graph.objects(link_property, supported_operation_property))
for operation in operations:
graph.add((new_class_iri, supported_operation_property, operation))
return graph
| bsd-3-clause | -5,559,144,035,788,686,000 | 35.190476 | 105 | 0.646053 | false | 3.653846 | false | false | false |
blackshirt/dompetku | dompetku/handler/services.py | 1 | 3750 | #!/usr/bin/env python
#
# Copyright @2014 [email protected]
# Licensed: see Python license
"""Module to handle json services."""
import datetime
import json
import peewee
import tornado.web
import tornado.escape
from dompetku.handler import base
from dompetku.utils import jsonify
from dompetku.model import Transaksi, User
from dompetku.form import TransaksiForm
class TransaksiContainer(object):
def __init__(self, user):
self.user = user
def find_one(self, tid):
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, Transaksi.tid == tid)
if trn.exists():
data = trn.get() # Transaksi instance
return data
return None
def find_data(self, *expr):
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, *expr)
return trn # Transaksi QueryResultWrapper
return None
class DataSources(TransaksiContainer):
def __init__(self, user):
self.user = user
super().__init__(self.user)
def get_one(self, tid):
data = self.find_one(tid)
if data is not None:
results = {
'tid': data.tid,
'user': data.user.name,
'info': data.info,
'amount': data.amount,
'transdate': data.transdate,
'memo': data.memo
}
return results # dict of transaksi item
def get_data(self, *expr):
temporary = {}
results = []
data = self.find_data(*expr)
for item in data:
temporary = {
'tid': item.tid,
'user': item.user.name,
'info': item.info,
'transdate': item.transdate,
'amount': item.amount,
'memo': item.memo
}
results.append(temporary)
return results # list of dict of transaksi item
class ApiTransactions(base.BaseHandler):
def initialize(self):
self.dsc = DataSources(self.current_user)
@tornado.web.authenticated
def get(self, *kondisi):
if kondisi:
data = self.dsc.get_data(*kondisi)
else:
# get data bulan sekarang
today = datetime.date.today()
cur_month = today.month
expr = (Transaksi.transdate.month == cur_month,)
data = self.dsc.get_data(expr)
self.write(jsonify(data))
def post(self):
data = tornado.escape.json_decode(self.request.body)
info = data.get('info')
amount = data.get('amount')
memo = data.get('memo')
try:
active_user = User.get(User.name == self.current_user)
except peewee.DoesNotExist:
active_user = None
return
item = Transaksi.insert(info = info,
amount=amount,
tipe=10,
user=active_user.uid,
memo=memo )
last_id = item.execute()
transaksi = Transaksi.get(Transaksi.tid == last_id)
response = {'info': transaksi.info,
'user': transaksi.user.name,
'amount': transaksi.amount,
'memo': transaksi.memo,
'transdate': transaksi.transdate}
self.write(jsonify(response))
| bsd-2-clause | -4,487,325,386,750,957,000 | 29.241935 | 92 | 0.524267 | false | 4.002134 | false | false | false |
spel-uchile/SUCHAI-Flight-Software | sandbox/log_parser.py | 1 | 1956 | import re
import argparse
import pandas as pd
# General expressions
re_error = re.compile(r'\[ERROR\]\[(\d+)\]\[(\w+)\](.+)')
re_warning = re.compile(r'\[WARN \]\[(\d+)\]\[(\w+)\](.+)')
re_info = re.compile(r'\[INFO \]\[(\d+)\]\[(\w+)\](.+)')
re_debug = re.compile(r'\[DEBUG\]\[(\d+)\]\[(\w+)\](.+)')
re_verbose = re.compile(r'\[VERB \]\[(\d+)\]\[(\w+)\](.+)')
# Specific expressions
re_cmd_run = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Running the command: (.+)')
re_cmd_result = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Command result: (\d+)')
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser()
# General expressions
parser.add_argument('file', type=str, help="Log file")
parser.add_argument('--error', action="store_const", const=re_error)
parser.add_argument('--warning', action="store_const", const=re_warning)
parser.add_argument('--info', action="store_const", const=re_info)
parser.add_argument('--debug', action="store_const", const=re_debug)
parser.add_argument('--verbose', action="store_const", const=re_verbose)
# Specific expressions
parser.add_argument('--cmd-run', action="store_const", const=re_cmd_run)
parser.add_argument('--cmd-result', action="store_const", const=re_cmd_result)
return parser.parse_args()
def parse_text(text, regexp):
return regexp.findall(text)
def save_parsed(logs, file, format=None):
df = pd.DataFrame(logs)
# print(df)
df.to_csv(file)
if __name__ == "__main__":
args = get_parameters()
print("Reading file {}...".format(args.file))
with open(args.file) as logfile:
text = logfile.read()
args = vars(args)
print(args)
for type, regexp in args.items():
if type is not "file" and regexp is not None:
print("Parsing {}...", type)
logs = parse_text(text, regexp)
save_parsed(logs, args["file"]+type+".csv")
| gpl-3.0 | 7,759,135,881,385,060,000 | 30.548387 | 83 | 0.599182 | false | 3.233058 | false | false | false |
pombreda/ruffus | ruffus/test/test_verbosity.py | 1 | 8627 | #!/usr/bin/env python
from __future__ import print_function
"""
test_verbosity.py
"""
temp_dir = "test_verbosity/"
import unittest
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = list(map(__import__, [ruffus_name]))[0]
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
import re
ruffus = __import__ (ruffus_name)
for attr in "pipeline_run", "pipeline_printout", "suffix", "transform", "split", "merge", "dbdict", "follows", "mkdir", "originate", "Pipeline":
globals()[attr] = getattr (ruffus, attr)
RethrownJobError = ruffus.ruffus_exceptions.RethrownJobError
RUFFUS_HISTORY_FILE = ruffus.ruffus_utility.RUFFUS_HISTORY_FILE
CHECKSUM_FILE_TIMESTAMPS = ruffus.ruffus_utility.CHECKSUM_FILE_TIMESTAMPS
#---------------------------------------------------------------
# create initial files
#
@mkdir(temp_dir + 'data/scratch/lg/what/one/two/three/')
@originate([ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ])
def create_initial_file_pairs(output_files):
# create both files as necessary
for output_file in output_files:
with open(output_file, "w") as oo: pass
#---------------------------------------------------------------
# first task
@transform(create_initial_file_pairs, suffix(".start"), ".output.1")
def first_task(input_files, output_file):
with open(output_file, "w"): pass
#---------------------------------------------------------------
# second task
@transform(first_task, suffix(".output.1"), ".output.2")
def second_task(input_files, output_file):
with open(output_file, "w"): pass
test_pipeline = Pipeline("test")
test_pipeline.originate(output = [ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ],
task_func = create_initial_file_pairs)
test_pipeline.transform(task_func = first_task, input = create_initial_file_pairs, filter = suffix(".start"), output = ".output.1")
test_pipeline.transform(input = first_task, filter = suffix(".output.1"), output = ".output.2", task_func= second_task)
decorator_syntax = 0
oop_syntax = 1
class Test_verbosity(unittest.TestCase):
#___________________________________________________________________________
#
# test_printout_abbreviated_path1
#___________________________________________________________________________
def test_printout_abbreviated_path1(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue(re.search('Job needs update:.*Missing files.*'
'\[\.\.\./job2\.a\.start, test_verbosity/job2\.b\.start, \.\.\./job2.a.output.1\]', ret, re.DOTALL) is not None)
#___________________________________________________________________________
#
# test_printout_abbreviated_path2
#___________________________________________________________________________
def test_printout_abbreviated_path2(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../three/job1.a.start, test_verbosity/job1.b.start, .../three/job1.a.output.1]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path3
#___________________________________________________________________________
def test_printout_abbreviated_path3(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../two/three/job1.a.start, test_verbosity/job1.b.start, .../two/three/job1.a.output.1]' in s.getvalue())
#___________________________________________________________________________
#
# test_printout_abbreviated_path9
#___________________________________________________________________________
def test_printout_abbreviated_path9(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[%sdata/scratch/lg/what/one/two/three/job2.a.start, test_verbosity/job2.b.start,' % temp_dir in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path0
#___________________________________________________________________________
def test_printout_abbreviated_path0(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
path_str = os.path.abspath('%sdata/scratch/lg/what/one/two/three/job2.a.start' % temp_dir)
path_str = '[[%s' % path_str
self.assertTrue(path_str in ret)
self.assertTrue(temp_dir + 'job2.b.start]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path_minus_60
#___________________________________________________________________________
def test_printout_abbreviated_path_minus_60(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[<???> ne/two/three/job2.a.start, test_verbosity/job2.b.start]' in ret)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
unittest.main()
| mit | 8,074,170,189,742,017,000 | 44.405263 | 144 | 0.534833 | false | 3.688328 | true | false | false |
ewilazarus/snnm | snnm.py | 1 | 2886 | #!/usr/bin/python
"""
snnm
~~~~
This module contains the source code for `snnm`
Snnm is an utility tool created to fetch synonyms for a given expression from
the web and print them to the console.
"""
import bs4
import click
import requests
BASE_URL = 'http://www.thesaurus.com/browse/'
def _fetch_html(expression):
"""
Returns the HTML containing the synonyms for the given expression
"""
response = requests.get(BASE_URL + expression)
response.raise_for_status()
return response.text
def _parse_html(html):
"""
Returns a parsed list of synonyms out of a given HTML
"""
parser = bs4.BeautifulSoup(html, 'html.parser')
synonyms = []
divs = parser.find_all('div', class_='relevancy-list')
for div in divs:
spans = div.find_all('span', class_='text')
synonyms += [str(span.string) for span in spans]
return synonyms
def fetch_synonyms(expression):
"""
Returns a list of synonyms for a given expression
"""
try:
return _parse_html(_fetch_html(expression))
except requests.exceptions.HTTPError:
return []
def clean(synonyms):
"""
Returns the deduped, sorted list of synonyms
"""
deduped_synonyms = list(set([s.strip() for s in synonyms]))
deduped_synonyms.sort()
return deduped_synonyms
def print_synonyms_ugly(synonyms):
"""
Prints the list of synonyms to the screen
"""
for synonym in synonyms:
print(synonym)
def print_synonyms(synonyms):
"""
Prints the list of synonyms to the screen, using colors and breakpoints
"""
if not synonyms:
click.secho('-- NO RESULTS --', fg='red')
click.echo()
else:
height = click.get_terminal_size()[1] - 3
batch = [synonyms[i:i+height] for i in range(0, len(synonyms), height)]
for synonyms in batch:
for synonym in synonyms:
click.secho(synonym, fg='yellow')
click.echo()
if batch.index(synonyms) != len(batch) - 1:
click.echo('Press any key to continue ...', nl=False)
key = click.getchar()
if key == '\x03':
raise KeyboardInterrupt()
click.echo()
@click.command(name='snnm')
@click.argument('expression')
@click.option('-u', '--ugly-output', is_flag=True)
def main(expression, ugly_output):
"""
List synonyms for an expression
"""
try:
if not ugly_output:
click.echo('Synonyms for {}:'.format(click.style(expression,
fg='blue')))
synonyms = clean(fetch_synonyms(expression))
if ugly_output:
print_synonyms_ugly(synonyms)
else:
print_synonyms(synonyms)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| mit | -3,771,467,267,364,749,000 | 24.767857 | 79 | 0.592862 | false | 3.6075 | false | false | false |
vmalloc/gossip | gossip/utils.py | 2 | 2484 | import itertools
from .exceptions import CannotResolveDependencies
from .helpers import DONT_CARE, FIRST
def topological_sort_registrations(registrations, unconstrained_priority=DONT_CARE):
graph = _build_dependency_graph(registrations, unconstrained_priority=unconstrained_priority)
returned_indices = _topological_sort(range(len(registrations)), graph)
assert len(returned_indices) == len(registrations)
return [registrations[idx] for idx in returned_indices]
def _topological_sort(indices, graph):
independent = sorted(set(indices) - set(m for n, m in graph), reverse=True)
returned = []
while independent:
n = independent.pop()
returned.append(n)
for m in indices:
edge = (n, m)
if m == n:
assert edge not in graph
continue
if edge in graph:
graph.remove(edge)
# check if m is now independent
for edge in graph:
if edge[1] == m:
# not indepdendent
break
else:
# no other incoming edges to m
independent.append(m)
if graph:
raise CannotResolveDependencies('Cyclic dependency detected')
return returned
def _build_dependency_graph(registrations, unconstrained_priority):
providers_by_name = {}
for index, registration in enumerate(registrations):
for name in registration.provides:
providers = providers_by_name.get(name)
if providers is None:
providers = providers_by_name[name] = []
providers.append(index)
graph = set()
for needer_index, registration in enumerate(registrations):
for need in registration.needs:
for provider_index in providers_by_name.get(need, []):
graph.add((provider_index, needer_index))
if unconstrained_priority != DONT_CARE:
caring_indices = set([idx for idx, r in enumerate(registrations) if r.needs or r.provides])
non_caring_indices = set(range(len(registrations))) - caring_indices
for caring_index, uncaring_index in itertools.product(caring_indices, non_caring_indices):
if unconstrained_priority == FIRST:
pair = (uncaring_index, caring_index)
else:
pair = (caring_index, uncaring_index)
graph.add(pair)
return graph
| bsd-3-clause | 8,597,037,287,050,342,000 | 36.636364 | 99 | 0.612721 | false | 4.388693 | false | false | false |
philanthropy-u/edx-platform | openedx/core/djangoapps/user_authn/views/tests/test_login.py | 1 | 29762 | # coding:utf-8
"""
Tests for student activation and login
"""
import json
import unicodedata
import unittest
import ddt
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest
from django.test.client import Client
from django.test.utils import override_settings
from django.urls import NoReverseMatch, reverse
from mock import patch
from six import text_type
from six.moves import range
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.password_policy.compliance import (
NonCompliantPasswordException,
NonCompliantPasswordWarning
)
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, waffle
from openedx.core.djangoapps.user_authn.cookies import jwt_cookies
from openedx.core.djangoapps.user_authn.tests.utils import setup_login_oauth_client
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import RegistrationFactory, UserFactory, UserProfileFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class LoginTest(CacheIsolationTestCase):
"""
Test login_user() view
"""
ENABLED_CACHES = ['default']
LOGIN_FAILED_WARNING = 'Email or password is incorrect'
ACTIVATE_ACCOUNT_WARNING = 'In order to sign in, you need to activate your account'
username = 'test'
user_email = '[email protected]'
password = 'test_password'
def setUp(self):
"""Setup a test user along with its registration and profile"""
super(LoginTest, self).setUp()
self.user = UserFactory.build(username=self.username, email=self.user_email)
self.user.set_password(self.password)
self.user.save()
RegistrationFactory(user=self.user)
UserProfileFactory(user=self.user)
self.client = Client()
cache.clear()
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [self.user_email])
def test_login_success_unicode_email(self):
unicode_email = u'test' + six.unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(
unicode_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_last_login_updated(self):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert self.user.last_login > old_last_login
def test_login_success_prevent_auth_user_writes(self):
with waffle().override(PREVENT_AUTH_USER_WRITES, True):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert old_last_login == self.user.last_login
def test_login_fail_no_user_exists(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'[email protected]'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response(
self.user_email,
'wrong_password',
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response(
self.user_email,
self.password
)
self._assert_response(response, success=False,
value="In order to sign in, you need to activate your account.")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_not_activated_with_correct_credentials(self):
"""
Tests that when user login with the correct credentials but with an inactive
account, the system, send account activation email notification to the user.
"""
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(
self.user_email,
self.password,
)
self._assert_response(response, success=False, value=self.ACTIVATE_ACCOUNT_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch('openedx.core.djangoapps.user_authn.views.login._log_and_raise_inactive_user_auth_error')
def test_login_inactivated_user_with_incorrect_credentials(self, mock_inactive_user_email_and_error):
"""
Tests that when user login with incorrect credentials and an inactive account,
the system does *not* send account activation email notification to the user.
"""
nonexistent_email = '[email protected]'
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(nonexistent_email, 'incorrect_password')
self.assertFalse(mock_inactive_user_email_and_error.called)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
def test_login_unicode_email(self):
unicode_email = self.user_email + six.unichr(40960)
response, mock_audit_log = self._login_response(
unicode_email,
self.password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = self.password + six.unichr(1972)
response, mock_audit_log = self._login_response(
self.user_email,
unicode_password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
self.assertEqual(user_info["username"], self.user.username)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
expected = {
'target': '/',
}
self.assertDictContainsSubset(expected, response.context_data)
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_logout_logging_no_pii(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
def test_login_ratelimited_success(self):
# Try (and fail) logging in with fewer attempts than the limit of 30
# and verify that you can still successfully log in afterwards.
for i in range(20):
password = u'test_password{0}'.format(i)
response, _audit_log = self._login_response(self.user_email, password)
self._assert_response(response, success=False)
# now try logging in with a valid password
response, _audit_log = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
def test_login_ratelimited(self):
# try logging in 30 times, the default limit in the number of failed
# login attempts in one 5 minute period before the rate gets limited
for i in range(30):
password = u'test_password{0}'.format(i)
self._login_response(self.user_email, password)
# check to see if this response indicates that this was ratelimited
response, _audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value='Too many failed login attempts')
@patch.dict("django.conf.settings.FEATURES", {"DISABLE_SET_JWT_COOKIES_FOR_TESTS": False})
def test_login_refresh(self):
def _assert_jwt_cookie_present(response):
self.assertEqual(response.status_code, 200)
self.assertIn(jwt_cookies.jwt_refresh_cookie_name(), self.client.cookies)
setup_login_oauth_client()
response, _ = self._login_response(self.user_email, self.password)
_assert_jwt_cookie_present(response)
response = self.client.post(reverse('login_refresh'))
_assert_jwt_cookie_present(response)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session(self):
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_no_user_profile(self):
"""
Assert that user login with cas (Central Authentication Service) is
redirect to dashboard in case of lms or upload_transcripts in case of
cms
"""
user = UserFactory.build(username='tester', email='[email protected]')
user.set_password(self.password)
user.save()
# Assert that no profile is created.
self.assertFalse(hasattr(user, 'profile'))
creds = {'email': '[email protected]', 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
user = User.objects.get(pk=user.pk)
# Assert that profile is created.
self.assertTrue(hasattr(user, 'profile'))
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_url_not_having_login_required_decorator(self):
# accessing logout url as it does not have login-required decorator it will avoid redirect
# and go inside the enforce_single_login
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
url = reverse('logout')
response = client1.get(url)
self.assertEqual(response.status_code, 200)
def test_change_enrollment_400(self):
"""
Tests that a 400 in change_enrollment doesn't lead to a 404
and in fact just logs in the user without incident
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def test_change_enrollment_200_no_redirect(self):
"""
Tests "redirect_url" is None if change_enrollment returns a HttpResponse
with no content
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponse()
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance(self):
"""
Tests _enforce_password_policy_compliance succeeds when no exception is thrown
"""
enforce_compliance_path = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_path) as mock_check_password_policy_compliance:
mock_check_password_policy_compliance.return_value = HttpResponse()
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertTrue(response_content.get('success'))
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_exception(self):
"""
Tests _enforce_password_policy_compliance fails with an exception thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordException()
response, _ = self._login_response(
self.user_email,
self.password
)
response_content = json.loads(response.content)
self.assertFalse(response_content.get('success'))
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Password reset', mail.outbox[0].subject)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_warning(self):
"""
Tests _enforce_password_policy_compliance succeeds with a warning thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordWarning('Test warning')
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertIn('Test warning', self.client.session['_messages'])
self.assertTrue(response_content.get('success'))
@ddt.data(
('test_password', 'test_password', True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
(unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
)
@ddt.unpack
def test_password_unicode_normalization_login(self, password, password_entered, login_success):
"""
Tests unicode normalization on user's passwords on login.
"""
self.user.set_password(password)
self.user.save()
response, _ = self._login_response(self.user.email, password_entered)
self._assert_response(response, success=login_success)
def _login_response(self, email, password, patched_audit_log=None, extra_post_params=None):
"""
Post the login info
"""
if patched_audit_log is None:
patched_audit_log = 'openedx.core.djangoapps.user_authn.views.login.AUDIT_LOG'
post_params = {'email': email, 'password': password}
if extra_post_params is not None:
post_params.update(extra_post_params)
with patch(patched_audit_log) as mock_audit_log:
result = self.client.post(self.url, post_params)
return result, mock_audit_log
def _assert_response(self, response, success=None, value=None):
"""
Assert that the response had status 200 and returned a valid
JSON-parseable dict.
If success is provided, assert that the response had that
value for 'success' in the JSON dict.
If value is provided, assert that the response contained that
value for 'value' in the JSON dict.
"""
self.assertEqual(response.status_code, 200)
try:
response_dict = json.loads(response.content)
except ValueError:
self.fail("Could not parse response content as JSON: %s"
% str(response.content))
if success is not None:
self.assertEqual(response_dict['success'], success)
if value is not None:
msg = ("'%s' did not contain '%s'" %
(unicode(response_dict['value']), unicode(value)))
self.assertIn(value, response_dict['value'], msg)
def _assert_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertIn(log_string, format_string)
def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
"""
Tests how login_user() interacts with ExternalAuth, in particular Shib
"""
def setUp(self):
super(ExternalAuthShibTest, self).setUp()
self.course = CourseFactory.create(
org='Stanford',
number='456',
display_name='NO SHIB',
user_id=self.user.id,
)
self.shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.user.id,
)
self.user_w_map = UserFactory.create(email='[email protected]')
self.extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=self.user_w_map)
self.user_w_map.save()
self.extauth.save()
self.user_wo_map = UserFactory.create(email='[email protected]')
self.user_wo_map.save()
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_page_redirect(self):
"""
Tests that when a shib user types their email address into the login page, they get redirected
to the shib login.
"""
response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertEqual(obj, {
'success': False,
'redirect': reverse('shib-login'),
})
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_required_dashboard(self):
"""
Tests redirects to when @login_required to dashboard, which should always be the normal login,
since there is no course context
"""
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/login?next=/dashboard')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_externalauth_login_required_course_context(self):
"""
Tests the redirects when visiting course-specific URL with @login_required.
Should vary by course depending on its enrollment_domain
"""
target_url = reverse('courseware', args=[text_type(self.course.id)])
noshib_response = self.client.get(target_url, follow=True, HTTP_ACCEPT="text/html")
self.assertEqual(noshib_response.redirect_chain[-1],
('/login?next={url}'.format(url=target_url), 302))
self.assertContains(noshib_response, (u"Sign in or Register | {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
self.assertEqual(noshib_response.status_code, 200)
target_url_shib = reverse('courseware', args=[text_type(self.shib_course.id)])
shib_response = self.client.get(**{'path': target_url_shib,
'follow': True,
'REMOTE_USER': self.extauth.external_id,
'Shib-Identity-Provider': 'https://idp.stanford.edu/',
'HTTP_ACCEPT': "text/html"})
# Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
# The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
# won't test its contents
self.assertEqual(shib_response.redirect_chain[-3],
('/shib-login/?next={url}'.format(url=target_url_shib), 302))
self.assertEqual(shib_response.redirect_chain[-2],
(target_url_shib, 302))
self.assertEqual(shib_response.status_code, 200)
| agpl-3.0 | -3,259,418,563,379,477,500 | 44.621538 | 118 | 0.643859 | false | 3.836223 | true | false | false |
bruteforce1/cryptopals | set2/ch10/implement_aes_cbc.py | 1 | 2510 | #!/usr/bin/python3
"""
CBC mode is a block cipher mode that allows us to encrypt irregularly-
sized messages, despite the fact that a block cipher natively only
transforms individual blocks.
In CBC mode, each ciphertext block is added to the next plaintext block
before the next call to the cipher core.
The first plaintext block, which has no associated previous ciphertext
block, is added to a "fake 0th ciphertext block" called the
initialization vector, or IV.
Implement CBC mode by hand by taking the ECB function you wrote
earlier, making it encrypt instead of decrypt (verify this by
decrypting whatever you encrypt to test), and using your XOR function
from the previous exercise to combine them.
The file here is intelligible (somewhat) when CBC decrypted against
"YELLOW SUBMARINE" with an IV of all ASCII 0 (\x00\x00\x00 &c)
"""
import argparse
import os
import sys
from utils.cpset2 import aes_cbc, make_b64_printable
def main(filename, key, iv):
print('Input File: ' + str(filename))
print('Key: ' + str(key))
print('IV: ' + str(iv))
crypt = ''
if not os.path.isfile(filename):
print(filename + ' is not a valid file.')
return -1
with open(filename, 'r') as infile:
for line in infile:
crypt += line
ret = aes_cbc(crypt, key, iv, 0)
if ret:
print('Decrypted Contents in: ' + filename + '.dec')
with open(filename + '.dec', 'w') as tf:
tf.write(ret.decode('utf-8'))
un_ret = make_b64_printable(aes_cbc(ret, key, iv))
if un_ret:
print('Encrypted Contents in: ' + filename + '.enc')
with open(filename + '.enc', 'w') as tf:
tf.write(un_ret.decode('utf-8'))
return 0
print('Error.')
return -1
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Implements AES CBC encryption and decryption manually.')
parser.add_argument('-f', '--inputfile', help='opt. file encrypted \
with AES in CBC mode',
default='10.txt')
parser.add_argument('-i', '--iv', help='opt. 16 byte initialization \
vector',
default=chr(0) * 16)
parser.add_argument('-k', '--key', help='opt. 16 byte encryption or \
decryption key',
default='YELLOW SUBMARINE')
args = parser.parse_args()
sys.exit(main(args.inputfile, args.key, args.iv))
| mit | 5,468,502,557,935,550,000 | 34.352113 | 77 | 0.61753 | false | 3.808801 | false | false | false |
charanpald/features | features/test/PrimalCCATest.py | 1 | 3226 |
import unittest
import numpy
import scipy.linalg
from features.PrimalCCA import PrimalCCA
from features.KernelCCA import KernelCCA
from kernel.LinearKernel import LinearKernel
import logging
class PrimalCCATest(unittest.TestCase):
def setUp(self):
numpy.seterr(all='ignore')
pass
def testLearnModel(self):
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = X
tau = 0.0
tol = 10**--6
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
Y = X*2
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
#Rotate X to form Y
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
def testProject(self):
#Test if it is the same as KCCA
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = numpy.random.rand(numExamples, numFeatures)
tau = 0.0
tol = 10**--6
k = 5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas2 = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
#Seem to get an error in this for some reason
#self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
#self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
#Now try with different tau
tau = 0.5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(XU.T, XU) - numpy.ones(k)) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(YU.T, YU) - numpy.ones(k)) < tol)
def testGetY(self):
#Test if we can recover Y from X
numExamples = 10
numFeatures = 5
X = numpy.random.rand(numExamples, numFeatures)
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
tau = 0.0
cca = PrimalCCA(tau)
U, V, lmbdas = cca.learnModel(X, Y)
Yhat = X.dot(U).dot(V.T).dot(numpy.linalg.inv(numpy.dot(V, V.T)))
logging.debug((numpy.abs(Yhat- Y)))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -873,620,248,228,570,200 | 27.298246 | 85 | 0.579045 | false | 3.060721 | true | false | false |
myt00seven/svrg | cifar/alexnet_lasagne/lasagne-googlenet-master/googlenet/layers/bn.py | 1 | 4243 | import numpy as np
import theano.tensor as T
import theano
from lasagne import init # from .. import init
from lasagne import nonlinearities # from .. import nonlinearities
from lasagne.layers.base import Layer # from .base import Layer
__all__ = [
"BNLayer",
]
class BNLayer(Layer):
"""
lasagne.layers.BNLayer(incoming, nonlinearity=lasagne.nonlinearities.rectify, **kwargs)
A batch normalization layer.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
W : Theano shared variable, numpy array or callable
An initializer for the weights of the layer. If a shared variable or a
numpy array is provided the shape should be (num_inputs, num_units).
See :meth:`Layer.create_param` for more information.
b : Theano shared variable, numpy array, callable or None
An initializer for the biases of the layer. If a shared variable or a
numpy array is provided the shape should be (num_units,).
If None is provided the layer will have no biases.
See :meth:`Layer.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = BNLayer(l_in)
Notes
-----
If the input to this layer has more than two axes, it will flatten the
trailing axes. This is useful for when a dense layer follows a
convolutional layer, for example. It is not necessary to insert a
:class:`FlattenLayer` in this case.
"""
def __init__(self, incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6,
**kwargs):
super(BNLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
# get output shape of incoming
#self.n_channels = self.input_shape[1]
#print self.input_shape
#raise NameError("Hi")
self.epsilon = epsilon
if len(self.input_shape) is 4:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0,'x','x'))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0,'x','x'))
elif len(self.input_shape) is 2:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0))
else: # input should be 4d tensor or 2d matrix
raise ValueError('input of BNLayer should be 4d tensor or 2d matrix')
# done init
def get_output_shape_for(self, input_shape):
#return (input_shape[0], self.num_units)
return input_shape
def get_output_for(self, input, **kwargs):
if input.ndim is 4: # 4d tensor
self.mean = T.mean(input, axis=[0, 2, 3], keepdims=True) #self.mean = T.mean(input, axis=[0, 2, 3]).dimshuffle(('x', 0, 'x', 'x'))
#self.var = T.std(input, axis=[0, 2, 3], keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=[0, 2, 3], keepdims=True) / np.array([self.input_shape[0] * self.input_shape[2] * self.input_shape[3]], dtype=theano.config.floatX)
else: # elif input.ndim is 2: # 2d matrix
self.mean = T.mean(input, axis=0, keepdims=True) #self.mean = T.mean(input, axis=0).dimshuffle(('x',0))
#self.var = T.std(input, axis=0, keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=0, keepdims=True) / np.array([self.input_shape[0]], dtype=theano.config.floatX)
activation = (input - self.mean) / T.sqrt(self.var + self.epsilon)
activation = self.gamma * activation + self.beta
return self.nonlinearity(activation)
| mit | 4,750,715,369,948,741,000 | 40.194175 | 191 | 0.634928 | false | 3.638937 | false | false | false |
qrsforever/workspace | python/learn/thinkstats/rankit.py | 1 | 1807 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
import thinkstats
import myplot
import matplotlib.pyplot as pyplot
def Sample(n=6):
"""Generates a sample from a standard normal variate.
n: sample size
Returns: list of n floats
"""
t = [random.normalvariate(0.0, 1.0) for i in range(n)]
t.sort()
return t
def Samples(n=6, m=1000):
"""Generates m samples with size n each.
n: sample size
m: number of samples
Returns: list of m samples
"""
t = [Sample(n) for i in range(m)]
return t
def EstimateRankits(n=6, m=1000):
"""Estimates the expected values of sorted random samples.
n: sample size
m: number of iterations
Returns: list of n rankits
"""
t = Samples(n, m)
t = zip(*t)
means = [thinkstats.Mean(x) for x in t]
return means
def MakeNormalPlot(ys, root=None, line_options={}, **options):
"""Makes a normal probability plot.
Args:
ys: sequence of values
line_options: dictionary of options for pyplot.plot
options: dictionary of options for myplot.Save
"""
# TODO: when n is small, generate a larger sample and desample
n = len(ys)
xs = [random.normalvariate(0.0, 1.0) for i in range(n)]
pyplot.clf()
pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)
myplot.Save(root,
xlabel = 'Standard normal values',
legend=False,
**options)
def main():
means = EstimateRankits(84)
print(means)
if __name__ == "__main__":
main()
| mit | 151,881,123,661,949,600 | 21.308642 | 75 | 0.614278 | false | 3.461686 | false | false | false |
baliga-lab/weeder_patched | python/seqtools.py | 1 | 3069 | HAMMING_MAX = 9999
def read_sequences_from_fasta_string(fasta_string):
"""reads the sequences contained in a FASTA string"""
lines = fasta_string.split('\n')
sequences = []
seqbuffer = ""
seqname = None
for line in lines:
line = line.strip()
if line.startswith('>'):
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
seqbuffer = ""
seqname = line[1:]
elif line and len(line) > 0:
seqbuffer += line
# add the last line
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
return sequences
def read_sequences_from_fasta_file(filepath):
"""Read the sequences from the specified FASTA file"""
with open(filepath) as inputfile:
fasta_string = inputfile.read()
return read_sequences_from_fasta_string(fasta_string)
def revcomp(sequence):
"""compute the reverse complement of the input string"""
return "".join([revchar(c) for c in sequence[::-1]])
def overlap(str1, str2, checkreverse):
result = False
overlapping = True
for l in range(1, 3):
for i in range(len(str1) - l):
if i >= len(str2) or str1[i + l] != str2[i]:
overlapping = False
break
if overlapping:
result = True
overlapping = True
for i in range(len(str1) - l):
if (i + l) >= len(str2) or str1[i] != str2[i + l]:
overlapping = False
break
if overlapping:
result = True
if checkreverse:
rev_result = overlap(str1[::-1], str2, False)
if rev_result:
result = True
return result
def hamming_distance(str1, str2, checkreverse):
dist_forward = 0
dist_reverse = HAMMING_MAX
if len(str1) != len(str2) or str1 == str2:
return HAMMING_MAX
for i in range(len(str1)):
if str1[i] != str2[i]:
dist_forward += 1
if not checkreverse:
return dist_forward
else:
rev = str1[::-1]
for i in range(len(str1)):
if rev[i] != str2[i]:
dist_reverse += 1
if dist_reverse < dist_forward:
return dist_reverse
else:
return dist_forward
def inside(str1, str2, checkreverse):
len1 = len(str1)
len2 = len(str2)
result = False
if (len2 - len1) != 2:
return False
for i in range(len2 - len1 + 1):
match = True
for j in range(i, i + len1):
if str1[j - i] != str2[j]:
match = False
break
if match:
result = True
if checkreverse:
rev_result = inside(str1[::-1], str2, False)
if rev_result:
result = True
return result
def char_to_int(c):
c = c.lower()
if c == 'a':
return 0;
elif c == 'c':
return 1;
elif c == 'g':
return 2;
elif c == 't':
return 3;
elif c == '$':
return 4;
else:
return -1;
| gpl-3.0 | -7,517,009,544,331,105,000 | 24.789916 | 62 | 0.525904 | false | 3.675449 | false | false | false |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part03-e05_correlation/test/test_correlation.py | 1 | 2795 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_out
module_name="src.correlation"
correlations = load(module_name, "correlations")
lengths = load(module_name, "lengths")
def patch_name(m, d):
import importlib
parts=d.split(".")
try:
getattr(importlib.import_module(m), parts[-1])
p=".".join([m, parts[-1]])
except ModuleNotFoundError:
raise
except AttributeError:
if len(parts) == 1:
raise
try:
getattr(importlib.import_module(m), parts[-2])
p=".".join([m] + parts[-2:])
except AttributeError:
if len(parts) == 2:
raise
getattr(importlib.import_module(m), parts[-3])
p=".".join([m] + parts[-3:])
return p
class Correlation(unittest.TestCase):
@points('p03-05.1')
def test_lengths(self):
result = lengths()
self.assertAlmostEqual(result, 0.8717537758865832, places=4, msg="Wrong correlation!")
@points('p03-05.1')
def test_lengths_calls(self):
with patch(patch_name(module_name, "scipy.stats.pearsonr")) as pcorr:
result = lengths()
pcorr.assert_called()
@points('p03-05.2')
def test_correlations(self):
result = correlations()
n, m = result.shape
for r in range(n):
for c in range(r):
self.assertAlmostEqual(result[r,c], result[c,r], places=4,
msg="The correlation matrix is not symmetric!")
self.assertAlmostEqual(result[r,r], 1, places=4, msg="Values on the diagonal should be one!")
self.assertAlmostEqual(result[0,1], -0.11756978, places=4,
msg="Incorrect value in position [0,1]!")
self.assertAlmostEqual(result[0,2], 0.87175378, places=4,
msg="Incorrect value in position [0,2]!")
self.assertAlmostEqual(result[0,3], 0.81794113, places=4,
msg="Incorrect value in position [0,3]!")
self.assertAlmostEqual(result[1,2], -0.4284401, places=4,
msg="Incorrect value in position [1,2]!")
self.assertAlmostEqual(result[1,3], -0.36612593, places=4,
msg="Incorrect value in position [1,3]!")
self.assertAlmostEqual(result[2,3], 0.96286543, places=4,
msg="Incorrect value in position [2,3]!")
@points('p03-05.2')
def test_lengths_calls(self):
with patch(patch_name(module_name, "np.corrcoef")) as pcorr:
result = correlations()
pcorr.assert_called()
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -656,753,270,546,592,900 | 33.085366 | 105 | 0.563148 | false | 3.813097 | true | false | false |
NeoRazorX/ubuntufaq | public.py | 1 | 15820 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of ubuntufaq
# Copyright (C) 2011 Carlos Garcia Gomez [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, logging
# cargamos django 1.2
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext.webapp import template
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users, memcache
from recaptcha.client import captcha
from base import *
from preguntas import *
from enlaces import *
class Portada(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_portada( users.get_current_user() )
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Ubuntu FAQ',
'descripcion': APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'urespuestas': self.sc.get_ultimas_respuestas(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'stats': self.sc.get_stats()
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write( template.render(path, template_values) )
class Populares(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_populares()
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Populares - Ubuntu FAQ',
'descripcion': 'Listado de preguntas y noticias populares de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'stats': self.sc.get_stats(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio
}
path = os.path.join(os.path.dirname(__file__), 'templates/populares.html')
self.response.out.write( template.render(path, template_values) )
class Ayuda(Pagina):
def get(self):
Pagina.get(self)
template_values = {
'titulo': 'Ayuda de Ubuntu FAQ',
'descripcion': u'Sección de ayuda de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'karmalist': memcache.get('pending-users'),
'foco': 'ayuda'
}
path = os.path.join(os.path.dirname(__file__), 'templates/ayuda.html')
self.response.out.write(template.render(path, template_values))
class Nueva_publicacion(Pagina):
def get(self):
Pagina.get(self)
# el captcha
if users.get_current_user():
chtml = ''
else:
chtml = captcha.displayhtml(
public_key = RECAPTCHA_PUBLIC_KEY,
use_ssl = False,
error = None)
if self.request.get('tipo') == 'pregunta':
foco = 'pregunta'
elif self.request.get('tipo') == 'enlace':
foco = 'enlace'
else:
foco = 'pensamiento'
template_values = {
'titulo': 'Publicar...',
'descripcion': u'Formulario de publicación de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'captcha': chtml,
'tipo': self.request.get('tipo'),
'contenido': self.request.get('contenido'),
'url2': self.request.get('url'),
'foco': foco
}
path = os.path.join(os.path.dirname(__file__), 'templates/nueva.html')
self.response.out.write(template.render(path, template_values))
class Pagina_buscar(Pagina):
def get(self, tag=None):
Pagina.get(self)
# para corregir fallos de codificación en el tag
if isinstance(tag, str):
tag = unicode( urllib.unquote(tag), 'utf-8')
else:
tag = unicode( urllib.unquote(tag) )
template_values = {
'titulo': 'Ubuntu FAQ: ' + tag,
'descripcion': u'Páginas relacionadas con ' + tag,
'tag': tag,
'tags': 'problema, duda, ayuda, ' + tag,
'relacionadas': self.sc.paginas_relacionadas(tag, True),
'alltags': self.sc.get_alltags(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
def post(self, ntag=None):
Pagina.get(self)
query = urllib.unquote( self.request.get('query') )
template_values = {
'titulo': 'Ubuntu FAQ: ' + query,
'descripcion': u'Resultados de: ' + query,
'tag': query,
'buscando': True,
'tags': 'problema, duda, ayuda, ' + query,
'relacionadas': self.sc.buscar( query ),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
class Guardar_voto(Pagina):
def get(self, tipo='x', keye=None, voto='-1'):
try:
if self.request.environ['HTTP_USER_AGENT'].lower().find('googlebot') != -1:
logging.info('Googlebot!')
self.redirect('/')
else:
if tipo == 'r':
elemento = Respuesta.get( keye )
elif tipo == 'c':
elemento = Comentario.get( keye )
else:
elemento = False
if not elemento: # no hay elemento a votar
logging.warning('Elemento no encontrado!')
self.redirect('/error/404')
elif self.request.remote_addr in elemento.ips and self.request.remote_addr != '127.0.0.1': # ya se ha votado desde esta IP
logging.info('Voto ya realizado')
self.redirect( elemento.get_link() )
else: # voto válido
ips = elemento.ips
ips.append( self.request.remote_addr )
elemento.ips = ips
if voto == '0':
elemento.valoracion -= 1
logging.info('Voto negativo')
elif voto == '1':
elemento.valoracion += 1
logging.info('Voto positivo')
else:
logging.info('Voto no válido: ' + str(voto))
elemento.put()
elemento.borrar_cache()
# actualizamos la estadistica
stats = self.sc.get_stats()
if voto in ['0', '1']:
try:
stats['votos'] += 1
except:
stats['votos'] = 1
memcache.replace('stats', stats)
self.redirect( elemento.get_link() )
except:
self.redirect('/error/503')
class Rss(Pagina):
def get(self):
template_values = {
'portada': self.sc.get_portada(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss.html')
self.response.out.write(template.render(path, template_values))
class Rssr(Pagina):
def get(self):
template_values = {
'respuestas': self.sc.get_ultimas_respuestas(),
'comentarios': self.sc.get_ultimos_comentarios(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss-respuestas.html')
self.response.out.write(template.render(path, template_values))
class Sitemap(Pagina):
def get(self):
portada = self.sc.get_portada()
print 'Content-Type: text/xml'
print ''
print '<?xml version="1.0" encoding="UTF-8"?>'
print '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
for p in portada:
print '<url><loc>' + p['link'] + '</loc><lastmod>' + str(p['fecha']).split(' ')[0] + '</lastmod><changefreq>always</changefreq><priority>0.9</priority></url>'
print '</urlset>'
class Perror(Pagina):
def get(self, cerror='404'):
Pagina.get(self)
derror = {
'403': 'Permiso denegado',
'403c': 'Permiso denegado - error en el captcha',
'404': u'Página no encontrada en Ubuntu FAQ',
'503': 'Error en Ubuntu FAQ',
'606': 'Idiota detectado'
}
merror = {
'403': '403 - Permiso denegado',
'403c': u'<img src="/img/fuuu_face.png" alt="fuuu"/><br/><br/>403 - Permiso denegado: debes repetir el captcha.<br/>Evita los captchas iniciando sesión.',
'404': u'404 - Página no encontrada en Ubuntu FAQ',
'503': '<img src="/img/fuuu_face.png" alt="explosión"/><br/><br/>503 - Error en Ubuntu FAQ,<br/>consulta el estado en: http://code.google.com/status/appengine',
'606': u'<img src="/img/troll_face.png" alt="troll"/><br/><br/>606 - ¿Por qué no pruebas a escribir algo diferente?'
}
if cerror == '503':
logging.error( '503' )
else:
logging.warning( cerror )
template_values = {
'titulo': str(cerror) + ' - Ubuntu FAQ',
'descripcion': derror.get(cerror, 'Error desconocido'),
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario': self.formulario,
'error': merror.get(cerror, 'Error desconocido'),
'cerror': cerror,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write(template.render(path, template_values))
def main():
application = webapp.WSGIApplication([('/', Portada),
('/inicio', Todas_preguntas),
('/preguntas', Todas_preguntas),
(r'/preguntas/(.*)', Todas_preguntas),
('/populares', Populares),
('/sin-solucionar', Sin_solucionar),
('/actualidad', Actualidad),
(r'/actualidad/(.*)', Actualidad),
(r'/p/(.*)', Redir_pregunta),
(r'/question/(.*)', Detalle_pregunta),
('/nueva', Nueva_publicacion),
('/add_p', Nueva_pregunta),
('/mod_p', Detalle_pregunta),
('/del_p', Borrar_pregunta),
('/add_r', Responder),
('/mod_r', Modificar_respuesta),
('/del_r', Borrar_respuesta),
(r'/e/(.*)', Acceder_enlace),
(r'/de/(.*)', Redir_enlace),
(r'/story/(.*)', Detalle_enlace),
('/add_e', Actualidad),
('/mod_e', Detalle_enlace),
('/hun_e', Hundir_enlace),
('/del_e', Borrar_enlace),
('/add_c', Comentar),
('/mod_c', Modificar_comentario),
('/del_c', Borrar_comentario),
('/ayuda', Ayuda),
(r'/search/(.*)', Pagina_buscar),
(r'/votar/(.*)/(.*)/(.*)', Guardar_voto),
('/rss', Rss),
('/rss-respuestas', Rssr),
('/sitemap', Sitemap),
('/sitemap.xml', Sitemap),
(r'/error/(.*)', Perror),
('/.*', Perror),
],
debug=DEBUG_FLAG)
webapp.template.register_template_library('filters.filtros_django')
run_wsgi_app(application)
if __name__ == "__main__":
main()
| agpl-3.0 | -7,409,657,376,595,017,000 | 43.784703 | 179 | 0.49864 | false | 3.704077 | false | false | false |
DataDog/integrations-core | openstack_controller/tests/common.py | 1 | 14615 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import datetime
import os
CHECK_NAME = 'openstack'
FIXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')
ALL_IDS = ['server-1', 'server-2', 'other-1', 'other-2']
EXCLUDED_NETWORK_IDS = ['server-1', 'other-.*']
EXCLUDED_SERVER_IDS = ['server-2', 'other-.*']
FILTERED_NETWORK_ID = 'server-2'
FILTERED_SERVER_ID = 'server-1'
FILTERED_BY_PROJ_SERVER_ID = ['server-1', 'server-2']
CONFIG_FILE_INSTANCE = {
'name': 'test_name',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
'openstack_config_file_path': os.path.abspath('./tests/fixtures/openstack_config.yaml'),
'openstack_cloud_name': 'test_cloud',
}
KEYSTONE_INSTANCE = {
'name': 'test_name',
'keystone_server_url': 'http://10.0.2.15:5000',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
}
MOCK_CONFIG = {'init_config': {}, 'instances': [KEYSTONE_INSTANCE]}
EXAMPLE_AUTH_RESPONSE = {
u'token': {
u'methods': [u'password'],
u'roles': [
{u'id': u'f20c215f5a4d47b7a6e510bc65485ced', u'name': u'datadog_monitoring'},
{u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'},
],
u'expires_at': u'2015-11-02T15: 57: 43.911674Z',
u'project': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'0850707581fe4d738221a72db0182876',
u'name': u'admin',
},
u'catalog': [
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'354e35ed19774e398f80dc2a90d07f4b',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'36e8e2bf24384105b9d56a65b0900172',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'de93edcbf7f9446286687ec68423c36f',
},
],
u'type': u'compute',
u'id': u'2023bd4f451849ba8abeaaf283cdde4f',
u'name': u'nova',
},
{
u'endpoints': [
{
u'url': u'http://10.0.3.111:8776/v1/***************************4bfc1',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************2452f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************8239f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************7caa1',
},
],
u'type': u'volume',
u'id': u'***************************e7e16',
u'name': u'cinder',
},
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'7c1e318d8f7f42029fcb591598df2ef5',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'afcc88b1572f48a38bb393305dc2b584',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'd9730dbdc07844d785913219da64a197',
},
],
u'type': u'network',
u'id': u'21ad241f26194bccb7d2e49ee033d5a2',
u'name': u'neutron',
},
],
u'extras': {},
u'user': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'5f10e63fbd6b411186e561dc62a9a675',
u'name': u'datadog',
},
u'audit_ids': [u'OMQQg9g3QmmxRHwKrfWxyQ'],
u'issued_at': u'2015-11-02T14: 57: 43.911697Z',
}
}
EXAMPLE_PROJECTS_RESPONSE = {
"projects": [
{
"domain_id": "1789d1",
"enabled": True,
"id": "263fd9",
"links": {"self": "https://example.com/identity/v3/projects/263fd9"},
"name": "Test Group",
}
],
"links": {"self": "https://example.com/identity/v3/auth/projects", "previous": None, "next": None},
}
# .. server/network
SERVERS_CACHE_MOCK = {
'servers': {
"server-1": {"id": "server-1", "name": "server-name-1", "status": "ACTIVE", "project_name": "testproj"},
"server-2": {"id": "server-2", "name": "server-name-2", "status": "ACTIVE", "project_name": "testproj"},
"other-1": {"id": "other-1", "name": "server-name-other-1", "status": "ACTIVE", "project_name": "blacklist_1"},
"other-2": {"id": "other-2", "name": "server-name-other-2", "status": "ACTIVE", "project_name": "blacklist_2"},
},
'change_since': datetime.datetime.utcnow().isoformat(),
}
EMPTY_NOVA_SERVERS = []
# One example from MOCK_NOVA_SERVERS to emulate pagination
MOCK_NOVA_SERVERS_PAGINATED = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
}
]
# Example response from - https://developer.openstack.org/api-ref/compute/#list-servers-detailed
# ID and server-name values have been changed for test readability
MOCK_NOVA_SERVERS = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "DELETED",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-2",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server_newly_added",
"metadata": {"My Server Name": "Apache1"},
"name": "newly_added_server",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
]
EXAMPLE_GET_FLAVORS_DETAIL_RETURN_VALUE = [
{'id': u'10', 'disk': 10, 'vcpus': 2, 'ram': 1024, 'OS-FLV-EXT-DATA:ephemeral': 0, 'swap': 0},
{
'id': u'625c2e4b-0a1f-4236-bb67-5ceee1a766e5',
'disk': 48,
'vcpus': 8,
'ram': 5934,
'OS-FLV-EXT-DATA:ephemeral': 0,
'swap': 0,
},
]
EXAMPLE_GET_OS_AGGREGATES_RETURN_VALUE = [{'hosts': ["compute"], 'name': "name", 'availability_zone': "london"}]
EXAMPLE_GET_OS_HYPERVISORS_RETURN_VALUE = [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": ["pge", "clflush"],
"topology": {"cores": 1, "threads": 1, "sockets": 4},
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host1",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 2,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": "host1", "id": 7, "disabled_reason": None},
"vcpus": 2,
"vcpus_used": 0,
}
]
EXAMPLE_GET_PROJECT_LIMITS_RETURN_VALUE = {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 1,
"totalServerGroupsUsed": 0,
}
EXAMPLE_GET_NETWORKS_RETURN_VALUE = [
{
'id': u'2755452c-4fe8-4ba1-9b26-8898665b0958',
'name': u'net2',
'tenant_id': u'680031a39ce040e1b81289ea8c73fb11',
'admin_state_up': True,
}
]
DEFAULT_METRICS = [
'openstack.controller',
'openstack.nova.current_workload',
'openstack.nova.disk_available_least',
'openstack.nova.free_disk_gb',
'openstack.nova.free_ram_mb',
'openstack.nova.hypervisor_load.1',
'openstack.nova.hypervisor_load.15',
'openstack.nova.hypervisor_load.5',
'openstack.nova.limits.max_image_meta',
'openstack.nova.limits.max_personality',
'openstack.nova.limits.max_personality_size',
'openstack.nova.limits.max_security_group_rules',
'openstack.nova.limits.max_security_groups',
'openstack.nova.limits.max_server_meta',
'openstack.nova.limits.max_total_cores',
'openstack.nova.limits.max_total_floating_ips',
'openstack.nova.limits.max_total_instances',
'openstack.nova.limits.max_total_keypairs',
'openstack.nova.limits.max_total_ram_size',
'openstack.nova.limits.total_cores_used',
'openstack.nova.limits.total_floating_ips_used',
'openstack.nova.limits.total_instances_used',
'openstack.nova.limits.total_ram_used',
'openstack.nova.limits.total_security_groups_used',
'openstack.nova.local_gb',
'openstack.nova.local_gb_used',
'openstack.nova.memory_mb',
'openstack.nova.memory_mb_used',
'openstack.nova.running_vms',
'openstack.nova.vcpus',
'openstack.nova.vcpus_used',
]
| bsd-3-clause | 3,778,650,173,125,150,700 | 37.159269 | 119 | 0.515156 | false | 3.039725 | true | false | false |
asimshankar/tensorflow | tensorflow/python/saved_model/save.py | 1 | 34278 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a SavedModel from a Checkpointable Python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import saved_object_graph_pb2
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
def _find_function_to_export(root):
"""Iterate over `root`'s attributes, finding traced functions."""
exported_function = None
previous_attribute_name = None
for attribute_name in dir(root):
attribute_value = getattr(root, attribute_name, None)
if isinstance(attribute_value, def_function.PolymorphicFunction):
if exported_function is not None:
raise ValueError(
("Exporting an object with no "
"tf.saved_model.save(..., signatures=...) "
"argument specified, and with more than one "
"@tf.function-decorated method attached to it: {}. The signature "
"keys for these functions are ambiguous. Specify signature "
"functions explicitly.").format(
[previous_attribute_name, attribute_name]))
exported_function = attribute_value
previous_attribute_name = attribute_name
if exported_function is None:
exported_function = getattr(root, DEFAULT_SIGNATURE_ATTR, None)
if exported_function is None:
raise ValueError(
("Exporting an object with no tf.saved_model.save(..., signatures=...) "
"argument specified, and with no @tf.function-decorated methods "
"attached to it. In the future this will be a supported use-case for "
"Python re-import, but at the moment saving a SavedModel without "
"signatures does not make sense, as the only consumers will expect "
"signatures. Either decorate a method or specify a signature function "
"explicitly."))
return exported_function
def _canonicalize_signatures(signatures):
"""Converts `signatures` into a dictionary of concrete functions."""
if not isinstance(signatures, collections.Mapping):
signatures = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
concrete_signatures = {}
for serving_key, signature_function in signatures.items():
if isinstance(signature_function, (defun.PolymorphicFunction,
def_function.PolymorphicFunction)):
input_signature = signature_function._input_signature # pylint: disable=protected-access
if input_signature is None:
raise ValueError(
("Unable to use the function {} as a signature directly. Functions "
"used to generate serving signatures must either have an "
"`input_signature=` specified when constructed, or must be "
"converted to concrete functions using "
"`f.get_concrete_function(...)`.").format(signature_function))
signature_function = signature_function.get_concrete_function()
elif not isinstance(signature_function, defun.Function):
raise ValueError(
("Expected a TensorFlow function to generate a signature for, but "
"got {}. Python functions may be decorated with "
"`@tf.function(input_signature=...)` and passed as signatures "
"directly, or created without a signature using `@tf.function` "
"and then converted to a concrete TensorFlow function using "
"`f.get_concrete_function(...)`.").format(signature_function))
concrete_signatures[serving_key] = signature_function
return concrete_signatures
def _is_flat(sequence):
sequence_flat = nest.flatten(sequence)
try:
nest.assert_same_structure(sequence_flat, sequence)
return True
except ValueError:
return False
except TypeError:
return False
def _normalize_outputs(outputs, function_name, signature_key):
"""Construct an output dictionary from unnormalized function outputs."""
if isinstance(outputs, collections.Mapping):
for key, value in outputs.items():
if not isinstance(value, ops.Tensor):
raise ValueError(
("Got a dictionary containing non-Tensor value {} for key {} "
"in the output of the function {} used to generate a SavedModel "
"signature. Dictionaries outputs for functions used as signatures "
"should have one Tensor output per string key.")
.format(value, key, compat.as_str_any(function_name)))
return outputs
else:
original_outputs = outputs
if not isinstance(outputs, collections.Sequence):
outputs = [outputs]
if not _is_flat(outputs):
raise ValueError(
("Got non-flat outputs '{}' from '{}' for SavedModel "
"signature '{}'. Signatures have one Tensor per output, so "
"to have predictable names Python functions used to generate "
"these signatures should avoid outputting Tensors in nested "
"structures.")
.format(original_outputs, function_name, signature_key))
return {("output_{}".format(output_index)): output
for output_index, output
in enumerate(outputs)}
def _tensor_dict_to_tensorinfo(tensor_dict):
return {key: utils_impl.build_tensor_info(value)
for key, value in tensor_dict.items()}
def _map_captures_to_created_tensors(
original_captures, resource_map):
"""Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
resource_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`resource_map`.
"""
export_captures = []
for exterior, interior in original_captures.items():
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
if exterior.dtype == dtypes.resource:
raise AssertionError(
("Tried to export a function which references untracked stateful "
"object {}. Stateful TensorFlow objects (e.g. tf.Variable) must "
"be tracked by the main object. Objects may be tracked by "
"assigning them to an attribute of another tracked object, or to "
"an attribute of the main object directly.")
.format(interior))
else:
# This is a captured Tensor, but it's not a resource. We'll just add it
# to the graph as a constant.
mapped_resource = constant_op.constant(exterior.numpy())
export_captures.append(mapped_resource)
return export_captures
def _map_function_arguments_to_created_inputs(
function_arguments, signature_key, function_name):
"""Creates exterior placeholders in the exported graph for function arguments.
Functions have two types of inputs: tensors captured from the outside (eager)
context, and arguments to the function which we expect to receive from the
user at each call. `_map_captures_to_created_tensors` replaces
captured tensors with stand-ins (typically these are resource dtype tensors
associated with variables). `_map_function_inputs_to_created_inputs` runs over
every argument, creating a new placeholder for each which will belong to the
exported graph rather than the function body.
Args:
function_arguments: A list of argument placeholders in the function body.
signature_key: The name of the signature being exported, for error messages.
function_name: The name of the function, for error messages.
Returns:
A tuple of (mapped_inputs, exterior_placeholders)
mapped_inputs: A list with entries corresponding to `function_arguments`
containing all of the inputs of the function gathered from the exported
graph (both captured resources and arguments).
exterior_argument_placeholders: A dictionary mapping from argument names
to placeholders in the exported graph, containing the explicit arguments
to the function which a user is expected to provide.
Raises:
ValueError: If argument names are not unique.
"""
# `exterior_argument_placeholders` holds placeholders which are outside the
# function body, directly contained in a MetaGraph of the SavedModel. The
# function body itself contains nearly identical placeholders used when
# running the function, but these exterior placeholders allow Session-based
# APIs to call the function using feeds and fetches which name Tensors in the
# MetaGraph.
exterior_argument_placeholders = {}
mapped_inputs = []
for placeholder in function_arguments:
# `export_captures` contains an exhaustive set of captures, so if we don't
# find the input there then we now know we have an argument.
user_input_name = compat.as_str_any(
placeholder.op.get_attr("_user_specified_name"))
# If the internal placeholders for a function have names which were
# uniquified by TensorFlow, then a single user-specified argument name
# must refer to multiple Tensors. The resulting signatures would be
# confusing to call. Instead, we throw an exception telling the user to
# specify explicit names.
if user_input_name != placeholder.op.name:
# This should be unreachable, since concrete functions may not be
# generated with non-unique argument names.
raise ValueError(
("Got non-flat/non-unique argument names for SavedModel "
"signature '{}': more than one argument to '{}' was named '{}'. "
"Signatures have one Tensor per named input, so to have "
"predictable names Python functions used to generate these "
"signatures should avoid *args and Tensors in nested "
"structures unless unique names are specified for each. Use "
"tf.TensorSpec(..., name=...) to provide a name for a Tensor "
"input.")
.format(signature_key, compat.as_str_any(function_name),
user_input_name))
arg_placeholder = array_ops.placeholder(
shape=placeholder.shape,
dtype=placeholder.dtype,
name="{}_{}".format(signature_key, user_input_name))
exterior_argument_placeholders[user_input_name] = arg_placeholder
mapped_inputs.append(arg_placeholder)
return mapped_inputs, exterior_argument_placeholders
def _call_function_with_mapped_captures(function, args, resource_map):
"""Calls `function` in the exported graph, using mapped resource captures."""
export_captures = _map_captures_to_created_tensors(
function.graph.captures, resource_map)
mapped_inputs = args + export_captures
# Calls the function quite directly, since we have new captured resource
# tensors we need to feed in which weren't part of the original function
# definition.
# pylint: disable=protected-access
outputs = function._build_call_outputs(
function._inference_function.call(context.context(), mapped_inputs))
return outputs
def _generate_signatures(signature_functions, resource_map):
"""Validates and calls `signature_functions` in the default graph.
Args:
signature_functions: A dictionary mapping string keys to concrete TensorFlow
functions (e.g. from `_canonicalize_signatures`) which will be used to
generate SignatureDefs.
resource_map: A dictionary mapping from resource tensors in the eager
context to resource tensors in the Graph being exported. This dictionary
is used to re-bind resources captured by functions to tensors which will
exist in the SavedModel.
Returns:
Each function in the `signature_functions` dictionary is called with
placeholder Tensors, generating a function call operation and output
Tensors. The placeholder Tensors, the function call operation, and the
output Tensors from the function call are part of the default Graph.
This function then returns a dictionary with the same structure as
`signature_functions`, with the concrete functions replaced by SignatureDefs
implicitly containing information about how to call each function from a
TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference
the generated placeholders and Tensor outputs by name.
The caller is expected to include the default Graph set while calling this
function as a MetaGraph in a SavedModel, including the returned
SignatureDefs as part of that MetaGraph.
"""
signatures = {}
for signature_key, function in sorted(signature_functions.items()):
if function.graph.captures:
argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
else:
argument_inputs = function.graph.inputs
mapped_inputs, exterior_argument_placeholders = (
_map_function_arguments_to_created_inputs(
argument_inputs, signature_key, function.name))
outputs = _normalize_outputs(
_call_function_with_mapped_captures(
function, mapped_inputs, resource_map),
function.name, signature_key)
signatures[signature_key] = signature_def_utils.build_signature_def(
_tensor_dict_to_tensorinfo(exterior_argument_placeholders),
_tensor_dict_to_tensorinfo(outputs))
return signatures
def _trace_resource_initializers(accessible_objects):
"""Create concrete functions from `TrackableResource` objects."""
resource_initializers = []
def _wrap_initializer(obj):
obj.initialize()
return constant_op.constant(1.) # Dummy control output
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
resource_initializers.append(def_function.function(
functools.partial(_wrap_initializer, obj),
# All inputs are captures.
input_signature=[]).get_concrete_function())
return resource_initializers
_AssetInfo = collections.namedtuple(
"_AssetInfo", [
# List of AssetFileDef protocol buffers
"asset_defs",
# Map from asset variable resource Tensors to their init ops
"asset_initializers_by_resource",
# Map from base asset filenames to full paths
"asset_filename_map",
# Map from TrackableAsset to index of corresponding AssetFileDef
"asset_index"])
def _process_asset(trackable_asset, asset_info, resource_map):
"""Add `trackable_asset` to `asset_info` and `resource_map`."""
original_variable = trackable_asset.asset_path
with context.eager_mode():
original_path = original_variable.numpy()
path = builder_impl.get_asset_filename_to_add(
asset_filepath=original_path,
asset_filename_map=asset_info.asset_filename_map)
# TODO(andresp): Instead of mapping 1-1 between trackable asset
# and asset in the graph def consider deduping the assets that
# point to the same file.
asset_path_initializer = array_ops.placeholder(
shape=original_variable.shape,
dtype=dtypes.string,
name="asset_path_initializer")
asset_variable = resource_variable_ops.ResourceVariable(
asset_path_initializer)
asset_info.asset_filename_map[path] = original_path
asset_def = meta_graph_pb2.AssetFileDef()
asset_def.filename = path
asset_def.tensor_info.name = asset_path_initializer.name
asset_info.asset_defs.append(asset_def)
asset_info.asset_initializers_by_resource[original_variable.handle] = (
asset_variable.initializer)
asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
resource_map[original_variable.handle] = asset_variable.handle
def _map_resources(accessible_objects):
"""Makes new resource handle ops corresponding to existing resource tensors.
Creates resource handle ops in the current default graph, whereas
`accessible_objects` will be from an eager context. Resource mapping adds
resource handle ops to the main GraphDef of a SavedModel, which allows the C++
loader API to interact with variables.
Args:
accessible_objects: A list of objects, some of which may contain resources,
to create replacements for.
Returns:
A tuple of (object_map, resource_map, asset_info):
object_map: A dictionary mapping from object in `accessible_objects` to
replacement objects created to hold the new resource tensors.
resource_map: A dictionary mapping from resource tensors extracted from
`accessible_objects` to newly created resource tensors.
asset_info: An _AssetInfo tuple describing external assets referenced from
accessible_objects.
"""
# TODO(allenl): Handle MirroredVariables and other types of variables which
# may need special casing.
object_map = {}
resource_map = {}
asset_info = _AssetInfo(
asset_defs=[],
asset_initializers_by_resource={},
asset_filename_map={},
asset_index={})
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
new_resource = obj.create_resource()
resource_map[obj.resource_handle] = new_resource
elif resource_variable_ops.is_resource_variable(obj):
new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)
object_map[obj] = new_variable
resource_map[obj.handle] = new_variable.handle
elif isinstance(obj, tracking.TrackableAsset):
_process_asset(obj, asset_info, resource_map)
return object_map, resource_map, asset_info
def _fill_meta_graph_def(meta_graph_def, obj, signature_functions,
object_saver):
"""Generates a MetaGraph which calls `signature_functions`.
Args:
meta_graph_def: The MetaGraphDef proto to fill.
obj: The checkpointable object being exported.
signature_functions: A dictionary mapping signature keys to concrete
functions containing signatures to add to the MetaGraph.
object_saver: A CheckpointableSaver to add to the MetaGraph.
Returns:
An _AssetInfo, which contains information to help creating the SavedModel.
"""
signatures = {}
# List objects from the eager context to make sure Optimizers give us the
# right Graph-dependent variables.
accessible_objects = util.list_objects(obj)
resource_initializer_functions = _trace_resource_initializers(
accessible_objects)
exported_graph = ops.Graph()
resource_initializer_ops = []
with exported_graph.as_default():
object_map, resource_map, asset_info = _map_resources(accessible_objects)
for resource_initializer_function in resource_initializer_functions:
asset_dependencies = []
for capture in resource_initializer_function.graph.external_captures:
asset_initializer = asset_info.asset_initializers_by_resource.get(
capture, None)
if asset_initializer is not None:
asset_dependencies.append(asset_initializer)
with ops.control_dependencies(asset_dependencies):
resource_initializer_ops.append(
_call_function_with_mapped_captures(
resource_initializer_function, [], resource_map))
with ops.control_dependencies(resource_initializer_ops):
init_op = control_flow_ops.no_op()
# Add the same op to the main_op collection and to the init_op
# signature. The collection is for compatibility with older loader APIs;
# only one will be executed.
meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
init_op.name)
meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
signature_def_utils.op_signature_def(
init_op, constants.INIT_OP_SIGNATURE_KEY))
# Saving an object-based checkpoint again gathers variables. We need to do the
# gathering from the eager context so Optimizers save the right set of
# variables, but want any operations associated with the save/restore to be in
# the exported graph (thus the `to_graph` argument).
saver = object_saver.freeze(object_map=object_map, to_graph=exported_graph)
# We must resolve the concrete function to add to MetaGraph while in eager
# mode.
concrete_functions = []
for accessible_object in accessible_objects:
for function in function_serialization.list_all_polymorphic_functions(
accessible_object).values():
concrete_functions.extend(
function_serialization.list_all_concrete_functions(function))
with exported_graph.as_default():
signatures = _generate_signatures(signature_functions, resource_map)
for concrete_function in concrete_functions:
concrete_function.add_to_graph()
saver_def = saver.to_proto()
meta_graph_def.saver_def.CopyFrom(saver_def)
graph_def = exported_graph.as_graph_def(add_shapes=True)
# Clean reference cycles so repeated export()s don't make work for the garbage
# collector.
ops.dismantle_graph(exported_graph)
meta_graph_def.graph_def.CopyFrom(graph_def)
meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
for signature_key, signature in signatures.items():
meta_graph_def.signature_def[signature_key].CopyFrom(signature)
meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
return asset_info
def _write_object_graph(root, export_dir, asset_file_def_index):
"""Save a SavedObjectGraph proto for `root`."""
# SavedObjectGraph is similar to the CheckpointableObjectGraph proto in the
# checkpoint. It will eventually go into the SavedModel.
proto = saved_object_graph_pb2.SavedObjectGraph()
checkpointable_objects, node_ids, slot_variables = util.find_objects(root)
util.fill_object_graph_proto(checkpointable_objects, node_ids, slot_variables,
proto)
for obj, obj_proto in zip(checkpointable_objects, proto.nodes):
_write_object_proto(obj, obj_proto, asset_file_def_index)
function_serialization.add_polymorphic_functions_to_object_graph_proto(
checkpointable_objects, proto)
extra_asset_dir = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY))
file_io.recursive_create_dir(extra_asset_dir)
object_graph_filename = os.path.join(
extra_asset_dir, compat.as_bytes("object_graph.pb"))
file_io.write_string_to_file(object_graph_filename, proto.SerializeToString())
def _write_object_proto(obj, proto, asset_file_def_index):
"""Saves an object into SavedObject proto."""
if isinstance(obj, tracking.TrackableAsset):
proto.asset.SetInParent()
proto.asset.asset_file_def_index = asset_file_def_index[obj]
else:
proto.user_object.SetInParent()
@tf_export("saved_model.save", v1=["saved_model.experimental.save"])
def save(obj, export_dir, signatures=None):
# pylint: disable=line-too-long
"""Exports the Checkpointable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
Example usage:
```python
class Adder(tf.train.Checkpoint):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = Adder()
tf.saved_model.save(to_export, '/tmp/adder')
```
The resulting SavedModel is then servable with an input named "x", its value
having any shape and dtype float32.
The optional `signatures` argument controls which methods in `obj` will be
available to programs which consume `SavedModel`s, for example serving
APIs. Python functions may be decorated with
`@tf.function(input_signature=...)` and passed as signatures directly, or
lazily with a call to `get_concrete_function` on the method decorated with
`@tf.function`.
If the `signatures` argument is omitted, `obj` will be searched for
`@tf.function`-decorated methods. If exactly one `@tf.function` is found, that
method will be used as the default signature for the SavedModel. This behavior
is expected to change in the future, when a corresponding
`tf.saved_model.load` symbol is added. At that point signatures will be
completely optional, and any `@tf.function` attached to `obj` or its
dependencies will be exported for use with `load`.
When invoking a signature in an exported SavedModel, `Tensor` arguments are
identified by name. These names will come from the Python function's argument
names by default. They may be overridden by specifying a `name=...` argument
in the corresponding `tf.TensorSpec` object. Explicit naming is required if
multiple `Tensor`s are passed through a single argument to the Python
function.
The outputs of functions used as `signatures` must either be flat lists, in
which case outputs will be numbered, or a dictionary mapping string keys to
`Tensor`, in which case the keys will be used to name outputs.
Since `tf.keras.Model` objects are also Checkpointable, this function can be
used to export Keras models. For example, exporting with a signature
specified:
```python
class Model(tf.keras.Model):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def serve(self, serialized):
...
m = Model()
tf.saved_model.save(m, '/tmp/saved_model/')
```
Exporting from a function without a fixed signature:
```python
class Model(tf.keras.Model):
@tf.function
def call(self, x):
...
m = Model()
tf.saved_model.save(
m, '/tmp/saved_model/',
signatures=m.call.get_concrete_function(
tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp")))
```
`tf.keras.Model` instances constructed from inputs and outputs already have a
signature and so do not require a `@tf.function` decorator or a `signatures`
argument. If neither are specified, the model's forward pass is exported.
```python
x = input_layer.Input((4,), name="x")
y = core.Dense(5, name="out")(x)
model = training.Model(x, y)
tf.saved_model.save(model, '/tmp/saved_model/')
# The exported SavedModel takes "x" with shape [None, 4] and returns "out"
# with shape [None, 5]
```
Variables must be tracked by assigning them to an attribute of a tracked
object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers
from `tf.keras.layers`, optimizers from `tf.train`) track their variables
automatically. This is the same tracking scheme that `tf.train.Checkpoint`
uses, and an exported `Checkpoint` object may be restored as a training
checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's
"variables/" subdirectory. Currently variables are the only stateful objects
supported by `tf.saved_model.save`, but others (e.g. tables) will be supported
in the future.
`tf.function` does not hard-code device annotations from outside the function
body, instead using the calling context's device. This means for example that
exporting a model which runs on a GPU and serving it on a CPU will generally
work, with some exceptions. `tf.device` annotations inside the body of the
function will be hard-coded in the exported model; this type of annotation is
discouraged. Device-specific operations, e.g. with "cuDNN" in the name or with
device-specific layouts, may cause issues. Currently a `DistributionStrategy`
is another exception: active distribution strategies will cause device
placements to be hard-coded in a function. Exporting a single-device
computation and importing under a `DistributionStrategy` is not currently
supported, but may be in the future.
SavedModels exported with `tf.saved_model.save` [strip default-valued
attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)
automatically, which removes one source of incompatibilities when the consumer
of a SavedModel is running an older TensorFlow version than the
producer. There are however other sources of incompatibilities which are not
handled automatically, such as when the exported model contains operations
which the consumer does not have definitions for.
The current implementation of `tf.saved_model.save` targets serving use-cases,
but omits information which will be necessary for the planned future
implementation of `tf.saved_model.load`. Exported models using the current
`save` implementation, and other existing SavedModels, will not be compatible
with `tf.saved_model.load` when it is implemented. Further, `save` will in the
future attempt to export `@tf.function`-decorated methods which it does not
currently inspect, so some objects which are exportable today will raise
exceptions on export in the future (e.g. due to complex/non-serializable
default arguments). Such backwards-incompatible API changes are expected only
prior to the TensorFlow 2.0 release.
Args:
obj: A checkpointable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: Optional, either a `tf.function` with an input signature
specified or the result of `f.get_concrete_function` on a
`@tf.function`-decorated function `f`, in which case `f` will be used to
generate a signature for the SavedModel under the default serving
signature key. `signatures` may also be a dictionary, in which case it
maps from signature keys to either `tf.function` instances with input
signatures or concrete functions. The keys of such a dictionary may be
arbitrary strings, but will typically be from the
`tf.saved_model.signature_constants` module.
Raises:
ValueError: If `obj` is not checkpointable.
@compatibility(eager)
Not supported when graph building. From TensorFlow 1.x,
`tf.enable_eager_execution()` must run first. May not be called from within a
function body.
@end_compatibility
"""
if not context.executing_eagerly():
with ops.init_scope():
if context.executing_eagerly():
raise AssertionError(
"tf.saved_model.save is not supported inside a traced "
"@tf.function. Move the call to the outer eagerly-executed "
"context.")
else:
raise AssertionError(
"tf.saved_model.save is not supported when graph building. "
"tf.enable_eager_execution() must run first when calling it from "
"TensorFlow 1.x.")
# pylint: enable=line-too-long
if not isinstance(obj, base.CheckpointableBase):
raise ValueError(
"Expected a Checkpointable object for export, got {}.".format(obj))
if signatures is None:
# Note that we run this before saving the checkpoint, since looping over
# attributes may have the side effect of creating variables in some cases.
signatures = _find_function_to_export(obj)
signatures = _canonicalize_signatures(signatures)
# TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
# compatible (no sessions) and share it with this export API rather than
# making a SavedModel proto and writing it directly.
saved_model = saved_model_pb2.SavedModel()
meta_graph_def = saved_model.meta_graphs.add()
object_saver = util.CheckpointableSaver(obj)
asset_info = _fill_meta_graph_def(
meta_graph_def, obj, signatures, object_saver)
saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
# So far we've just been generating protocol buffers with no I/O. Now we write
# the checkpoint, copy assets into the assets directory, and write out the
# SavedModel proto itself.
utils_impl.get_or_create_variables_dir(export_dir)
object_saver.save(utils_impl.get_variables_path(export_dir))
builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
export_dir)
path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, saved_model.SerializeToString())
_write_object_graph(obj, export_dir, asset_info.asset_index)
| apache-2.0 | -1,015,136,350,667,706,800 | 45.010738 | 162 | 0.722212 | false | 4.222989 | false | false | false |
stephane-caron/ijhr-2016 | pymanoid/cone.py | 1 | 2305 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Stephane Caron <[email protected]>
#
# This file is part of pymanoid.
#
# pymanoid is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanoid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# pymanoid. If not, see <http://www.gnu.org/licenses/>.
from cdd import Matrix, Polyhedron, RepType
from numpy import array, hstack, zeros
NUMBER_TYPE = 'float' # 'float' or 'fraction'
class ConeException(Exception):
def __init__(self, M):
self.M = M
class NotConeFace(ConeException):
def __str__(self):
return "Matrix is not a cone face"
class NotConeSpan(ConeException):
def __str__(self):
return "Matrix is not a cone span"
def face_of_span(S):
"""
Returns the face matrix S^F of the span matrix S,
that is, a matrix such that
{x = S z, z >= 0} if and only if {S^F x <= 0}.
"""
V = hstack([zeros((S.shape[1], 1)), S.T])
# V-representation: first column is 0 for rays
V_cdd = Matrix(V, number_type=NUMBER_TYPE)
V_cdd.rep_type = RepType.GENERATOR
P = Polyhedron(V_cdd)
H = array(P.get_inequalities())
b, A = H[:, 0], H[:, 1:]
for i in xrange(H.shape[0]):
if b[i] != 0:
raise NotConeSpan(S)
return -A
def span_of_face(F):
"""
Compute the span matrix F^S of the face matrix F,
that is, a matrix such that
{F x <= 0} if and only if {x = F^S z, z >= 0}.
"""
b, A = zeros((F.shape[0], 1)), -F
# H-representation: A x + b >= 0
F_cdd = Matrix(hstack([b, A]), number_type=NUMBER_TYPE)
F_cdd.rep_type = RepType.INEQUALITY
P = Polyhedron(F_cdd)
V = array(P.get_generators())
for i in xrange(V.shape[0]):
if V[i, 0] != 0: # 1 = vertex, 0 = ray
raise NotConeFace(F)
return V[:, 1:]
| gpl-3.0 | 8,395,834,680,207,245,000 | 25.802326 | 79 | 0.625163 | false | 3.153215 | false | false | false |
cs98jrb/Trinity | mysite/events/forms/booking.py | 1 | 2961 | __author__ = 'james'
from django.utils.translation import ugettext as _
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate
from events.models import Booking
from orders.models import Order, OrderItem
class BookingForm(forms.ModelForm):
# set the css of required fields
required_css_class = 'required'
email = forms.EmailField(
max_length=254,
label="Contact email",
required=True,
help_text="This is required so we can contact you."
)
tandc = forms.BooleanField(
label="Accept terms and conditions",
required=True,
)
def __init__(self, request, *args, **kwargs):
booking = super(BookingForm, self).__init__(*args, **kwargs)
# add label
self.fields['quantity'].label = "Number of people"
try:
if not request.user.is_anonymous():
self.fields['email'].initial = request.user.email
except User.DoesNotExist:
pass
class Meta:
model = Booking
fields = ['email', 'quantity', ]
def save(self, event, price, user, commit=True):
from django.contrib.contenttypes.models import ContentType
#
booking = super(BookingForm, self).save(commit=False)
booking.booked_by = user
booking.event = event
booking.price = price
total_booked = 0
open_order_list = Order.objects.open_order(user=user)
if open_order_list:
order = open_order_list[0]
for item in order.orderitem_set.all():
total_booked += item.content_object.quantity
if not(event.pricing_set.all().filter(online_book=True)
and not event.fully_booked):
raise ValidationError(
_('This event is fully booked'),
code='Fully Booked'
)
commit = False
elif event.num_spaces < (booking.quantity + total_booked):
places = booking.quantity + total_booked
raise ValidationError(
_('Not enough spaces for %(places)s people.'),
code='No Space',
params={'places': places},
)
commit = False
if commit:
booking.save()
# Add to open order
if not open_order_list:
order = Order(ordered_by=user)
order.save()
order_item = OrderItem(
order=order,
description=event.__unicode__(),
value=(price.value*booking.quantity),
vat=price.vat,
content_type=ContentType.objects.get_for_model(booking),
object_id=booking.id
)
order_item.save()
return booking
def clean(self):
return self.cleaned_data | gpl-2.0 | -287,810,376,454,806,340 | 28.039216 | 72 | 0.570078 | false | 4.367257 | false | false | false |
Midnighter/pyorganism | setup.py | 1 | 2511 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
==================
PyOrganism Package
==================
:Authors:
Moritz Emanuel Beber
:Date:
2012-05-22
:Copyright:
Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.
:File:
setup.py
"""
import sys
from os.path import join
from setuptools import (setup, Extension)
try:
from Cython.Distutils import build_ext
except ImportError as err:
sys.exit("Apologies, you need 'Cython' to install 'pyorganism'.")
if __name__ == "__main__":
# continuous
sources = ["continuous_wrapper.pyx", "continuous.c"]
c_path = join("pyorganism", "regulation", "src")
continuous = Extension("pyorganism.regulation.continuous_wrapper",
sources=[join(c_path, src) for src in sources],
include_dirs=[c_path]
)
setup(
name="pyorganism",
version="0.2.5",
license="BSD",
description="analyze organisational principles in living organisms",
author="Moritz Emanuel Beber",
author_email="moritz (dot) beber (at) gmail (dot) com",
url="http://github.com/Midnighter/pyorganism",
zip_safe=False,
install_requires=[
"future",
"networkx",
"numpy",
"pandas"
],
packages=["pyorganism",
"pyorganism.io",
"pyorganism.metabolism",
"pyorganism.regulation",
],
# package_data = {"pyorganism": ["data/*.xml", "data/*.txt", "data/*.tsv"]},
ext_modules=[continuous],
cmdclass={"build_ext": build_ext},
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
| bsd-3-clause | 7,473,944,302,607,792,000 | 29.621951 | 94 | 0.562724 | false | 3.966825 | false | false | false |
abice-sbr/adaptsearch | blastalign.py | 1 | 4394 | import string, re
# Written by Robert Belshaw (School of Biomedical & Healthcare Sciences, University of Plymouth) & Aris Katzourakis (Department of Zoology, University of Oxford)
# For more information and to cite see Belshaw, R & Katzourakis, A (2005) BlastAlign: a program that uses blast to align problematic nucleotide sequences. Bioinformatics 21:122-123.
# Please send any comments to [email protected] or [email protected]
file = open('blast_out', 'r')
buffer = file.readlines()
def Calculate_hits():
Number_of_landmarks = len(Permanent_dictionary[KeyList[0]]) # use legth of first entry
counter = 1
while counter < Number_of_landmarks: # Less than because list starts from zero
number_of_hits = 0
for item in KeyList:
list = Permanent_dictionary[item]
landmark = list[counter]
if landmark != '*':
number_of_hits = number_of_hits + 1
List_of_hits.append(number_of_hits)
counter = counter +1
return List_of_hits
def doInsertRoutine(list, value):
no_ast = 0
old_diff = 0
switch = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
new_diff = (item - value)*(item - value)
if item < value:
no_ast = 0
else:
i = list.index(item)
if new_diff > old_diff:
i = i-no_ast
list.insert(i, value)
else:
list.insert(i, value)
switch = 1
break
old_diff = new_diff
if switch == 0:
no_ast = 0
for item in list:
if item == '*':
no_ast = no_ast+1
else:
no_ast = 0
i = len(list) - no_ast # Finds position before any trailing asterisks
list.insert(i, value)
return list, i
def go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos):
tempKeyList = []
for item in KeyList:
tempKeyList.append(item)
tempKeyList.remove(tempKey)
for item in tempKeyList:
tempList = []
for subitem in Permanent_dictionary[item]:
tempList.append(subitem)
if Library_dictionary.has_key(item):
tempList.insert(LandmarkInsertPos, Library_dictionary[item])
Permanent_dictionary[item] = tempList
else:
tempList.insert(LandmarkInsertPos, '*')
Permanent_dictionary[item] = tempList
def process_previous_block(tempKey, tempValue, Library_dictionary):
landmark = 0
tempList = []
for item in (Permanent_dictionary[tempKey]):
tempList.append(item)
for item in (Permanent_dictionary[tempKey]):
if item != '*':
if (tempValue >= item-30) and (tempValue <= item+30):
landmark = 1
else:
pass
if landmark == 0:
theAnswer = doInsertRoutine(tempList, tempValue)
tempList = theAnswer[0]
LandmarkInsertPos = theAnswer[1]
Permanent_dictionary[tempKey] = tempList
go_through_Library(Library_dictionary, tempKey, LandmarkInsertPos)
def makeOutFile():
theOutFile = open('blast_out_python', 'w')
theOutFile.write('\t\t') # Just to line up entries for ease of viewing
for item in List_of_hits:
theOutFile.write('%s\t' %item)
theOutFile.write('\n')
for item in KeyList:
theOutFile.write('%s\t' %item)
for listItem in Permanent_dictionary[item]:
theOutFile.write('%s\t' %listItem)
theOutFile.write('\n')
Query_dictionary = {}
Library_dictionary = {}
Permanent_dictionary = {}
KeyList = []
list = [0]
List_of_hits = [] # To note whether entries are unique or not
for line in buffer:
if line[0] == '*':
entry = ""
entry = line[1:-1]
Permanent_dictionary[entry] = list
KeyList.append(entry)
n=0
previousKey = "null" # Need in case have identical sequences & then need to avoid unassigned variable
for line in buffer:
tempList = []
if line[0:5] == 'Query':
if n >= 1:
process_previous_block(QueryKey, QueryValue, Library_dictionary)
Library_dictionary = {}
line = string.split(line)
QueryKey = (line[0])[5:]
QueryValue = string.atoi(line[1])
Query_dictionary[QueryKey] = QueryValue
n=n+1
elif line[0:7] == 'Library':
line = string.split(line)
LibraryKey = (line[0])[7:]
LibraryValue = string.atoi(line[1])
if LibraryKey != QueryKey:
if previousKey == LibraryKey:
previousDist = (previousValue-QueryValue)*(previousValue-QueryValue)
currentDist = (LibraryValue-QueryValue)*(LibraryValue-QueryValue)
if currentDist < previousDist:
Library_dictionary[LibraryKey] = LibraryValue
else:
Library_dictionary[LibraryKey] = LibraryValue
previousKey = (line[0])[7:]
previousValue = string.atoi(line[1])
Calculate_hits()
makeOutFile()
| gpl-3.0 | 5,974,816,698,575,030,000 | 28.891156 | 181 | 0.69117 | false | 2.913793 | false | false | false |
chrismamil/chowda | test/test_chowda.py | 1 | 2201 | import unittest
import os
import chowda.parsing as parse
import datetime
import pandas as pd
from chowda.load import load_file
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
TEST_FILE = "CTL1 wk3 exp1 RAW data.txt"
TEST_1 = os.path.join(DATA_DIR, TEST_FILE)
class TestChowda(unittest.TestCase):
def setup(self):
test_file = os.path.join(DATA_DIR, TEST_FILE)
with open(test_file) as in_handle:
self.in_data = in_handle.readlines()
def test_parse_experiment_time(self):
result = parse.parse_experiment_time(self.in_data[0])
self.assertEquals(result.keys()[0], "Experiment Started")
def test_parse_subject(self):
result = parse.parse_subject(self.in_data[1])
self.assertEquals(result["Subject"], "CNS1")
def test_parse_mass(self):
result = parse.parse_subject_mass(self.in_data[2])
self.assertEquals(result["Subject Mass"], 34.26)
def test_load_file(self):
from chowda.load import load_file
result = load_file(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
def test_get_header(self):
from chowda.load import get_header
result = get_header(TEST_1)
self.assertEquals(result[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(result[-1].split(",")[0].strip(), '"========"')
def test_get_data(self):
from chowda.load import get_data
result = get_data(TEST_1)
self.assertEquals(result[0].split(",", 1)[0], "Interval")
def test_partition_file(self):
from chowda.load import partition_file
header, data = partition_file(TEST_1)
self.assertEquals(header[0].strip(),
'"Oxymax Windows V 2.30 Data File"')
self.assertEquals(header[-1].split(",")[0].strip(), '"========"')
self.assertEquals(data[0].split(",", 1)[0], "Interval")
def test_load_dataframe(self):
from chowda.load import load_dataframe
result = load_dataframe(parse.get_data(self.in_data))
self.assertEquals(result["Interval"].ix[0], "001")
| mit | -1,276,954,630,114,679,000 | 35.081967 | 73 | 0.613358 | false | 3.510367 | true | false | false |
CDKGlobal/cd-performance-promotion | cd_perf_promotion/engines/comparisonengine.py | 1 | 19434 | import json
import operator
class ComparisonEngine:
"""
Queries the performance tools' APIs and determines if the build passes
the target requirements.
"""
def check_health_severity(self, violation):
"""
Fails the build if the defined severity is found in the health rule
violations
Keyword arguments:
violation - dictionary that contains all of the information for a single
violation (as determined by AppDynamics)
"""
# Add the violation to the output file after removing unecessary data
self.output_json["appdynamics"]["healthrule_violations"].append(violation)
# Fail the build
self.output_json["promotion_gates"]["appdynamics_health"] = False
self.build_status_passed = False
def compare_appdynamics(self, healthrule_violations, warning, critical):
"""
Performs the comparison between the defined violation severity settings
and the violations that occurred
Keyword arguments:
healthrule_violations - Dictionary that contains all of the AppDynamics
health violations
warning - Boolean that indicates whether the user thinks
that health rule violations with a status of
"WARNING" are important enough to evaluate
critical - Boolean that indicates whether the user thinks
that health rule violations with a status of
"CRITICAL" are important enough to evaluate
"""
# Set the health to True by default and flip it if necessary
self.output_json["promotion_gates"]["appdynamics_health"] = True
for violation in healthrule_violations:
# Check if the severity settings that we care about exist in the health rule violations
if ((warning == True) and (violation["severity"] == "WARNING")):
self.check_health_severity(violation)
if ((critical == True) and (violation["severity"] == "CRITICAL")):
self.check_health_severity(violation)
def compare_blazemeter(self, metric_title, target_data, metric_data, transaction_index, operator):
"""
Performs the comparison between configuration promotion gates and the
actual blazemeter test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
transaction_index - The index of the transaction in the list of
transactions
operator - <, >, <=, >, == which is used to compare the real
data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Add the data to the output file
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
# Uses the operator module so that the process_performance_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
# Not necessary to make the overall status True again if it's True
# and if it was False for one transaction the overall status should still be False
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
self.output_json["blazemeter"]["transactions"][transaction_index][metric_title_passed] = False
self.build_status_passed = False
def compare_webpagetest(self, metric_title, target_data, metric_data, run_index, view, operator):
"""
Performs the comparison between configuration promotion gates and the
actual WebPageTest test data
Keyword arguments:
metric_title - String title that indicates the data item that is being
evaluated
target_data - Number that indicates the cutoff point for the specific
metric as determined by the user in the config
metric_data - The actual performance data number that is compared
against
view - Either first_view or repeat_view
operator - <, >, <=, >, == which is used to compare the real
data against the config
"""
if (target_data > 0):
# Metric is set in config, begin comparison
# Convert the metric data to an int (WebPageTest's XML output makes everything a string)
metric_data = int(metric_data)
# Add the data to the output file
if (run_index == None):
# Data from the averages section
self.output_json["webpagetest"]["average"][view][metric_title] = metric_data
else:
# Data from the runs section
self.output_json["webpagetest"]["runs"][run_index][view][metric_title] = metric_data
# Get the "passed" JSON key name ready
metric_title_passed = metric_title + "_passed"
# Determine if promotion gate was met
# Uses the operator module so that the process_performance_data function can determine
# what operator (<, >, <=, >=, etc.) should be used
if operator(metric_data, target_data):
# Success
if metric_title_passed not in self.output_json["promotion_gates"]:
# Not mentioned before, add it in
# Not necessary to make the overall status True again if it's True
# and if it was False for one transaction the overall status should still be False
if ((metric_title_passed in self.output_json["promotion_gates"] and self.output_json["promotion_gates"][metric_title_passed] != False) or (metric_title_passed not in self.output_json["promotion_gates"])):
self.output_json["promotion_gates"][metric_title_passed] = True
# Regardless, add it into the transaction data
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = True
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = True
else:
# Failure
self.output_json["promotion_gates"][metric_title_passed] = False
if (run_index == None):
self.output_json["webpagetest"]["average"][view][metric_title_passed] = False
else:
self.output_json["webpagetest"]["runs"][run_index][view][metric_title_passed] = False
self.build_status_passed = False
def process_data(self, config_data, perf_data):
"""
Determines if the build meets promotion gate criteria based off of the
information in the config file (retrieved by configengine) and the data
from the modules (retrieved by dataengine)
Keyword Arguments:
config_data - dictionary that contains all of the information retrieved
by the config engine
perf_data - dictionary that contains all of the information retrieved
by the data engine
"""
# Prepare the output file promotion gates section
self.output_json["promotion_gates"] = {}
# AppDynamics Module
if (config_data["appdynamics"]["exists"] == True):
# Check for AppDynamics Health Violations (only if the user cares)
if ((config_data["promotion_gates"]["warning"] == True) or (config_data["promotion_gates"]["critical"] == True)):
# Output something so that the user isn't confused, regardless of whether health violations were found
self.output_json["appdynamics"] = {"healthrule_violations": []}
if (perf_data["appdynamics"]["healthrule_violations"] != []):
# Uh-oh, there's something wrong with the build
self.compare_appdynamics(perf_data["appdynamics"]["healthrule_violations"], config_data["promotion_gates"]["warning"], config_data["promotion_gates"]["critical"])
else:
# No health violations, good to go!
self.output_json["promotion_gates"]["appdynamics_health"] = True
# BlazeMeter Module
if (config_data["blazemeter"]["exists"] == True):
# Compare BlazeMeter metrics
# Add BlazeMeter into the output file
self.output_json["blazemeter"] = {"transactions": []}
for index, transaction in enumerate(perf_data["blazemeter"]["transactions"]):
# Add transaction information into the output
self.output_json["blazemeter"]["transactions"].append({"transaction_id": transaction["transaction_id"], "transaction_name": transaction["transaction_name"]})
# Average Response Time
self.compare_blazemeter("response_time_avg", config_data["promotion_gates"]["response_time_avg"], transaction["response_time_avg"], index, operator.lt)
# Max Response Time
self.compare_blazemeter("response_time_max", config_data["promotion_gates"]["response_time_max"], transaction["response_time_max"], index, operator.lt)
# Response Time Geometric Mean
self.compare_blazemeter("response_time_geomean", config_data["promotion_gates"]["response_time_geomean"], transaction["response_time_geomean"], index, operator.lt)
# Response Time Standard Deviation
self.compare_blazemeter("response_time_stdev", config_data["promotion_gates"]["response_time_stdev"], transaction["response_time_stdev"], index, operator.lt)
# Response Time 90% Line
self.compare_blazemeter("response_time_tp90", config_data["promotion_gates"]["response_time_tp90"], transaction["response_time_tp90"], index, operator.lt)
# Response Time 95% Line
self.compare_blazemeter("response_time_tp95", config_data["promotion_gates"]["response_time_tp95"], transaction["response_time_tp95"], index, operator.lt)
# Response Time 99% Line
self.compare_blazemeter("response_time_tp99", config_data["promotion_gates"]["response_time_tp99"], transaction["response_time_tp99"], index, operator.lt)
# Maximum Latency
self.compare_blazemeter("latency_max", config_data["promotion_gates"]["latency_max"], transaction["latency_max"], index, operator.lt)
# Average Latency
self.compare_blazemeter("latency_avg", config_data["promotion_gates"]["latency_avg"], transaction["latency_avg"], index, operator.lt)
# Latency Standard Deviation
self.compare_blazemeter("latency_stdev", config_data["promotion_gates"]["latency_stdev"], transaction["latency_stdev"], index, operator.lt)
# Average Bandwidth
self.compare_blazemeter("bandwidth_avg", config_data["promotion_gates"]["bandwidth_avg"], transaction["bandwidth_avg"], index, operator.lt)
# Transaction Rate
self.compare_blazemeter("transaction_rate", config_data["promotion_gates"]["transaction_rate"], transaction["transaction_rate"], index, operator.gt)
# WebPageTest Module
if (config_data["webpagetest"]["exists"] == True):
# Compare WebPageTest metrics
# Add WebPageTest into the output file
self.output_json["webpagetest"] = {"average": {}, "runs": []}
# Keep track of the views for looping purposes
views = ["first_view", "repeat_view"]
# Make sure that we care about the data before processing it
if (("first_view" in config_data["promotion_gates"]) or ("repeat_view" in config_data["promotion_gates"])):
# Check out the averages for the runs
# This is less for failing the build and more for adding the data into the output file
for view in views:
if (view in config_data["promotion_gates"]):
# Set up average first_view
self.output_json["webpagetest"]["average"][view] = {}
# Speed Index (Average)
self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["average"][view]["SpeedIndex"], None, view, operator.gt)
# Time to First Paint (Average)
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["average"][view]["firstPaint"], None, view, operator.lt)
# Time to First Byte (Average)
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["average"][view]["TTFB"], None, view, operator.lt)
# Time to Fully Loaded (Average)
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["average"][view]["fullyLoaded"], None, view, operator.lt)
# Time to Visual Complete (Average)
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["average"][view]["visualComplete"], None, view, operator.lt)
# Time to Start Render (Average)
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["average"][view]["render"], None, view, operator.lt)
# Time to Last Visual Change (Average)
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["average"][view]["lastVisualChange"], None, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["average"][view]["titleTime"], None, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["average"][view]["bytesIn"], None, view, operator.lt)
# Loop over all of the runs
# Most of the time there will likely be only one
for run_id, run in enumerate(perf_data["webpagetest"]["runs"]):
# Add transaction information into the output
self.output_json["webpagetest"]["runs"].append({"run_id": run["run_id"]})
# Loop over all of the views for each run
for view in views:
if (view in config_data["promotion_gates"]):
# Set up first_view for the run
self.output_json["webpagetest"]["runs"][run_id][view] = {}
# Speed Index
self.compare_webpagetest("speed_index", config_data["promotion_gates"][view]["speed_index"], perf_data["webpagetest"]["runs"][run_id][view]["SpeedIndex"], run_id, view, operator.gt)
# Time to First Paint
self.compare_webpagetest("first_paint", config_data["promotion_gates"][view]["first_paint"], perf_data["webpagetest"]["runs"][run_id][view]["firstPaint"], run_id, view, operator.lt)
# Time to First Byte
self.compare_webpagetest("first_byte", config_data["promotion_gates"][view]["first_byte"], perf_data["webpagetest"]["runs"][run_id][view]["TTFB"], run_id, view, operator.lt)
# Time to Fully Loaded
self.compare_webpagetest("fully_loaded", config_data["promotion_gates"][view]["fully_loaded"], perf_data["webpagetest"]["runs"][run_id][view]["fullyLoaded"], run_id, view, operator.lt)
# Time to Visual Complete
self.compare_webpagetest("visual_complete", config_data["promotion_gates"][view]["visual_complete"], perf_data["webpagetest"]["runs"][run_id][view]["visualComplete"], run_id, view, operator.lt)
# Time to Start Render
self.compare_webpagetest("start_render", config_data["promotion_gates"][view]["start_render"], perf_data["webpagetest"]["runs"][run_id][view]["render"], run_id, view, operator.lt)
# Time to Last Visual Change
self.compare_webpagetest("last_visual_change", config_data["promotion_gates"][view]["last_visual_change"], perf_data["webpagetest"]["runs"][run_id][view]["lastVisualChange"], run_id, view, operator.lt)
# Time to <title></title> Tags Loaded
self.compare_webpagetest("title_time", config_data["promotion_gates"][view]["title_time"], perf_data["webpagetest"]["runs"][run_id][view]["titleTime"], run_id, view, operator.lt)
# Page Size (Bytes In)
self.compare_webpagetest("page_size", config_data["promotion_gates"][view]["page_size"], perf_data["webpagetest"]["runs"][run_id][view]["bytesIn"], run_id, view, operator.lt)
# Set the overall status in the output JSON file
self.output_json["promotion_gates"]["passed"] = self.build_status_passed
# We're done!
print("Processed performance data")
return self.output_json
def __init__(self):
"""
Class starting point
"""
# Build Status
self.build_status_passed = True
# Output JSON report data
# Later appended by the AppDynamics and BlazeMeter processing functions
self.output_json = {}
| mit | -1,119,515,072,854,985,200 | 64.877966 | 229 | 0.591129 | false | 4.58241 | true | false | false |
Frky/scat | src/shell/memory/addrtable.py | 1 | 1331 | #-*- coding: utf-8 -*-
class AddrTable(object):
TABLE_SIZE = 10000
def __init__(self, dic=False):
self.__addr = list()
self.__dic = dic
for i in xrange(AddrTable.TABLE_SIZE):
if self.__dic:
self.__addr.append(dict())
else:
self.__addr.append(list())
self.__curr_key = None
self.__curr_addr = None
def contains(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
return addr in self.__addr[key].keys()
else:
return addr in self.__addr[key]
def add(self, addr):
key = addr % AddrTable.TABLE_SIZE
if self.__dic:
self.__addr[key][addr] = list()
else:
self.__addr[key].append(addr)
def remove(self, addr):
key = addr % AddrTable.TABLE_SIZE
self.__addr[key].remove(addr)
def add_dic(self, addr, fid):
if not self.__dic:
raise Exception
key = addr % AddrTable.TABLE_SIZE
self.__addr[key][addr].append(fid)
def items(self):
for key in self.__addr:
if self.__dic:
for addr, call in key.items():
yield addr, call
else:
for addr in key:
yield addr
| mit | -7,471,110,895,551,581,000 | 26.163265 | 50 | 0.486852 | false | 3.996997 | false | false | false |
BenjaminSchubert/web-polls | backend/errors/http.py | 1 | 1737 | """
This module contains a collection of commonly encountered HTTP exceptions.
This allows all these http exceptions to be treated in the same way and simplifies the return of errors to the user.
"""
from errors import ErrorMessage
__author__ = "Benjamin Schubert <[email protected]>"
class BaseHTTPException(Exception):
"""
This is the base HTTP Exception.
It should not be used as is, as it signifies that the server had an unexpected error.
"""
status_code = 500 # type: int
def __init__(self, payload: ErrorMessage = None, status_code: int = None):
"""
Create a new `BaseHTTPException`.
:param payload: payload to send to explain the error to the user.
:param status_code: HTTP status code to send. If not given, will fallback to `self.status_code`.
"""
super().__init__(self)
if payload is None:
payload = dict()
self.payload = payload
if status_code is not None:
self.status_code = status_code
class ForbiddenException(BaseHTTPException):
def __init__(self):
super().__init__({}, 401)
class BadRequestException(BaseHTTPException):
"""This is an exception to throw to return a 400 BAD REQUEST to the user."""
def __init__(self, payload: ErrorMessage):
"""
Create a new `BadRequestException`.
:param payload: payload to send to explain the error to the user.
"""
super().__init__(payload, 400)
class NotFoundException(BaseHTTPException):
"""This is an exception to throw to return a 404 NOT FOUND to the user."""
def __init__(self):
"""Create a new `NotFoundException`."""
super().__init__(None, 404)
| mit | -7,086,277,074,543,223,000 | 27.95 | 116 | 0.639033 | false | 4.320896 | false | false | false |
ralbayaty/KaggleRetina | testing/censureHistCalc.py | 1 | 4517 | from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
from PIL import Image, ImageDraw
def draw_keypoints(img, kp, scale):
draw = ImageDraw.Draw(img)
# Draw a maximum of 300 keypoints
for i in range(min(len(scale),300)):
x1 = kp[i,1]
y1 = kp[i,0]
x2 = kp[i,1]+2**scale[i]
y2 = kp[i,0]+2**scale[i]
coords = (x1, y1, x2, y2)
draw.ellipse(coords, fill = None, outline ='white')
if __name__ == '__main__':
try:
file_name = sys.argv[1]
except:
print("Didn't give me a file...")
file_name = "Lenna.png"
def nothing(*arg):
pass
# Create sliderbars to change the values of CENSURE parameters online
# Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10
cv2.namedWindow('censure')
cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing)
cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing)
cv2.createTrackbar('mode', 'censure', 2, 2, nothing)
cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing)
cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing)
# Read image from file, then inspect the image dimensions
img = cv2.imread(file_name,1)
height, width, channels = img.shape
# Pull the different color channels from the image
blue = img[:,:,0]
green = img[:,:,1]
red = img[:,:,2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Make a PIL image from each channel so we can use PIL.Iamge.thumbnail to resize if needed
blue1 = Image.fromarray(blue)
green1 = Image.fromarray(green)
red1 = Image.fromarray(red)
gray1 = Image.fromarray(gray)
# Check if dimensions are above desired, if so then resize keepig aspect ratio
m, n = 512, 512
if height > m or width > n:
blue1.thumbnail((m,n), Image.ANTIALIAS)
green1.thumbnail((m,n), Image.ANTIALIAS)
red1.thumbnail((m,n), Image.ANTIALIAS)
gray1.thumbnail((m,n), Image.ANTIALIAS)
# CENSURE related
mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"}
last_num_kp = 0
while True:
vis = gray.copy()
img = img1.copy()
# Read the values of the sliderbars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
if min_scale is 0:
min_scale = 1
if min_scale + max_scale < 3:
max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')
# Create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
non_max_threshold=non_max_threshold, line_threshold=line_threshold)
# Obtain the CENSURE features
censure.detect(blue1)
kp_blue, scale_blue = censure.keypoints, censure.scales
censure.detect(green1)
kp_green, scale_green = censure.keypoints, censure.scales
censure.detect(red1)
kp_red, scale_red = censure.keypoints, censure.scales
censure.detect(gray1)
kp_gray, scale_gray = censure.keypoints, censure.scales
# Print the # of features if it has changed between iterations
num_kp = len(censure.keypoints)
if last_num_kp != num_kp:
print("Number of keypoints: " + str(len(censure.keypoints)))
last_num_kp = num_kp
# Draw the feature points on the images
draw_keypoints(blue1, kp_blue, scale_blue)
draw_keypoints(green1, kp_green, scale_green)
draw_keypoints(red1, kp_red, scale_red)
draw_keypoints(gray1, kp_gray, scale_gray)
# Obtain the histogram of scale values
plt.clf() # clear the figure from any previous plot
scale_hist, bin_edges = np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1))
plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1)
plt.show(block=False)
plt.draw()
# Show the image with keypoints drawn over
image = cv2.cvtColor(np.asarray(img),cv2.COLOR_BGR2RGB)
cv2.imshow('censure', image)
if 0xFF & cv2.waitKey(500) == 27:
break
cv2.destroyAllWindows() | gpl-2.0 | 159,560,390,159,278,880 | 36.032787 | 102 | 0.629179 | false | 3.210377 | false | false | false |
qiou/Dev | python/edf.py | 1 | 4511 | #=========================================================================
# Dependencies / Libraries
#=========================================================================
import time
import serial
import MySQLdb
import subprocess
from time import sleep
import datetime
#=========================================================================
# Fonction Tableau/Dictionnaire
#=========================================================================
def checksum (etiquette, valeur):
sum = 32
for c in etiquette: sum = sum + ord(c)
for c in valeur: sum = sum + ord(c)
sum = (sum & 63) + 32
return chr(sum)
#=========================================================================
# Fonction LireTeleinfo
#=========================================================================
def ReadTeleinfo ():
# Attendre le debut du message
while ser.read(1) != chr(2): pass
message = ""
fin = False
while not fin:
char = ser.read(1)
if char != chr(2):
message = message + char
else:
fin = True
trames = [
trame.split(" ")
for trame in message.strip("\r\n\x03").split("\r\n")
]
tramesValides = dict([
[trame[0],trame[1]]
for trame in trames
if (len(trame) == 3) and (checksum(trame[0],trame[1]) == trame[2])
])
return tramesValides
# print('Lecture des trames Teleinformation avec la carte RPIDOM')
#=========================================================================
# Connexion au port
#=========================================================================
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate=1200,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.SEVENBITS )
#=========================================================================
# Definition variables de trame et chargement d'une valeur initiale
#=========================================================================
vIINST = 0
vMOTDETAT = 0
vOPTARIF = 0
vISOUSC = 0
vADCO = 0
vPAPP = 0
vIMAX = 0
vBASE = 0
vADPS = 0
#=========================================================================
# Read serial data
#=========================================================================
#print '\nPremiere voie'
ser.write('A')
sleep(1)
ser.flushInput()
tramesOk = ReadTeleinfo()
trouve = False
for etiquette in tramesOk:
if etiquette == 'IINST':
#print etiquette , ":", tramesOk[etiquette]
vIINST = tramesOk[etiquette]
if etiquette == 'MOTDETAT':
#print etiquette , ":", tramesOk[etiquette]
vMOTDETAT = tramesOk[etiquette]
if etiquette == 'OPTARIF':
#print etiquette , ":", tramesOk[etiquette]
vOPTARIF = tramesOk[etiquette]
if etiquette == 'ISOUSC':
#print etiquette , ":", tramesOk[etiquette]
vISOUSC = tramesOk[etiquette]
if etiquette == 'ADCO':
#print etiquette , ":", tramesOk[etiquette]
vADCO = tramesOk[etiquette]
if etiquette == 'PAPP':
#print etiquette , ":", tramesOk[etiquette]
vPAPP = tramesOk[etiquette]
if etiquette == 'IMAX':
#print etiquette , ":", tramesOk[etiquette]
vIMAX = tramesOk[etiquette]
if etiquette == 'BASE':
#print etiquette , ":", tramesOk[etiquette]
vBASE = tramesOk[etiquette]
if etiquette == 'ADPS':
#print etiquette , ":", tramesOk[etiquette]
vADPS = tramesOk[etiquette]
#=========================================================================
# Date and Hour
#=========================================================================
vHEURE = datetime.datetime.now().strftime('%H:%M')
vDATE = datetime.datetime.today().strftime('%Y-%m-%d')
#=========================================================================
# Connect and insert into DB
#=========================================================================
db = MySQLdb.connect(host="192.168.1.250",port=3307,user="root",passwd="MariaQiou",db="edf" )
cursor = db.cursor()
if vBASE > 0:
cursor.execute("""INSERT INTO teleinfo(DATE, HEURE, IINST, MOTDETAT, OPTARIF, ISOUSC, ADCO, PAPP, IMAX, BASE, ADPS) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" ,(vDATE, vHEURE, vIINST, vMOTDETAT, vOPTARIF, vISOUSC, vADCO, vPAPP, vIMAX, vBASE, vADPS))
# Write into DB
db.commit()
db.rollback()
db.close()
#=========================================================================
ser.close()
| gpl-2.0 | 3,979,199,428,035,871,000 | 35.379032 | 265 | 0.441809 | false | 3.617482 | false | false | false |
rithms/hearthstone | xml_to_json.py | 1 | 4835 | #!/usr/bin/env python
from bs4 import BeautifulSoup
import glob
import json
#############################################
# Convert Hearthstone card data XML to JSON #
#############################################
__author__ = "Taylor Caldwell - http://github.com/rithms"
__copyright__ = "Copyright 2015, Taylor Caldwell"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Taylor Caldwell"
__email__ = "[email protected]"
__status__ = "Production"
# EnumIds - Non-Boolean
enum_dict = {
45 : "health",
47 : "attack",
48 : "cost",
183 : "cardSet",
184 : "cardTextInHand",
185 : "cardName",
187 : "durability",
199 : "class",
200 : "race",
201 : "faction",
202 : "cardType",
203 : "rarity",
251 : "attackVisualType",
252 : "cardTextInPlay",
268 : "devState",
325 : "targetingArrowText",
330 : "enchantmentBirthVisual",
331 : "enchantmentIdleVisual",
342 : "artistName",
351 : "flavorText",
365 : "howToGetThisGoldCard",
364 : "howToGetThisCard",
#377 : "unknownHasOnDrawEffect",
#380 : "unknownBlackrockHeroes",
#389 : "unknownDuneMaulShaman",
#402 : "unknownIntenseGaze",
#401 : "unknownBroodAffliction"
}
# EnumIds - Boolean
bool_dict = {
32 : "Trigger Visual",
114 : "elite",
321 : "collectible",
189 : "Windfury",
190 : "Taunt",
191 : "Stealth",
192 : "Spell Power",
194 : "Divine Shield",
197 : "Charge",
205 : "Summoned",
208 : "Freeze",
212 : "Enrage",
215 : "Overload",
217 : "Deathrattle",
218 : "Battlecry",
219 : "Secret",
220 : "Combo",
240 : "Can't Be Damaged",
293 : "Morph",
335 : "Invisible Deathrattle",
338 : "One Turn Effect",
339 : "Silence",
340 : "Counter",
349 : "Immune To Spell Power",
350 : "Adjacent Buff",
361 : "Heal Target",
362 : "Aura",
363 : "Poisonous",
367 : "AI Must Play",
370 : "Affected By Spell Power",
388 : "Spare Part",
}
# Card Class IDs
class_dict = {
0 : "Developer",
2 : "Druid",
3 : "Hunter",
4 : "Mage",
5 : "Paladin",
6 : "Priest",
7 : "Rogue",
8 : "Shaman",
9 : "Warlock",
10 : "Warrior",
11 : "Dream"
}
# Card Set IDs
set_dict = {
2 : "Basic",
3 : "Classic",
4 : "Reward",
5 : "Missions",
7 : "System",
8 : "Debug",
11 : "Promotion",
12 : "Curse of Naxxramas",
13 : "Goblin vs Gnomes",
14 : "Blackrock Mountain",
16 : "Credits"
}
# Card Type IDs
type_dict = {
3 : "Hero",
4 : "Minion",
5 : "Spell",
6 : "Enchantment",
7 : "Weapon",
10 : "Hero Power"
}
# Card Race IDs
race_dict = {
14 : "Murloc",
15 : "Demon",
17 : "Mechanical",
20 : "Beast",
21 : "Totem",
23 : "Pirate",
24 : "Dragon"
}
# Card Faction IDs
faction_dict = {
1 : "Horde",
2 : "Alliance",
3 : "Neutral"
}
# Card Rarity IDs
rarity_dict = {
0 : "Developer",
1 : "Common",
2 : "Free",
3 : "Rare",
4 : "Epic",
5 : "Legendary"
}
# Get the name of the corresponding enum ID
def get_name(enum_id, d):
if enum_id in d:
return d[enum_id]
for f in glob.glob('cardxml0/CAB-cardxml0/TextAsset/*.txt'):
with open(f) as cardfile:
file_name = f.split('/')[-1].split('.')[0]
cardsoup = BeautifulSoup(cardfile.read(), features="xml")
cards = cardsoup.find_all('Entity')
json_dict = { 'data' : {} }
for card in cards:
card_id = card.get('CardID')
json_dict['data'][card_id] = { 'id' : card_id, 'mechanics' : [] }
tags = card.find_all('Tag')
for tag in tags:
enum_id = int(tag.get('enumID'))
if(tag.get('type') == 'String'):
enum_name = tag.text
else:
enum_name = tag.get('value')
if enum_id in enum_dict:
field = enum_dict[enum_id]
if field == 'class':
enum_name = get_name(int(enum_name), class_dict)
elif field == 'cardSet':
enum_name = enum_name = get_name(int(enum_name), set_dict)
elif field == 'cardType':
enum_name = get_name(int(enum_name), type_dict)
elif field == 'race':
enum_name = get_name(int(enum_name), race_dict)
elif field == 'faction':
enum_name = get_name(int(enum_name), faction_dict)
elif field == 'rarity':
enum_name = get_name(int(enum_name), rarity_dict)
json_dict['data'][card_id][enum_dict[enum_id]] = enum_name
elif enum_id in bool_dict:
field = bool_dict[enum_id]
if field == 'collectible' or field == 'elite':
if enum_name == '1':
json_dict['data'][card_id][field] = True
elif enum_name == '0':
json_dict['data'][card_id][field] = False
else:
if enum_name == '1':
json_dict['data'][card_id]['mechanics'].append(field)
for key in bool_dict:
field = bool_dict[key]
if field == 'collectible' or field == 'elite':
if field not in json_dict['data'][card_id]:
json_dict['data'][card_id][field] = False
if not json_dict['data'][card_id]['mechanics']:
del json_dict['data'][card_id]['mechanics']
with open(file_name+'.json', 'w') as outfile:
json.dump(json_dict, outfile, sort_keys=True)
| mit | 7,331,306,430,884,571,000 | 20.20614 | 67 | 0.588211 | false | 2.512994 | false | false | false |
bodedev/prospera | plataforma/management/commands/atualizar_saldos.py | 1 | 2085 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from plataforma.constants import ETHER_DIVISOR
from plataforma.models import Saldo
import requests
def buscar_saldo(carteira):
try:
r = requests.get("https://api.etherscan.io/api?module=account&action=tokenbalance&contractaddress=%s&address=%s&tag=latest&apikey=%s" % (settings.ETHERSCAN_CONTRACT_ADDRESS, carteira, settings.ETHERSCAN_APIKEY))
if r.status_code == 200:
data = r.json()
if data["status"] == "1":
saldo = float(data["result"]) / float(ETHER_DIVISOR)
_, created = Saldo.objects.update_or_create(carteira=carteira, defaults={"total": saldo})
print "%s: %0.6f (%s)" % (carteira, saldo, str(created))
return True
return False
except Exception, e:
print "Nao consegui pegar o saldo da carteira %s" % carteira
return None
class Command(BaseCommand):
help = u"Atualiza o saldo de todas as carteiras de um contrato."
def handle(self, *args, **options):
url = "https://api.etherscan.io/api?module=logs&action=getLogs&fromBlock=%s&toBlock=latest&address=%s&apikey=%s" % (settings.ETHERSCAN_START_BLOCK_NUMBER, settings.ETHERSCAN_CONTRACT_ADDRESS, settings.ETHERSCAN_APIKEY)
r = requests.get(url)
data = r.json()
saldos_atualizados = []
for transacion in data["result"]:
carteira_from = transacion["topics"][1].replace("0x000000000000000000000000", "0x")
if carteira_from not in saldos_atualizados:
if buscar_saldo(carteira_from):
saldos_atualizados.append(carteira_from)
if len(transacion["topics"]) >= 3:
carteira_to = transacion["topics"][2].replace("0x000000000000000000000000", "0x")
if carteira_to not in saldos_atualizados:
if buscar_saldo(carteira_to):
saldos_atualizados.append(carteira_to)
print "Fim de processo!"
| mit | -6,934,085,334,793,678,000 | 44.326087 | 226 | 0.632134 | false | 3.434926 | false | false | false |
jas0n1ee/SonyCameraAPI | takePicture.py | 1 | 1212 | #!/usr/bin/env python
from sonyAPI2 import API2
import cv2
import urllib2
import numpy as np
import time
import struct
api = API2()
api.update_api_list()
try:
result = api.do('getAvailableCameraFunction')
current = result['result'][0]
availavle = result['result'][1]
if current != "Remote Shooting":
if "Remote Shooting" in availavle:
api.do('setCameraFunction',["Remote Shooting"])
api.update_api_list()
else:
print "Remote Shooting not availavle"
except KeyError:
print result
try:
result = api.do('getAvailableShootMode')
current = result['result'][0]
availavle = result['result'][1]
if current != "still":
if "still" in availavle:
api.do('setShootMode',["still"])
api.update_api_list()
else:
print "stil Shooting not availavle"
except KeyError:
print result
try:
result = api.do('actTakePicture')
url = result['result'][0][0]
except KeyError:
print result
except TypeError:
print result
f = urllib2.urlopen(url)
d = np.asarray(bytearray(f.read()), dtype='uint8')
img = cv2.imdecode(d,cv2.IMREAD_COLOR)
cv2.imshow('postview',img)
time.sleep(10)
| apache-2.0 | 5,010,609,995,021,559,000 | 23.734694 | 59 | 0.640264 | false | 3.338843 | false | true | false |
junhe/chopper | src/MWpyFS/Monitor.py | 1 | 44187 | # Chopper is a diagnostic tool that explores file systems for unexpected
# behaviors. For more details, see paper Reducing File System Tail
# Latencies With Chopper (http://research.cs.wisc.edu/adsl/Publications/).
#
# Please send bug reports and questions to [email protected].
#
# Written by Jun He at University of Wisconsin-Madison
# Copyright (C) 2015 Jun He ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# The monitor is used to monitor the FS fragmentation status.
# What I want to see is, generally, how's the metadata. This may include:
#
# SIZE of inode and extent tree. (number of inode block and extent tree
# block). This can be find by debugfs "dump_extents [-n] [-l] filespec".
# But you have to do it for ALL files in the file system, which might be
# slow. I haven't got a better approach. A good indicator of metadata
# problem is #_metadata_block/#_data_block. This should be very informative
# about the aging of a file system which causes metadata disaster.
# I expect the following from the output of this per file:
#
# filepath create_time n_metablock n_datablock metadata_ratio filebytes
#
# Extent fragmentation overview. This can be obtained by e2freefrag. This
# should give me a good sense of how fragemented the FS is. The acceleration
# rate of fragmentation might be a good indicator of whether a workload
# can cause metadata problem. (Because of fragmentation, physical blocks
# might not be able to allocated contiguously, then it needs two or more
# extents to the logically contiguous blocks.)
# I expect the following from the output of this per FS:
# JUST LIKE THE ORIGINAL OUTPUT BUT FORMAT IT A LITTLE BIT
#
#
#
#
# TODO:
# 1. I need to figure out a good way to figure out
# dspan of the interested files.
# 2. Is there a better way in btrfs to find only the
# interested file, other than deleting all the
# uninteresting file.
#
import subprocess
from time import strftime, localtime, sleep
import re
import shlex
import os
import pprint
import shutil
import fnmatch
import itertools
import glob
import btrfs_db_parser
import xfs_db_parser
import dataframe
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def fill_white_space(path, filler="_"):
path.strip()
return path.replace(" ", filler)
class FSMonitor:
"""
This monitor probes the ext4 file system and return information I
want in a nice format.
"""
def __init__(self, dn, mp, ld="/tmp", cw=20, filesystem='ext4'):
self.devname = dn # this should be the device name of the partition
self.mountpoint = mp # please only provide path without mountpoint
# when using this class.
self.col_width = cw
self.logdir = ld
self.resetMonitorTime()
self.resetJobID()
self.filesystem = filesystem # the file system this monitor monitors
def resetMonitorTime(self, monitorid=""):
"monitor_time is used to identify each data retrieval"
if monitorid == "":
self.monitor_time = strftime("%Y-%m-%d-%H-%M-%S", localtime())
else:
self.monitor_time = monitorid
def resetJobID(self, jobid="DefaultJOBID"):
self.jobid = jobid
def _spliter_dumpfs(self, line):
line = line.replace(",", " ")
elems = line.split(":")[1]
elems = elems.split()
new_elems = [] # [[a0,a1],[b0,b1]...]
for elem in elems:
e = elem.split("-")
elen = len(e)
if elen == 2:
new_elems.append(e)
elif elen == 1:
e = e*2
new_elems.append(e)
else:
print "wrong split", elem
exit(1)
return new_elems
def dumpfsSummary(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", "-h", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
proc.wait()
return proc.communicate()[0]
def dumpfs(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
freeblocks = []
freeinodes = []
for line in proc.stdout:
if line.startswith(" Free blocks:"):
freeblocks += self._spliter_dumpfs(line)
elif line.startswith(" Free inodes:"):
freeinodes += self._spliter_dumpfs(line)
else:
pass
proc.wait()
# initialize
freeblocks_df = dataframe.DataFrame(header=['start', 'end'],
table=freeblocks)
freeinodes_df = dataframe.DataFrame(header=['start', 'end'],
table=freeinodes)
# add additional columns
freeblocks_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeblocks_df.addColumn(key="jobid",
value=self.jobid)
freeblocks_df.addColumn(key="HEADERMARKER_freeblocks",
value="DATAMARKER_freeblocks")
freeinodes_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeinodes_df.addColumn(key="jobid",
value=self.jobid)
freeinodes_df.addColumn(key="HEADERMARKER_freeinodes",
value="DATAMARKER_freeinodes")
return {"freeblocks":freeblocks_df, "freeinodes":freeinodes_df}
def e2freefrag(self):
if self.filesystem != 'ext4':
return
cmd = ["e2freefrag", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc.wait()
part = 0
sums_dict = {}
hist_table = ""
hist_df = dataframe.DataFrame()
for line in proc.stdout:
if part == 0:
if "HISTOGRAM" in line:
part = 1
continue
mo = re.search( r'(.*): (\d+)', line, re.M)
if mo:
keyname = mo.group(1)
keyname = keyname.replace('.', '')
keyname = "_".join(keyname.split())
sums_dict[keyname] = mo.group(2)
elif part == 1:
# This part is the histogram.
line = line.strip()
if "Extent Size" in line:
hist_table = "Extent_start Extent_end Free_extents Free_Blocks Percent"
hist_df.header = hist_table.split()
continue
fline = re.sub(r'[\-:\n]', "", line)
fline = re.sub(r'\.{3}', "", fline)
row = fline.split()
hist_df.addRowByList(row)
hist_df.addColumns(keylist = ["HEADERMARKER_freefrag_hist",
"monitor_time",
"jobid"],
valuelist = ["DATAMARKER_freefrag_hist",
self.monitor_time,
self.jobid])
# convert dict to data frame
sums_df = dataframe.DataFrame(header=sums_dict.keys(),
table=[sums_dict.values()])
sums_df.addColumn(key="HEADERMARKER_freefrag_sum",
value="DATAMARKER_freefrag_sum")
sums_df.addColumn(key="monitor_time",
value=self.monitor_time)
sums_df.addColumn(key="jobid",
value=self.jobid)
return {"FragSummary":sums_df, "ExtSizeHistogram":hist_df}
def imap_of_a_file(self, filepath):
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'imap " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'imap "' + filepath + '"']
print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
imapdict = {}
for line in proc.stdout:
#print line
if "block group" in line:
nums = re.findall(r'\d+', line)
if len(nums) != 2:
print "Error parsing imap"
exit(1)
imapdict['inode_number'] = nums[0]
imapdict['group_number'] = nums[1]
elif 'located at block' in line:
items = line.split()
imapdict['block_number'] = items[3].rstrip(',')
imapdict['offset_in_block'] = items[5]
proc.wait()
#print imapdict
return imapdict
def dump_extents_of_a_file(self, filepath):
"This function only gets ext list for this file"
if self.filesystem != 'ext4':
return
#print "filepath:", filepath
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
ext_list = [] # Use list here in case I want to extract data in Python
header = []
max_level = 0
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
df_ext.header = header
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, tokens[6]) #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad thing happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
df_ext.addRowByDict(d)
proc.wait()
# Put the location of the inode the df_ext, level_index as -1 to
# indicate that it is a inode
imapdict = self.imap_of_a_file(filepath)
d = {}
d['Level_index'] = '-1'
d['Max_level'] = '-1'
d['Entry_index'] = 'NA'
d['N_Entry'] = 'NA'
d['Logical_start'] = 'NA'
d['Logical_end'] = 'NA'
d['Physical_start'] = imapdict['block_number']
d['Physical_end'] = imapdict['block_number']
d['Length'] = '1'
d['Flag'] = 'NA'
df_ext.addRowByDict(d)
df_ext.addColumn(key = "filepath",
value = fill_white_space(filepath))
df_ext.addColumn(key = "HEADERMARKER_extlist",
value = "DATAMARKER_extlist")
df_ext.addColumn(key = "jobid",
value = self.jobid)
df_ext.addColumn(key = "monitor_time",
value = self.monitor_time)
return df_ext
def setBlock(self, blockn, count):
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'setb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
proc.wait()
return proc.returncode
def isAllBlocksInUse(self, blockn, count):
"if any of the blocks is not in use, return false. return true otherwise"
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'testb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
if 'not' in line:
return False
proc.wait()
return True
def dumpextents_sum(self, filepath):
"TODO: merge this with dump_extents_of_a_file()"
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
#cmd = ['debugfs', self.devname, '-R', '"dump_extents ' + filepath + '"']
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, "........."
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
header = []
n_entries = [0] * 3 # n_entries[k] is the number of entries at level k
# it can be used to calculate number of
# internal/leaf nodes
max_level = 0
exttable = ""
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, "NA") #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad thing happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
n_entries[ int(d["Level_index"]) ] = int( d["N_Entry"] )
max_level = int( d["Max_level"] )
#print "..... finished stdout parsing .... "
proc.terminate()
#print "..... after terminating .... "
# calculate number of meatadata blocks
# only 1st and 2nd levels takes space.
# How to calculate:
# if there is only 1 level (root and level 1).
# the number of entires in level 0 indicates the
# number of nodes in level 1.
# Basically, the number of entries in level i
# equals the number of ETB of the next level
n_metablock = 0
if max_level == 0:
# the tree has no extent tree block outside of the inode
n_metablock = 0
else:
for n in n_entries[0:max_level]:
n_metablock += n
dumpdict = {}
dumpdict["filepath"] = fill_white_space(filepath)
dumpdict["n_metablock"] = n_metablock
others = self.filefrag(filepath)
if others.has_key('nblocks'):
dumpdict["n_datablock"] = others["nblocks"]
else:
dumpdict["n_datablock"] = 'NA'
if others.has_key('nbytes'):
dumpdict["filebytes"] = others["nbytes"]
else:
dumpdict["filebytes"] = 'NA'
#print "Reached end of debugfs...."
return dumpdict
def filefrag(self, filepath):
if self.filesystem != 'ext4':
return
fullpath = os.path.join(self.mountpoint, filepath)
cmd = ["filefrag", "-sv", fullpath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
mydict = {}
for line in proc.stdout:
if line.startswith("File size of"):
#print line
line = line.split(" is ")[1]
#print line
nums = re.findall(r'\d+', line)
if len(nums) != 3:
print "filefrag something wrong"
exit(1)
mydict["nbytes"] = nums[0]
mydict["nblocks"] = nums[1]
mydict["blocksize"] = nums[2]
return mydict
def getAllInodePaths(self, target="."):
"it returns paths of all files and diretories"
rootpath = os.path.join(self.mountpoint)
paths = []
with cd(rootpath):
cmd = ['find', target]
print cmd
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def getExtentList_of_a_dir(self, target):
"""
this only works for absolute path
"""
if self.filesystem != 'ext4':
return
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
#print files
#exit(1)
df = dataframe.DataFrame()
for f in files:
f = os.path.relpath(f, target)
if len(df.header) == 0:
df = self.dump_extents_of_a_file(f)
else:
df.table.extend( self.dump_extents_of_a_file(f).table )
return df
def getPerFileBlockCounts(self, rootdir="."):
if self.filesystem != 'ext4':
return
files = self.getAllInodePaths(rootdir)
counts_df = dataframe.DataFrame()
for f in files:
d = self.dumpextents_sum(f)
if len(counts_df.header) == 0:
counts_df.header = d.keys()
counts_df.addRowByDict(d)
counts_df.addColumns(keylist=["HEADERMARKER_extstats",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extstats",
self.monitor_time,
self.jobid])
return counts_df
def getFSBlockCount(self, df_files):
"df_files has number of metablocks datablocks of each file"
if self.filesystem != 'ext4':
return
if len(df_files.table) == 0:
return ""
fs_nmetablocks = 0
fs_ndatablocks = 0
nmetaindex = df_files.header.index('n_metablock')
ndataindex = df_files.header.index('n_datablock')
for row in df_files.table:
if row[nmetaindex] == 'NA' or row[ndataindex] == 'NA':
fs_nmetablocks = 'NA'
fs_ndatablocks = 'NA'
break
fs_nmetablocks += int(row[nmetaindex])
fs_ndatablocks += int(row[ndataindex])
headerstr = "fs_nmetablocks fs_ndatablocks monitor_time HEADERMARKER_extstatssum jobid"
valuelist = [fs_nmetablocks, fs_ndatablocks, self.monitor_time,
'DATAMARKER_extstatssum', self.jobid]
fsblkcount_df = dataframe.DataFrame(
header=headerstr.split(),
table=[valuelist])
return fsblkcount_df
def widen(self, s):
return s.ljust(self.col_width)
def dict2table(self, mydict):
mytable = ""
header = ""
for keyname in mydict:
header += self.widen(keyname) + " "
header += self.widen("monitor_time") + " HEADERMARKER_freefrag_sum\n"
vals = ""
for keyname in mydict:
vals += self.widen(mydict[keyname]) + " "
vals += self.widen(str(self.monitor_time)) + " DATAMARKER_freefrag_sum\n"
return header + vals
def display(self, savedata=False, logfile="", monitorid="", jobid="myjobid"):
self.resetMonitorTime(monitorid=monitorid)
self.resetJobID(jobid=jobid)
ret_dict = {'d_span':'NA',
'physical_layout_hash':'NA'}
if savedata:
if logfile == "":
filename = self.monitor_time + ".result"
else:
filename = logfile
fullpath = os.path.join(self.logdir, filename)
f = open(fullpath, 'w')
if self.filesystem == 'ext3':
extlist = ext34_getExtentList_of_myfiles(target=self.mountpoint)
df_ext = extlist_block_to_byte(extlist)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
elif self.filesystem == 'ext4':
######################
# get extents of all files
extlist = self.getExtentList_of_a_dir(target=self.mountpoint)
df_ext = extlist_translate_new_format(extlist)
#print df_ext.toStr()
#exit(1)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
######################
# e2freefrag
#frag = self.e2freefrag()
#if savedata and frag != None:
#frag0_header = "----------- Extent summary -------------\n"
#frag1_header = "----------- Extent Histogram -------------\n"
#f.write(frag0_header + frag["FragSummary"].toStr())
#f.write(frag1_header + frag["ExtSizeHistogram"].toStr())
######################
# dumpfs
#freespaces = self.dumpfs()
#if savedata and frag != None:
#dumpfs_header = "----------- Dumpfs Header ------------\n"
#f.write(dumpfs_header + freespaces['freeblocks'].toStr())
#f.write(dumpfs_header + freespaces['freeinodes'].toStr())
elif self.filesystem == 'xfs':
df_ext = self.xfs_getExtentList_of_a_dir(self.mountpoint)
#df_ext = self.xfs_getExtentList_of_a_dir('./dir.1/')
#df_ext.table.extend(df_ext0.table)
df_ext = extlist_translate_new_format(df_ext)
#print df_ext.toStr()
#exit(1)
if savedata and df_ext != None:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr() )
elif self.filesystem == 'btrfs':
# too many files thera sometimes, let me remove some
remove_unecessary(self.mountpoint)
tree_lines = btrfs_db_parser.btrfs_debug_tree(self.devname)
tree_parser = btrfs_db_parser.TreeParser(tree_lines)
df_dic = tree_parser.parse()
df_rawext = df_dic['extents']
df_chunk = df_dic['chunks']
paths = get_all_my_files(self.mountpoint)
df_map = btrfs_db_parser.get_filepath_inode_map2(paths)
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
#exit(0)
df_ext = btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map)
if savedata:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr())
else:
print "Unsupported file system."
exit(1)
if savedata:
f.flush()
f.close()
# calculate return value
print df_ext.toStr()
#exit(0)
ret_dict['d_span'] = get_d_span_from_extent_list(df_ext,
'.file')
ret_dict['distance_sum'] = \
get_distant_sum_from_extent_list(df_ext, '.file')
if ret_dict['distance_sum'] < 0:
print 'distance_sum should be >=0'
allpaths = get_paths_in_df(df_ext)
myfiles = [os.path.basename(path) for path in allpaths \
if '.file' in path]
myfiles.sort( key=lambda x:int(x.split('.')[0]) ) #sort by file id
ret_dict['datafiles'] = '|'.join( myfiles )
dspans = []
for f in myfiles:
dspans.append( get_d_span_from_extent_list(df_ext, f) )
dspans = [str(x) for x in dspans]
ret_dict['datafiles_dspan'] = '|'.join( dspans )
num_extents = []
for f in myfiles:
num_extents.append( get_num_ext_from_extent_list(df_ext, f) )
num_extents = [str(x) for x in num_extents]
ret_dict['num_extents'] = '|'.join( num_extents )
ret_dict['physical_layout_hash'] \
= get_physical_layout_hash(df_ext,
'file',
merge_contiguous=True)
return ret_dict
def stat_a_file(self, filepath):
filepath = os.path.join(self.mountpoint, filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses buffer. Don't use it
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def xfs_get_extentlist_of_a_file(self, filepath):
inode_number = self.stat_a_file(filepath)['inode_number']
df = xfs_db_parser.xfs_get_extent_tree(inode_number, self.devname)
df.addColumn(key = "filepath",
value = fill_white_space(filepath))
return df
def xfs_getExtentList_of_a_dir(self, target="."):
"rootdir is actually relative to mountpoint. Seems bad"
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
#print "UU____UU"
if len(df.header) == 0:
df = self.xfs_get_extentlist_of_a_file(f)
else:
df.table.extend( self.xfs_get_extentlist_of_a_file(f).table )
return df
############################################
SECTORSIZE=512
def get_num_sectors(length):
return int((length+SECTORSIZE-1)/SECTORSIZE)
def get_distant_sum(extentlist):
"""
extentlist is a list like:
[ {'off':xxx, 'len':xxx}, {..}, ..]
This unit is byte.
"""
#print extentlist
# for each extent
distsum = 0
n = 0
for ext in extentlist:
distsum += extent_distant_sum(ext)
n += get_num_sectors(ext['len'])
for ext1, ext2 in itertools.combinations(extentlist, 2):
distsum += extent_pair_distant_sum(ext1, ext2)
return distsum
def extent_distant_sum(extent):
"""
The sum of all pair distance inside the extent is:
n(n-1)(n+1)/6
"""
# doing a trick to get ceiling without floats
n = get_num_sectors(extent['len'])
# hmm.. define the distance of 1 sector
# to be 1.
if n == 1:
return 1
#print "n:", n
ret = n*(n-1)*(n+1)/6
#print extent, ret
return ret
def extent_pair_distant_sum( extent1, extent2 ):
"ext1 and ext2 cannot overlap!"
if extent1['off'] > extent2['off']:
extent1, extent2 = extent2, extent1
m = get_num_sectors(extent1['len'])
n = get_num_sectors(extent2['len'])
k = (extent2['off']-extent1['off']-extent1['len'])/SECTORSIZE
ret = m*n*(m+n+2*k)/2
#print extent1, extent2, ret
return ret
if __name__ == '__main__':
print get_distant_sum( [
{'off':0, 'len':512},
#{'off':512, 'len':512}] )
{'off':512*10, 'len':512}] )
def remove_unecessary(top):
objlist = os.listdir(top)
for name in objlist:
if name.endswith('.file') or name.startswith('dir.'):
continue
path = os.path.join(top, name)
if os.path.isfile(path):
os.remove(path)
#print 'remove FILE:', path
else:
shutil.rmtree(path)
#print 'remove DIR:', path
subprocess.call('sync')
def get_all_my_files( target ):
matches = []
for root, dirnames, filenames in os.walk(target):
for filename in fnmatch.filter(filenames, '*.file'):
matches.append(os.path.join(root, filename))
dirnames[:] = fnmatch.filter(dirnames, 'dir.*')
return matches
def ext34_getExtentList_of_myfiles(target):
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
if len(df.header) == 0:
df = filefrag(f)
else:
df.table.extend( filefrag(f).table )
return df
def get_physical_layout_hash(df_ext, filter_str, merge_contiguous=False):
"""
It only cares about physical block positions.
It has nothing to do with filename, logical address of blocks..
Just sort the physical block start and end, then do a hash
Inlcuding inode, ETB, and data extent!
Another way to find layout is to get all the free blocks and do
hash on them. It is more straight free space.
"""
hdr = df_ext.header
phy_blocks = []
for row in df_ext.table:
if filter_str in row[hdr.index('filepath')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
phy_blocks.append( physical_start )
phy_blocks.append( physical_end )
# There can be over lap between extents for inode and only for inode
# block number can be overlapped in extent
# block number of the same extent always next to each other
phy_blocks.sort()
if merge_contiguous:
# the block number are ALWAYS in pair, even after sorting
# [start, end, start, end, start, end, ...]
# This may not work for BTRFS!
merged = []
n = len(phy_blocks)
assert n % 2 == 0
for i in range(0, n, 2):
# i is start of an extent
if i == 0: # the first extent
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
continue
if phy_blocks[i] == phy_blocks[i-1] + 1:
# can be merged
merged[-1] = phy_blocks[i+1]
elif phy_blocks[i] == phy_blocks[i-2] and \
phy_blocks[i+1] == phy_blocks[i-1]:
# hmm... duplicated extent. can only happen to inode
pass # do nothing
else:
# cannot be merged
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
phy_blocks = merged
return hash( str(phy_blocks) )
def get_inode_num_from_dfmap(filepath, df_map):
hdr = df_map.header
for row in df_map.table:
if row[hdr.index('filepath')] == filepath:
return row[hdr.index('inode_number')]
return None
def get_all_vir_ranges_of_an_inode(inode_number, df_rawext):
hdr = df_rawext.header
ranges = []
for row in df_rawext.table:
if str(row[hdr.index('inode_number')]) == str(inode_number):
d = {
'virtual_start': int(row[hdr.index('Virtual_start')]),
'length': int(row[hdr.index('Length')])
}
ranges.append( d )
return ranges
def btrfs_df_map_to_dic(df_map):
d = {}
hdr = df_map.header
for row in df_map.table:
filepath = row[hdr.index('filepath')]
inode_number = row[hdr.index('inode_number')]
d[str(inode_number)] = filepath
return d
def btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map):
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
dic_map = btrfs_df_map_to_dic(df_map)
hdr = df_rawext.header
devices = set()
df_ext = dataframe.DataFrame()
df_ext.header = ['Level_index',
'Max_level',
'Entry_index',
'N_Entry',
'Virtual_start',
'Logical_start',
'Logical_end',
'Physical_start',
'Physical_end',
'Length',
'Flag',
'filepath']
for row in df_rawext.table:
rowdic = {}
for col in hdr:
rowdic[col] = row[hdr.index(col)]
#print rowdic
phy_starts = btrfs_db_parser.virtual_to_physical( rowdic['Virtual_start'], df_chunk )
for stripe in phy_starts:
devices.add( stripe['devid'] )
assert len(devices) == 1, 'we only allow one device at this time'
rowdic['Physical_start'] = stripe['physical_addr']
rowdic['Physical_end'] = stripe['physical_addr'] + \
int( rowdic['Length'] )
rowdic['Logical_end'] = int(rowdic['Logical_start']) + \
int( rowdic['Length'] )
rowdic['Level_index'] = 0
rowdic['Max_level'] = 0
rowdic['Entry_index'] = 0
rowdic['N_Entry'] = 0
rowdic['filepath'] = dic_map[str( rowdic['inode_number'] )]
rowdic['Flag'] = "NA"
df_ext.addRowByDict( rowdic )
return df_ext
def extlist_translate_new_format(df_ext):
"""
Use ending of file and new unit(byte)
Only df_ext of ext4 and xfs need this, btrfs already
uses byte as unit.
But does btrfs use the new style of ending?
"""
df_ext = extlist_lastblock_to_nextblock(df_ext)
df_ext = extlist_block_to_byte(df_ext)
return df_ext
def extlist_lastblock_to_nextblock(df_ext):
"""
for ext4 and xfs, the Logical_end and Physical_end point
to the last block of the file. This is not convenient when
we translate the unit from block to byte.
so in this function, we shift the _end to point to the
next block of the file (out of the file), kind of like
the .end() of iterator in C++.
For example, it was 8,8 for a file, indicating, the first
and the last block of the file is 8.
After the translating of this file, it is 8,9.
"""
colnames = ['Logical_end', 'Physical_end']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) + 1
row[hdr.index(col)] = x
return df_ext
def extlist_block_to_byte(df_ext):
"""
Translate the unit from block to byte for extent list
Translated:
Logical_start Logical_end Physical_start Physical_end
This function should be used as soon as the df_ext is created
so all the later functions that use this df_ext can treat it
as byte.
"""
BLOCKSIZE = 4096
colnames = ['Logical_start', 'Logical_end',
'Physical_start', 'Physical_end', 'Length']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) * BLOCKSIZE
row[hdr.index(col)] = x
return df_ext
def get_num_ext_from_extent_list(df_ext, filename):
"Get number of extents"
hdr = df_ext.header
cnt = 0
for row in df_ext.table:
if filename == os.path.basename(row[hdr.index('filepath')]) and \
row[hdr.index('Level_index')] != '-1':
cnt += 1
return cnt
def get_paths_in_df(df_ext):
hdr = df_ext.header
paths = set()
for row in df_ext.table:
paths.add( row[hdr.index('filepath')] )
return list(paths)
def get_d_span_from_extent_list(df_ext, filepath):
hdr = df_ext.header
byte_max = -1
byte_min = float('Inf')
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
mmin = min(physical_start, physical_end)
mmax = max(physical_start, physical_end)
if mmin < byte_min:
byte_min = mmin
if mmax > byte_max:
byte_max = mmax
if byte_max == -1:
# no extent found
return 'NA'
else:
return byte_max - byte_min
def get_distant_sum_from_extent_list(df_ext, filepath):
hdr = df_ext.header
extlist = []
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
d = {
'off': physical_start,
'len': physical_end - physical_start
}
extlist.append( d )
distsum = get_distant_sum( extlist )
return distsum
def stat_a_file(filepath):
filepath = os.path.join(filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses limited buffer
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
#print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def get_all_paths(mountpoint, dir):
"it returns paths of all files and diretories"
paths = []
with cd(mountpoint):
cmd = ['find', dir]
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def isfilefrag_ext_line(line):
if 'Filesystem' in line or \
'blocksize' in line or \
('logical' in line and 'length' in line) or\
('extent' in line and 'found' in line):
return False
else:
return True
def filefrag(filepath):
cmd = ["filefrag", "-sv", filepath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag", "filepath"]
df_ext.header = header
#ext logical physical expected length flags
#0 0 1545 12 merged
for line in proc.stdout:
if isfilefrag_ext_line(line):
items = line.split()
# it is 4 because there might be some line without
# both expected and flags
assert len(items) >= 4, line
if len(items) == 5 or len(items) == 4:
items.insert(3, -1)
#print items
d = {
'Level_index': 0,
'Max_level' : 0,
'Entry_index': int(items[0]),
'N_Entry' : 'NA',
'Logical_start': int(items[1]),
'Logical_end': int(items[1]) + int(items[4]),
'Physical_start': int(items[2]),
'Physical_end': int(items[2]) + int(items[4]),
'Length' : int(items[4]),
'Flag' : 'NA',
'filepath' : filepath
}
df_ext.addRowByDict(d)
#pprint.pprint(d)
#print df_ext.toStr()
proc.wait()
return df_ext
def get_possible_cpu():
f = open("/sys/devices/system/cpu/possible", 'r')
line = f.readline()
f.close()
return line.strip()
def get_available_cpu_dirs():
"Counting dirs is more accurate than */cpu/possible, at least on emulab"
cpudirs = [name for name in glob.glob("/sys/devices/system/cpu/cpu[0-9]*") \
if os.path.isdir(name)]
return cpudirs
def get_online_cpuids():
with open('/sys/devices/system/cpu/online', 'r') as f:
line = f.readline().strip()
# assuming format of 0-2,4,6-63
items = line.split(',')
cpus = []
for item in items:
if '-' in item:
a,b = item.split('-')
a = int(a)
b = int(b)
cpus.extend(range(a, b+1))
else:
cpus.append(int(item))
return cpus
def switch_cpu(cpuid, mode):
path = "/sys/devices/system/cpu/cpu{cpuid}/online"
path = path.format(cpuid=cpuid)
modedict = {'ON':'1', 'OFF':'0'}
f = open(path, 'w')
f.write(modedict[mode])
f.flush()
f.close()
return
| gpl-2.0 | 4,359,682,231,986,908,700 | 33.280062 | 95 | 0.512707 | false | 3.906206 | false | false | false |
isaachenrion/jets | src/proteins/train/validation.py | 1 | 1461 | import logging
import time
import torch
from src.data_ops.wrapping import unwrap
from ..loss import loss
def half_and_half(a,b):
a = torch.stack([torch.triu(x) for x in a], 0)
b = torch.stack([torch.tril(x, diagonal=-1) for x in b], 0)
return a + b
def validation(model, data_loader):
t_valid = time.time()
model.eval()
valid_loss = 0.
yy, yy_pred = [], []
half = []
mask = []
hard_pred = []
for i, batch in enumerate(data_loader):
(x, y, y_mask, batch_mask) = batch
y_pred = model(x, mask=batch_mask)
vl = loss(y_pred, y, y_mask, batch_mask)
valid_loss = valid_loss + float(unwrap(vl))
yy.append(unwrap(y))
yy_pred.append(unwrap(y_pred))
mask.append(unwrap(batch_mask))
half.append(unwrap(half_and_half(y, y_pred)))
hard_pred.append(unwrap(half_and_half(y, (y_pred > 0.5).float())))
del y; del y_pred; del y_mask; del x; del batch_mask; del batch
valid_loss /= len(data_loader)
#grads = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], 0)
logdict = dict(
yy=yy,
yy_pred=yy_pred,
half=half,
hard_pred=hard_pred,
mask=mask,
valid_loss=valid_loss,
model=model,
#grads=grads,
)
model.train()
t1=time.time()
logging.info("Validation took {:.1f} seconds".format(time.time() - t_valid))
return logdict
| bsd-3-clause | -3,322,496,645,558,040,600 | 23.762712 | 94 | 0.578371 | false | 3.069328 | false | false | false |
byteface/sing | core/PyPal.py | 1 | 16532 | """
PyPal.py
@author: byteface
"""
class PyPal(object):
"""
PyPal is the heart for all pypals :)
"""
# TODO - tell command to pass messages to other pypals. non conflicting. saves having to quit out of current one
# TODO - list commands
# TODO - learn from. quick command to copy commands between pypals may be useful. save moving between dirs and copying
# memory? - obj with funcitons for loading data etc.
# dictionary that stores object from _meta.json
o = None
# TODO - if current context is gone should be able to go through history
# MULTIPLE CONTEXT OBJECT MAY NEED TO EXISTS. searching for relevant ones is a requirement
context=None
# TODO - should every statement should carry certainty?. for now maybe store number 0-1 on here?
#certainty=0
# TODO third person, you, actor???... you can 'be' another person
#perspective={}
# the natural language processing engine. eventually will live on a brain object
nlp=None # TODO - should be an array
# natural language generation. used for output
nlg=None # TODO - as above
def __init__(self,data):
"""
data param is obj with unique name. i.e {'name':'pypal'}
"""
import json
with open("bin/%s/_meta.json" % data['name']) as json_file:
self.o = json.load(json_file)['object']
# TODO - externalise the class
self.nlp=NLP( self )
# TODO - externalise the class
self.nlg=NLG( self )
#self.context=Context( [self], [self] ) # talk to self
def introduce(self):
"""
introduce - when a pypal is first created this is what it says
"""
self.nlg.say( "Hi my name is %s, Thankyou for creating me!" % self.o['name'] )
self.listen()
def welcome(self):
"""
welcome - whenever you init a pypal
"""
self.nlg.say( "%s I see you have returned!" % self.o['friend'] )
# TODO - display stats?
self.listen()
# TODO - listen should really be an open stream at the moment this is just a friend channel.
# TODO - create channels for pypal>pyal comms
# TODO - event should be created
# TODO - should be having thoughts
def listen(self):
# NOTE - listen currently considers it to be friend who is talking
#self_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/self/' % self.o['name'], self.o['name'] )
#friend_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/physical/animal/human/' % self.o['name'], self.o['friend'] )
#friend_obj={}
try:
# THIS IS A APPARENTLY nix ONLY SOLUTION FOR AUTO PROCESSING
# IT WILL TIME OUT IF NO INPUT RESPONSE AND RUN AUTOMATIONS
# steps towards automation. I looked and using mulitprocessing and thread but non can stop a raw_input
# for now i'm doing this way just as I'm building some content bots and need it sorting
# the timeout for automation
#import signal
#signal.signal(signal.SIGALRM, self.automate)
#signal.alarm(10)
#from threading import Timer
#t=Timer(10,self.automate)
#t.start()
self.nlg.say( "I am listening..." )
import sys
from select import select
# TODO - keys presses should reset the timeout
timeout = 10000 # TODO - add to a pypal config?? - make timeout longer. for testing type automate. have flag/config for autobots?
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline().strip()
self.process(s)
self.listen()
else:
self.nlg.say( "No input. Automating..." ) # TODO - just run as bg proccess
self.automate()
self.listen()
return
# NOTE - DOESNT RUN ANYMORE
# NOTE - old way. preserving here for now until figure this all out
#self.nlg.say( "I am listening..." )
#information = raw_input("> ")
#self.process( information )
# TODO - parralell process for automation whilst listening?
except:
self.nlg.log( "FAIL :::::listen" )
def automate(self,*args,**kwargs):
"""
automate is a super simple task runner
it executes tasks listed in brain/automation/tasks.json
"""
self.nlg.log( "automate" )
try:
# add and run automation.py
path = 'bin/%s/brain/automation' % self.o['name']
import sys
sys.path.append( path )
task_runner = __import__( 'automate' )
task_runner.run(self,path)
except Exception:
self.nlg.log( "AUTOMATE FAIL!" )
# TODO - what when automations are complete?. currently returns to listening
#self.listen()
return
history=[] # TODO - should the history go on the context obj?
## TODO - this should be a HEAR function and should process chunks
# TODO - this is something that would be also good to parrallel process and decide which streams of informtion to listen to or ignore
def process(self,information,caller=None,callee=None):
self.context=Context( self, information ) # FOR NOW JUST FOR STORING PATHS
self.history.append(information)
# update the context object
#self.context=Context( [caller], [callee] )
# bust all into words, squash whitespace
words = information.split(None)
# if its a one letter answer. some helpers/shortcuts
if len(words)==1:
# added a repeat function
if information == 'r':
print self.history[len(self.history)-2]
self.process( self.history[len(self.history)-2] )
return
# show command history
if information == 'h':
for h in history:
print h
return
# TODO - some more 1 key helpers
# 'r' - repeat last command
# 'h' - history
# 'c' - show all available commands in pypal
self.nlp.processOneWord( information )
#self.listen()
return
self.nlp.processSentence( information )
#self.listen()
return
# TODO - need to ask meaning of words. to at least put it into memory for considering
# should also be able to check dictionary / nltk sources. but needs to build a program for the word
def ask_word_meaning(self,word):
self.nlp.say( "What is '%s'?" % word )
answer = raw_input("> ")
# TODO - NO - should probs be processess response
self.nlp.addNewWord( word, answer )
# when the bot is not active could explore data sources
# using a decorator pattern for behaviours on data
# def explore(self):
# TODO - let bot decide/choose which data source to consume
# TODO - can bot find new data sources? from interaction with other bots
# TODO - to begin with will attempt to buid knowledge graph data sets
# from webpages/ relational object maps from text
# can also explore things in the world
# theres various ways of determining what to explore in the world
# TODO - create a discover function?...
# do this by going into unknown. i.e. inventing urls to read.
# figure out how to chain commands?
# how to 'think of something to do'
# def spawn(self):
# def merge(self,pypal):
# the first job of the context object is to store caller, callee information
# Who is talking and who are they talking to
# NOTE / TODO - this may evolve with time
class Context(object):
"""
Context still to be fully defined.
Will hold things like conversation history and caller/callee information and is used to aid comprehension
not just personable but subject context
i.e. if i say show list, then add to list should add to the one ive shown
hmmmm caller callee is perspective and incorreclty stubbed here. probably why i removed
the implementation. unless perspective is an object that also resides in a context object?
NOW GIVES SOME PATH INFO. can access in a command like
o.context.COMMAND_PATH
Also forces app into running 1 command at a time. which is good. as thats how a brain kinda works.
you could probably still spin threads in commands if requried. but we context we have 1 train of thought which is the running command
"""
# both can be lists of animals
caller=None
callee=None
# useful for commands to know where they are loading from
# so can dump/store stuff there directly when scraping etc.
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
BASEPATH=''
# TODO - may move these 3 onto the NLP. and make context more of a caretaker pattern extended to surmise and store sessions.
LAST_COMMAND =''
COMMAND_PATH =''
#PARAMS=''
def __init__(self, parent, command, caller=None,callee=None):
# self.caller=caller
# self.callee=callee
self.BASEPATH = './bin/%s/brain/commands' % parent.o['name']
self.LAST_COMMAND = command
path = '/'.join( self.LAST_COMMAND.split(' ') )
file = '_'.join( self.LAST_COMMAND.split(' ') ) + '.py'
#self.COMMAND_PATH = '%s/%s/%s' % ( self.BASEPATH, path, file )
self.COMMAND_PATH = '%s/%s' % ( self.BASEPATH, path )
#self.PARAMS='' # NOTE - gets updated once string is parsed
class NLP(object):
"""
NLP are processes for word parsing. Generating functions for words.
Essentially a custom module loader
"""
owner=None
# TODO -
#TIME
#PLACE
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
def __init__(self,owner):
self.owner=owner
def addNewWord( self, word, answer ):
# TODO - addNewWord. store what friend thinks/says it is
return
def processOneWord( self, word ):
"""
parse single word commands.
basically runs a command from the command folder
"""
#TODO - check that the word is clean
#TODO - see if we know the word
#TODO - deal with inuendo
#TODO - lemmatiser maybe required. or we re-routing manually?
knows_word=False;
c_path = 'bin/%s/brain/commands' % self.owner.o['name']
if self.has_command(c_path+"/"+word+"/"+word+".py"):
self.owner.nlg.log( "command detected" )
knows_word=True
return self.runWordAsFunction( c_path, word )
if knows_word == False:
self.owner.nlg.say( "I don't yet know that word" )
# now that we know input function we can run it..
# TODO - should probably create new problem though and process that way
# TODO - all the meta properties need updating
def runWordAsFunction(self,path,word):
import sys
sys.path.append( "%s/%s" % (path,word) )
try:
# TODO - check and update all the meta props
command_module = __import__( word )
reload(command_module) # reload class without restarting pypal
return command_module.run(self.owner)
except Exception, e:
self.owner.nlg.say( "Sorry, I can't do that, I tried but it didn't work" )
self.owner.nlg.log( "CHECK YOUR VIRUTAL ENVIRONMENT IS RUNNING." )
pass
# TODO - try to find the finite verb
# NOTE - AT THE MOMENT ONLY PROCESSING COMMANDS
def processSentence( self, sentence ):
# print "processSentence"
words = sentence.split(None)
word_count = len(words)
basepath = 'bin/%s/brain/commands' % self.owner.o['name']
word_path_arr=[]
# walk up the sentence
for word in words:
root = basepath+"/"+'/'.join(word_path_arr)
has_path = self.has_path( root +"/"+ word )
# if next word is the last word. check for a command and run it without params.
if (len(word_path_arr)+1)==word_count:
path = root+"/"+word
function = '_'.join( word_path_arr ) + "_" + word
if self.has_command(path+"/"+function+".py"):
return self.runSentenceAsFunction( path, function )
# if nowhere to go. but there's a command at current path. run it and pass the rest as param
if (False==has_path):
function = '_'.join( word_path_arr )
if self.has_command(root+"/"+function+".py"):
# get params by removing where we were up to
params = sentence.replace( ' '.join( word_path_arr ), '' )
# REMOVE THE WHITE SPACE FROM START OF PARAMS
params = params[1:]
# TODO - note. i see i built up to path to strip param. problem here is param is on the command_path. and doesn't get parsed off until here. during execution.
# TODO - will have a rethink about how want context to work before changing this. so for now will operate on the context obj here
# TODO - when doing change, nlp ref should probs get given to context. or context keeps them all in array.
self.owner.context.COMMAND_PATH = self.owner.context.COMMAND_PATH.replace( params, '' )
#self.owner.context.PARAMS = params
# TODO - throw error if no param is passed
if params == None or params == '':
print 'ERROR:parameter expected. none recieved'
# run the function
return self.runSentenceAsFunction( root, function, params )
else:
break
word_path_arr.append(word)
# TODO - if no command, attempt gnerating reponse from the self compiled programs.
# TODO - integrate memory, world states, schemas and emotions
# A LAD is a KAD : cognitive learning
return self.owner.nlg.say( "No command found" )
# params at the moment are 'rest of string'
# long term might break around finite verb and pass whole string?
def runSentenceAsFunction(self,path,function,params=None):
#print "runSentenceAsFunction"
#print path, function, params
import sys
sys.path.append( path )
try:
# TODO - check all the meta props
# TODO - may need to also write to some of the meta
# TODO - if no meta create a default one
command_module = __import__( function )
reload(command_module) # reload class without restarting pypal
if(params!=None):
return command_module.run(self.owner,params)
else:
return command_module.run(self.owner)
pass
except Exception, e:
self.owner.nlg.log( "runSentenceAsFunction FAIL!! \
\n happens when : \
\n failing code in the command. i.e imports used by the command not intalled \
\n venv not running \
\n not passing params when required" )
return False
#self.owner.listen()
pass
# run several possibilities. decide which is most relevant?
# the listener as to suppose an ontological truth in each word as they hear it
# when that doesn't happen even over sets of words things have to be considered
# and find more context or information. even lead to questioning
def suppose():
pass
## ---------------------------- NLP LANGUGAGE UTILS -----------------------------------
# check a lookup table of yes words. program needs to be able to expand that list
# TODO - if one word use lookup, else use NLTK sentimement tool
# NOTE - false DOES NOT MEAN it is negative, it could be neutral
def is_string_positive( s ):
pass
# check a lookup table of no words. program needs to be able to expand that list
# TODO - if one word use lookup, else use NLTK sentimement tool
# NOTE - false DOES NOT MEAN it is positive, it could be neutral
def is_string_negative( s ):
pass
# check a lookup table of
# TODO - building lookup tables on the fly is something we need to do
# RETURN THE NUMBER OR WORD FALSE
def is_string_number( s ):
# TODO - check if NLTK can do this
pass
def is_math_operator():
# TODO - check if NLTK can do this
pass
## ---------------------------- NLP FILE UTILS -----------------------------------
# TODO - may get rid of this lookup and have root words as delegators
def hasParams( self, path, word ):
"""
check if parameters True
"""
try:
#TODO - shoud just check if folder has param folder
import Program
program = Program.Program( path, word );
canHasParams = program.meta.get_property( 'rules', 'parameters' );
return canHasParams
except:
print "no meta or param found"
return False # force false if passing a non command. TODO- BUT. we shouldn't be calling if the case.
def has_path( self, path_to_directory ):
import os.path
return os.path.isdir(path_to_directory)
def has_command(self, path_to_py_file):
import os.path
return os.path.isfile(path_to_py_file)
class NLG(object):
"""
NLG - generates sentences in the natural language
at moment just logs strings to output.
from now on all output should come through here
"""
owner=None
def __init__(self,owner):
self.owner=owner
def say( self, words ):
"""
output helps distinguish pypals when in the console
"""
print "%s : %s" % ( self.owner.o['name'], words )
return
# TODO - setup python logger
# TODO - pass ref to pypal?
# TODO - logs should write to a file and be accessible by events. i.e. evt12345 - created variable xxx
def log( self, words ):
"""
log differs to the 'say' method.
log should be more about debugging.
say should be user comms
"""
return # NOTE <<<<<<<<<<<<<<<<<<<<<< im not running
# TOOD - if debug is true
import logging
logging.warning( "------------------------------------- %s : %s" % ( self.owner.o['name'], words ) )
return
| gpl-2.0 | 2,327,574,827,839,849,000 | 27.259829 | 163 | 0.67578 | false | 3.362897 | false | false | false |
gfyoung/numpy | numpy/lib/twodim_base.py | 2 | 27180 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause | -2,810,006,162,861,620,700 | 26.289157 | 79 | 0.551214 | false | 3.448363 | false | false | false |
greyshell/Pen-Test | leetcode/factorial.py | 1 | 1129 | #!/usr/bin/python
# author: greyshell
"""
[+] problem description
=======================
find the factorial of a number
1) recursive two_sum
2) tail recursive two_sum
[+] reference
=============
TBD
"""
def tail_recursion_driver(n):
"""
tail recursive two_sum
:param n: int
:return: int
"""
return factorial_tail_recursion(n, 1) # 1 is used to start the first accumulation
def factorial_tail_recursion(n, a):
"""
better than normal recursion as it could be optimized by the compiler by not saving the current stack frame
:param n: int
:param a: int => it accumulates the result
:return: int
"""
if n == 1 or n == 0:
return a # it carries the final result
else:
return factorial_tail_recursion(n - 1, n * a)
def factorial(n):
"""
normal recursive two_sum
:return: int
"""
if n == 1 or n == 0: # base case for n = 0, 1
return 1
else: # recursive case when n > 1
return n * factorial(n - 1)
def main():
print tail_recursion_driver(12)
print factorial(0)
if __name__ == '__main__':
main()
| mit | 5,056,837,605,008,551,000 | 18.135593 | 111 | 0.581045 | false | 3.528125 | false | false | false |
timothyclemansinsea/smc | src/k8s/smc-hub/control.py | 1 | 9152 | #!/usr/bin/env python3
"""
Hub management script
"""
import os, shutil, sys, tempfile
join = os.path.join
# Boilerplate to ensure we are in the directory fo this path and make the util module available.
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(0, os.path.abspath(os.path.join(SCRIPT_PATH, '..', 'util')))
os.chdir(SCRIPT_PATH)
import util
# For now in all cases, we just call the container the following; really it should
# maybe be smc-webapp-static#sha1hash, which makes switching between versions easy, etc.
NAME='smc-hub'
SECRETS = os.path.abspath(join(SCRIPT_PATH, '..', '..', 'data', 'secrets'))
def build(tag, rebuild, upgrade=False, commit=None):
"""
Build Docker container by installing and building everything inside the container itself, and
NOT using ../../static/ on host.
"""
# First build smc-hub-base, which is generic install of ubuntu packages, so we should rarely
# clear the cache for this.
v = ['sudo', 'docker', 'build', '-t', '{name}-base'.format(name=NAME)]
if upgrade:
v.append("--no-cache")
v.append(".")
util.run(v, path=join(SCRIPT_PATH, 'image-base'))
# Next build smc-hub, which depends on smc-hub-base.
v = ['sudo', 'docker', 'build', '-t', tag]
if commit:
v.append("--build-arg")
v.append("commit={commit}".format(commit=commit))
if rebuild: # will cause a git pull to happen
v.append("--no-cache")
v.append('.')
util.run(v, path=join(SCRIPT_PATH,'image'))
def build_docker(args):
if args.commit:
args.tag += ('-' if args.tag else '') + args.commit[:6]
tag = util.get_tag(args, NAME)
build(tag, args.rebuild, args.upgrade, args.commit)
if not args.local:
util.gcloud_docker_push(tag)
def run_on_kubernetes(args):
if args.test:
rethink_cpu_request = hub_cpu_request = '10m'
rethink_memory_request = hub_memory_request = '200Mi'
else:
hub_cpu_request = '300m'
hub_memory_request = '1Gi'
rethink_cpu_request = '300m'
rethink_memory_request = '1Gi'
util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
util.ensure_secret_exists('zendesk-api-key', 'zendesk')
args.local = False # so tag is for gcloud
if args.replicas is None:
args.replicas = util.get_desired_replicas(NAME, 2)
tag = util.get_tag(args, NAME, build)
opts = {
'image_hub' : tag,
'replicas' : args.replicas,
'pull_policy' : util.pull_policy(args),
'min_read_seconds' : args.gentle,
'smc_db_hosts' : args.database_nodes,
'smc_db_pool' : args.database_pool_size,
'smc_db_concurrent_warn' : args.database_concurrent_warn,
'hub_cpu_request' : hub_cpu_request,
'hub_memory_request' : hub_memory_request,
'rethink_cpu_request' : rethink_cpu_request,
'rethink_memory_request' : rethink_memory_request
}
if args.database_nodes == 'localhost':
from argparse import Namespace
ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy', build)
filename = 'smc-hub-rethinkdb-proxy.template.yaml'
else:
filename = '{name}.template.yaml'.format(name=NAME)
t = open(join('conf', filename)).read()
with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
r = t.format(**opts)
#print(r)
tmp.write(r)
tmp.flush()
util.update_deployment(tmp.name)
if NAME not in util.get_services():
util.run(['kubectl', 'expose', 'deployment', NAME])
def stop_on_kubernetes(args):
util.stop_deployment(NAME)
def load_secret(name, args):
path = args.path
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise RuntimeError("path='{path}' must be a directory".format(path=path))
file = join(path, name)
if not os.path.exists(file):
raise RuntimeError("'{file}' must exist".format(file=file))
util.create_secret(name+'-api-key', file)
def status(args):
# Get all pod names
v = util.get_pods(run=NAME)
print("Getting last %s lines of logs from %s pods"%(args.tail, len(v)))
for x in v:
lg = util.get_logs(x['NAME'], tail=args.tail, container='smc-hub').splitlines()
blocked = concurrent = 0
for w in lg:
if 'BLOCKED for' in w: # 2016-07-07T17:39:23.159Z - debug: BLOCKED for 1925ms
b = int(w.split()[-1][:-2])
blocked = max(blocked, b)
if 'concurrent]' in w: # 2016-07-07T17:41:16.226Z - debug: [1 concurrent] ...
concurrent = max(concurrent, int(w.split()[3][1:]))
x['blocked'] = blocked
x['concurrent'] = concurrent
bad = util.run("kubectl describe pod {name} |grep Unhealthy |tail -1 ".format(name=x['NAME']), get_output=True, verbose=False).splitlines()
if len(bad) > 0:
x['unhealthy'] = bad[-1].split()[0]
else:
x['unhealthy'] = ''
print("%-30s%-12s%-12s%-12s%-12s%-12s"%('NAME', 'CONCURRENT', 'BLOCKED', 'UNHEALTHY', 'RESTARTS', 'AGE'))
for x in v:
print("%-30s%-12s%-12s%-12s%-12s%-12s"%(x['NAME'], x['concurrent'], x['blocked'], x['unhealthy'], x['RESTARTS'], x['AGE']))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Control deployment of {name}'.format(name=NAME))
subparsers = parser.add_subparsers(help='sub-command help')
sub = subparsers.add_parser('build', help='build docker image')
sub.add_argument("-t", "--tag", default="", help="tag for this build")
sub.add_argument("-c", "--commit", default='',
help="build a particular sha1 commit; the commit is automatically appended to the tag")
sub.add_argument("-r", "--rebuild", action="store_true",
help="re-pull latest hub source code from git and install any dependencies")
sub.add_argument("-u", "--upgrade", action="store_true",
help="re-install the base Ubuntu packages")
sub.add_argument("-l", "--local", action="store_true",
help="only build the image locally; don't push it to gcloud docker repo")
sub.set_defaults(func=build_docker)
sub = subparsers.add_parser('run', help='create/update {name} deployment on the currently selected kubernetes cluster'.format(name=NAME))
sub.add_argument("-t", "--tag", default="", help="tag of the image to run")
sub.add_argument("-r", "--replicas", default=None, help="number of replicas")
sub.add_argument("-f", "--force", action="store_true", help="force reload image in k8s")
sub.add_argument("-g", "--gentle", default=30, type=int,
help="how gentle to be in doing the rolling update; in particular, will wait about this many seconds after each pod starts up (default: 30)")
sub.add_argument("-d", "--database-nodes", default='localhost', type=str, help="database to connect to. If 'localhost' (the default), will run a local rethindkb proxy that is itself pointed at the rethinkdb-cluster service; if 'rethinkdb-proxy' will use that service.")
sub.add_argument("-p", "--database-pool-size", default=50, type=int, help="size of database connection pool")
sub.add_argument("--database-concurrent-warn", default=300, type=int, help="if this many concurrent queries for sustained time, kill container")
sub.add_argument("--rethinkdb-proxy-tag", default="", help="tag of rethinkdb-proxy image to run")
sub.add_argument("--test", action="store_true", help="using for testing so make very minimal resource requirements")
sub.set_defaults(func=run_on_kubernetes)
sub = subparsers.add_parser('delete', help='delete the deployment')
sub.set_defaults(func=stop_on_kubernetes)
sub = subparsers.add_parser('load-sendgrid', help='load the sendgrid password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "sendgrid"')
sub.set_defaults(func=lambda args: load_secret('sendgrid',args))
sub = subparsers.add_parser('load-zendesk', help='load the zendesk password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "zendesk"')
sub.set_defaults(func=lambda args: load_secret('zendesk',args))
util.add_deployment_parsers(NAME, subparsers, default_container='smc-hub')
sub = subparsers.add_parser('status', help='display status info about concurrent and blocked, based on recent logs')
sub.add_argument("-t", "--tail", default=100, type=int, help="how far back to go in log")
sub.set_defaults(func=status)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
| gpl-3.0 | -8,445,299,926,647,956,000 | 46.419689 | 275 | 0.633086 | false | 3.521354 | false | false | false |
Yuecai/com-yuecai-dream | src/nodelay/forms.py | 1 | 9690 | # coding=utf-8
#########################################################################
# File Name: forms.py
# Original Author: 段凯强
# Mail: [email protected]
# Created Time: 2013-12-26
# Update:
#########################################################################
#########################################################################
# Copyright (c) 2013~2014 by 段凯强
# Reand the file "license" distributed with these sources, or XXXX
# XXXXXXXXXXXXXXXXXX switch for additional information, such as how
# to use, copy, modify, sell and/or distribute this software and its
# documentation any purpose anyway.
#########################################################################
import datetime, time
import re
from django import forms
class BasicTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
title = forms.CharField()
content = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Basic':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_title(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
title = self.cleaned_data['title']
l = len(title)
if l >= 1 and l <= 10 and pattern.match(title):
return title
raise forms.ValidationError('title_err')
def clean_content(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
content = self.cleaned_data['content']
l = len(content)
if l >= 1 and l <= 100 and pattern.match(content) and not pattern_blank.match(content):
return content
raise forms.ValidationError('content_err')
class BookTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
bookName = forms.CharField()
readFrom = forms.CharField()
readTo = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Book':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_bookName(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
bookName = self.cleaned_data['bookName']
l = len(bookName)
if l >= 1 and l <= 50 and pattern.match(bookName) and not pattern_blank.match(bookName):
return bookName
raise forms.ValidationError('bookName_err')
def clean_readFrom(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readFrom = self.cleaned_data['readFrom']
l = len(readFrom)
if l >= 1 and l <= 50 and pattern.match(readFrom) and not pattern_blank.match(readFrom):
return readFrom
raise forms.ValidationError('readFrom_err')
def clean_readTo(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readTo = self.cleaned_data['readTo']
l = len(readTo)
if l >= 1 and l <= 50 and pattern.match(readTo) and not pattern_blank.match(readTo):
return readTo
raise forms.ValidationError('readTo_err')
class WorkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
summary = forms.CharField()
goal = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Work':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_summary(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
summary = self.cleaned_data['summary']
l = len(summary)
if l >= 1 and l <= 50 and pattern.match(summary) and not pattern_blank.match(summary):
return summary
raise forms.ValidationError('summary')
def clean_goal(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
goal = self.cleaned_data['goal']
l = len(goal)
if l >= 1 and l <= 50 and pattern.match(goal) and not pattern_blank.match(goal):
return goal
raise forms.ValidationError('goal_err')
class HomeworkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
courseName = forms.CharField()
introduction = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Homework':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_courseName(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
courseName = self.cleaned_data['courseName']
l = len(courseName)
if l >= 1 and l <= 10 and pattern.match(courseName):
return courseName
raise forms.ValidationError('courseName_err')
def clean_introduction(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
introduction = self.cleaned_data['introduction']
l = len(introduction)
if l >= 1 and l <= 100 and pattern.match(introduction) and not pattern_blank.match(introduction):
return introduction
raise forms.ValidationError('introduction_err')
class TaskIdForm(forms.Form):
taskId = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
class ChangeDateForm(forms.Form):
taskId = forms.IntegerField()
date = forms.DateField()
time = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
class ExchangeTaskForm(forms.Form):
taskId1 = forms.IntegerField()
taskId2 = forms.IntegerField()
def clean_taskId1(self):
taskId1 = self.cleaned_data['taskId1']
if taskId1 > 0:
return taskId1
raise forms.ValidationError('taskId1_err')
def clean_taskId2(self):
taskId2 = self.cleaned_data['taskId2']
if taskId2 > 0:
return taskId2
raise forms.ValidationError('taskId2_err')
class DelayShiftTaskForm(forms.Form):
fromId = forms.IntegerField()
toId = forms.IntegerField()
def clean_fromId(self):
fromId = self.cleaned_data['fromId']
if fromId > 0:
return fromId
raise forms.ValidationError('fromId_err')
def clean_toId(self):
toId = self.cleaned_data['toId']
if toId > 0:
return toId
raise forms.ValidationError('toId_err')
| bsd-3-clause | 8,940,460,477,779,460,000 | 33.025362 | 105 | 0.577787 | false | 3.504104 | false | false | false |
quantopian/zipline | zipline/data/in_memory_daily_bars.py | 1 | 5363 | from six import iteritems
import numpy as np
import pandas as pd
from pandas import NaT
from trading_calendars import TradingCalendar
from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same
class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
"""
A SessionBarReader backed by a dictionary of in-memory DataFrames.
Parameters
----------
frames : dict[str -> pd.DataFrame]
Dictionary from field name ("open", "high", "low", "close", or
"volume") to DataFrame containing data for that field.
calendar : str or trading_calendars.TradingCalendar
Calendar (or name of calendar) to which data is aligned.
currency_codes : pd.Series
Map from sid -> listing currency for that sid.
verify_indices : bool, optional
Whether or not to verify that input data is correctly aligned to the
given calendar. Default is True.
"""
@expect_types(
frames=dict,
calendar=TradingCalendar,
verify_indices=bool,
currency_codes=pd.Series,
)
def __init__(self,
frames,
calendar,
currency_codes,
verify_indices=True):
self._frames = frames
self._values = {key: frame.values for key, frame in iteritems(frames)}
self._calendar = calendar
self._currency_codes = currency_codes
validate_keys(frames, set(OHLCV), type(self).__name__)
if verify_indices:
verify_frames_aligned(list(frames.values()), calendar)
self._sessions = frames['close'].index
self._sids = frames['close'].columns
@classmethod
def from_panel(cls, panel, calendar, currency_codes):
"""Helper for construction from a pandas.Panel.
"""
return cls(dict(panel.iteritems()), calendar, currency_codes)
@property
def last_available_dt(self):
return self._calendar[-1]
@property
def trading_calendar(self):
return self._calendar
@property
def sessions(self):
return self._sessions
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
if start_dt not in self._sessions:
raise NoDataOnDate(start_dt)
if end_dt not in self._sessions:
raise NoDataOnDate(end_dt)
asset_indexer = self._sids.get_indexer(assets)
if -1 in asset_indexer:
bad_assets = assets[asset_indexer == -1]
raise NoDataForSid(bad_assets)
date_indexer = self._sessions.slice_indexer(start_dt, end_dt)
out = []
for c in columns:
out.append(self._values[c][date_indexer, asset_indexer])
return out
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
return self.frames[field].loc[dt, sid]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
pd.Timestamp : The last know dt for the asset and dt;
NaT if no trade is found before the given dt.
"""
try:
return self.frames['close'].loc[:, asset.sid].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._sessions[0]
def currency_codes(self, sids):
codes = self._currency_codes
return np.array([codes[sid] for sid in sids])
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
)
| apache-2.0 | -1,749,134,559,772,530,400 | 30.547059 | 78 | 0.611039 | false | 4.222835 | false | false | false |
Subsets and Splits