repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---
harmsm/uhbd | previous_releases/0.5.0/single_file.py | 1 | 1800 | """
single_file.py:

A script that runs a UHBD calculation on a single file.
"""

"""
Version notes:
0.4: 060113
0.4.1: 060403
    Hokiness fix. Changed from some_path = x + os.sep + y to os.path.join(x,y)
"""

__author__ = "Michael J. Harms"
__date__ = "060403"
__version__ = "0.4.1"

# USER INPUTS
pH_start = 0
pH_stop = 16
pH_interval = 0.25
ionic_strength = 0.1
dielectric = 20

import initialize
import uhbd
import os
import sys
import argParser

def main(filename,output_path,pH_start,pH_stop,pH_interval,ionic_strength,dielectric):

    filename = os.path.join(initialize.invocation_path,filename)

    # Create output directory (if invoked from command line)
    if __name__ == "__main__":
        try:
            os.mkdir(os.path.join(initialize.invocation_path,output_path))
        except OSError, value:
            # Don't stop if we are only overwriting existing directory
            if value[0] != 17:
                print 'File error.'
                print value[0], output_path, value[1]
                sys.exit()

    # Perform UHBD run
    uhbd.main(filename,pH_start,pH_stop,pH_interval,ionic_strength,dielectric)
    uhbd.copyFinalOutput(os.path.join(initialize.invocation_path,output_path))
    uhbd.runCleanup()

# If this is invoked from the command line, run the main function
if __name__ == "__main__":

    # Grab command line options
    required, optional = argParser.main(sys.argv,["pdb_file","output_dir"],
                                        ["inpfile","outdir"],
                                        ["dielectric","ionic_strength","pHtitr"])

    main(required["pdb_file"],required["output_dir"],optional.pHtitr[0],
         optional.pHtitr[1],optional.pHtitr[2],optional.ionic_strength,
         optional.dielectric)
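
# Example invocation (assumed from the argParser spec above; the flag syntax
# for the optional pHtitr/ionic_strength/dielectric arguments depends on the
# local argParser module):
#   python single_file.py some_structure.pdb output_dir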
| unlicense | 3,873,800,045,858,526,700 | 27.125 | 86 | 0.612222 | false |
jiajiax/crosswalk-test-suite | cordova/cordova-feature-android-tests/feature/mobilespec_close.py | 1 | 2244 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Lin, Wanming <[email protected]>
import unittest
import os
import commands
import comm
import time
class TestMobileSpecAppFunctions(unittest.TestCase):
def test_close(self):
comm.setUp()
app_name = "mobilespec"
pkg_name = "org.apache." + app_name.lower()
if not comm.check_app_installed(pkg_name, self):
comm.app_install(app_name, pkg_name, self)
if not comm.check_app_launched(pkg_name, self):
print "Close app ---------------->%s App haven't launched, need to launch it!" % app_name
comm.app_launch(app_name, pkg_name, self)
time.sleep(1)
comm.app_stop(pkg_name, self)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,241,130,745,474,430,000 | 41.339623 | 101 | 0.723262 | false |
mattboyer/sqbrite | setup.py | 1 | 1984 | from setuptools.command.sdist import sdist as SetuptoolsSdist
from setuptools import setup, find_packages
import os
import shutil
import version
from src import PROJECT_NAME, PROJECT_DESCRIPTION, README_PATH
class SdistAndClean(SetuptoolsSdist):
'''
Runs the default setuptools sdist command and then cleans the egg info
directory.
'''
def run(self):
SetuptoolsSdist.run(self)
# FIXME This works, but there *has* to be a cleaner way
for distfile in self.filelist.files:
if distfile.endswith('PKG-INFO'):
egginfo_dir = os.path.dirname(distfile)
shutil.rmtree(egginfo_dir)
def package_names():
return [PROJECT_NAME] + \
[PROJECT_NAME + '.' + package for package in find_packages('src')]
long_description = None
with open(README_PATH, 'r') as readme:
long_description = readme.read()
setup(
cmdclass={
'sdist': SdistAndClean,
},
name=PROJECT_NAME,
version=version.get_git_version(),
url='https://github.com/mattboyer/sqbrite',
description=PROJECT_DESCRIPTION,
long_description=long_description or PROJECT_DESCRIPTION,
author='Matt Boyer',
author_email='[email protected]',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Database',
'Topic :: System :: Recovery Tools',
],
packages=package_names(),
# Packaging data files in Python is a complete shitshow
# We need this *AND* an "include" line in MANIFEST.IN
include_package_data=True,
package_dir={PROJECT_NAME: 'src'},
install_requires=[
'pyxdg',
'pyyaml',
],
entry_points={
'console_scripts': [
PROJECT_NAME+'='+PROJECT_NAME+'.sqlite_recover:main',
],
},
)
| mit | -8,243,067,687,102,157,000 | 28.176471 | 74 | 0.632056 | false |
kubeflow/testing | py/kubeflow/testing/create_kf_instance.py | 1 | 10831 | """Create a Kubeflow instance.
The purpose of this script is to automate the creation of Kubeflow Deployments
corresponding to different versions of Kubeflow.
TODO: This script is obsolete; we should get rid of it in favor of
create_unique_kf_instance.py.
"""
import argparse
import logging
import json
import os
import re
import requests
import retrying
import shutil
import subprocess
import tempfile
import yaml
from googleapiclient import discovery
from google.cloud import storage
from kubeflow.testing import util
from retrying import retry
from oauth2client.client import GoogleCredentials
@retry(wait_fixed=60000, stop_max_attempt_number=5)
def run_with_retry(*args, **kwargs):
util.run(*args, **kwargs)
def delete_storage_deployment(project, name):
credentials = GoogleCredentials.get_application_default()
dm = discovery.build("deploymentmanager", "v2", credentials=credentials)
deployments_client = dm.deployments()
try:
op = deployments_client.delete(project=project, deployment=name,
deletePolicy="DELETE").execute()
except Exception as e:
if hasattr(e, 'content'):
m = json.loads(e.content)
if m.get("error", {}).get("code") == 404:
return
raise
raise
util.wait_for_gcp_operation(dm.operations(), project, None, op["name"])
def create_info_file(args, app_dir, git_describe):
"""Creates an info file in the KF app directory."""
# This step needs to be called after kfctl init because the directory needs to
# exist.
with open(os.path.join(app_dir, "kf_app.yaml"), "w") as hf:
app = {
"labels": {
"GIT_LABEL": git_describe,
"PURPOSE": "kf-test-cluster",
},
}
if args.job_name:
app["labels"]["DEPLOYMENT_JOB"] = args.job_name
yaml.dump(app, hf)
def build_kfctl_go(args):
"""Build kfctl go."""
build_dir = os.path.join(args.kubeflow_repo, "bootstrap")
# We need to use retry builds because when building in the test cluster
# we see intermittent failures pulling dependencies
util.run(["make", "build-kfctl"], cwd=build_dir)
kfctl_path = os.path.join(build_dir, "bin", "kfctl")
return kfctl_path
def deploy_with_kfctl_go(kfctl_path, args, app_dir, env):
"""Deploy Kubeflow using kfctl go binary."""
# username and password are passed as env vars and won't appear in the logs
#
# TODO(https://github.com/kubeflow/kubeflow/issues/2831): We should be
# loading the config in the repo we have checked out kfctl doesn't support
# specifying a file URI. Once it does we should change --version to
# use it.
#
# TODO(zhenghuiwang): use the master of kubeflow/manifests once
# https://github.com/kubeflow/kubeflow/issues/3475 is fixed.
logging.warning("Loading configs %s.", args.kfctl_config)
if args.kfctl_config.startswith("http"):
response = requests.get(args.kfctl_config)
raw_config = response.content
else:
with open(args.kfctl_config) as hf:
raw_config = hf.read()
config_spec = yaml.load(raw_config)
# We need to specify a valid email because
# 1. We need to create appropriate RBAC rules to allow the current user
# to create the required K8s resources.
# 2. Setting the IAM policy will fail if the email is invalid.
email = util.run(["gcloud", "config", "get-value", "account"])
if not email:
raise ValueError("Could not determine GCP account being used.")
config_spec["spec"]["project"] = args.project
config_spec["spec"]["email"] = email
config_spec["spec"]["zone"] = args.zone
config_spec["spec"] = util.filter_spartakus(config_spec["spec"])
logging.info("KFDefSpec:\n%s", str(config_spec))
with tempfile.NamedTemporaryFile(suffix=".yaml", delete=False) as f:
config_file = f.name
logging.info("Writing file %s", f.name)
yaml.dump(config_spec, f)
util.run([kfctl_path, "init", app_dir, "-V", "--config=" + config_file],
env=env)
util.run([kfctl_path, "generate", "-V", "all"], env=env, cwd=app_dir)
util.run([kfctl_path, "apply", "-V", "all"], env=env, cwd=app_dir)
def main(): # pylint: disable=too-many-locals,too-many-statements
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
"--project", default="kubeflow-ci-deployment", type=str,
help=("The project."))
parser.add_argument(
"--zone", default="us-east1-d", type=str, help=("The zone to deploy in."))
parser.add_argument(
"--oauth_file",
default=("gs://kubeflow-ci-deployment_kf-data/"
"kf-iap-oauth.kubeflow-ci-deployment.yaml"),
type=str, help=("The file containing the OAuth client ID & secret"
"for IAP."))
parser.add_argument(
"--kubeflow_repo",
default="/home/jlewi/git_kubeflow",
type=str, help=("Path to the Kubeflow repo to use"))
parser.add_argument(
"--kfctl_config",
default=("https://raw.githubusercontent.com/kubeflow/kubeflow/master"
"/bootstrap/config/kfctl_gcp_iap.yaml"),
type=str, help=("Path to the kfctl config to use"))
parser.add_argument(
"--apps_dir",
default=os.getcwd(),
type=str, help=("Directory to store kubeflow apps."))
parser.add_argument(
"--name", type=str, default="", help=("Name for the deployment."))
parser.add_argument(
"--snapshot_file",
default="", type=str, help=("A json file containing information about the "
"snapshot to use."))
parser.add_argument(
"--job_name",
default="", type=str, help=("Pod name running the job."))
args = parser.parse_args()
bucket, blob_path = util.split_gcs_uri(args.oauth_file)
client = storage.Client(project=args.project)
bucket = client.get_bucket(bucket)
blob = bucket.get_blob(blob_path)
contents = blob.download_as_string()
oauth_info = yaml.load(contents)
git_describe = util.run(["git", "describe", "--tags", "--always", "--dirty"],
cwd=args.kubeflow_repo).strip("'")
if args.snapshot_file:
logging.info("Loading info from snapshot file %s", args.snapshot_file)
with open(args.snapshot_file) as hf:
snapshot_info = json.load(hf)
name = snapshot_info["name"]
else:
name = args.name
kfctl_path = build_kfctl_go(args)
app_dir = os.path.join(args.apps_dir, name)
  # Clean up previous deployment. We attempt to run "kfctl delete all"
  # but we don't depend on it succeeding, because the app directory might
  # not be up to date and we cannot guarantee the apps config in the
  # repository is current.
if os.path.exists(app_dir):
try:
util.run([kfctl_path, "delete", "all", "--delete_storage"], cwd=app_dir)
except subprocess.CalledProcessError as e:
logging.error("kfctl delete all failed; %s", e)
if os.path.exists(app_dir):
shutil.rmtree(app_dir)
if not os.path.exists(args.apps_dir):
os.makedirs(args.apps_dir)
# Delete deployment beforehand. If not, updating action might be failed when
# resource permission/requirement is changed. It's cleaner to delete and
# re-create it.
delete_deployment = os.path.join(args.kubeflow_repo, "scripts", "gke",
"delete_deployment.sh")
util.run([delete_deployment, "--project=" + args.project,
"--deployment=" + name, "--zone=" + args.zone], cwd=args.apps_dir)
# Delete script doesn't delete storage deployment by design.
delete_storage_deployment(args.project, name + "-storage")
env = {}
env.update(os.environ)
env.update(oauth_info)
labels = {
"GIT_LABEL": git_describe,
"PURPOSE": "kf-test-cluster",
}
label_args = []
for k, v in labels.items():
# labels can only take as input alphanumeric characters, hyphens, and
    # underscores. Replace invalid characters with hyphens.
val = v.lower().replace("\"", "")
val = re.sub(r"[^a-z0-9\-_]", "-", val)
label_args.append("{key}={val}".format(key=k.lower(), val=val))
endpoint = "{name}.endpoints.{project}.cloud.goog".format(
name=name,
project=args.project)
  # Fire-and-forget attempt to undelete endpoint services. Deletion of an
  # endpoint service is a soft deletion, e.g. it will be purged after 30
  # days. If any deployment tries to re-use the same endpoint while it is
  # soft-deleted, it will fail. Undelete it so that the endpoint-controller
  # can complete its job.
try:
util.run(["gcloud", "endpoints", "services", "undelete", endpoint,
"--verbosity=info", "--project="+args.project])
except subprocess.CalledProcessError as e:
logging.info("endpoint undeletion is failed: %s", e)
deploy_with_kfctl_go(kfctl_path, args, app_dir, env)
create_info_file(args, app_dir, git_describe)
logging.info("Annotating cluster with labels: %s", str(label_args))
# Set labels on the deployment
util.run(["gcloud", "--project", args.project,
"deployment-manager", "deployments", "update", name,
"--update-labels", ",".join(label_args)],
cwd=app_dir)
# Set labels on the cluster. Labels on the deployment is not shown on
# Pantheon - it's easier for users to read if cluster also has labels.
util.run(["gcloud", "container", "clusters", "update", name,
"--project", args.project,
"--zone", args.zone,
"--update-labels", ",".join(label_args)],
cwd=app_dir)
  # To work around lets-encrypt certificate quota issues, create a
  # self-signed certificate
kubeflow_branch = None
for repo in snapshot_info["repos"]:
if repo["repo"] == "kubeflow":
kubeflow_branch = repo["branch"]
logging.info("kubeflow branch %s", kubeflow_branch)
if kubeflow_branch == "v0.6-branch":
logging.info("Creating a self signed certificate")
util.run(["kubectl", "config", "use-context", name])
tls_endpoint = "--host={0}.endpoints.{1}.cloud.goog".format(
name, args.project)
cert_dir = tempfile.mkdtemp()
util.run(["kube-rsa", tls_endpoint], cwd=cert_dir)
util.run(["kubectl", "-n", "kubeflow", "create", "secret", "tls",
"envoy-ingress-tls", "--cert=ca.pem", "--key=ca-key.pem"],
cwd=cert_dir)
shutil.rmtree(cert_dir)
else:
# starting with 0.7 we are moving to managed GKE certificates.
# So we can't just generate a self-signed certificate
# TODO(jlewi): If we still hit lets-encrypt quota issues then
# we can fix this by generating new hostnames
logging.info("Not creating a self signed certificate")
if __name__ == "__main__":
main()
| apache-2.0 | -987,374,734,785,568,400 | 33.714744 | 80 | 0.654049 | false |
wmizzi/tn2capstone | lib/jsonprocesser.py | 1 | 8657 | import json
import uuid
import datetime
import os
import socket
class jsonprocesser:
def __init__(self):
self.client_mac = str(hex(uuid.getnode()))
self.filestamp = datetime.datetime.now().strftime("%H-%M_%d-%m-%y")
self.timestamp = str(datetime.datetime.utcnow())
print self.timestamp
#filename = client_mac + timestamp + '.json'
self.filename = os.path.abspath('results/' + self.client_mac + self.filestamp + '.json')
data = json.dumps({"UserInfo":{"user id":self.client_mac,"timestamp":self.timestamp,"ip":"null","lat":0,"lon":0},
"SpeedTests":{"TCP":{"upload":-1,"download":-1},
"UDP":{"download":{"4k":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"1080p":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"720p":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"420p":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1}},
"upload":{"screensharing":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"standard_video_calling":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1},
"hd_video_calling":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1}},
"2way":{"high_VOIP":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1,"latency":-1},
"low_VOIP":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1,"latency":-1},
"gaming":{"PLR":-1,"jitter_lat":-1,"jitter_iat":-1,"latency":-1}}}},
"TRACEROUTE":{}})
jsonFile = open(self.filename, "w+")
jsonFile.write(data)
print self.filename
def json_update_tcp(self, iperf_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["TCP"]["upload"] = iperf_results["tcp_upload"]
data["SpeedTests"]["TCP"]["download"] = iperf_results['tcp_download']
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_4k(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["4k"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["4k"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["4k"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_1080p(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["1080p"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["1080p"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["1080p"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_720p(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["720p"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["720p"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["720p"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_480p(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["download"]["420p"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["download"]["420p"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["download"]["420p"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_screensharing(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["upload"]["screensharing"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["upload"]["screensharing"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["upload"]["screensharing"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_standard_video_calling(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["upload"]["standard_video_calling"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["upload"]["standard_video_calling"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["upload"]["standard_video_calling"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_hd_video_calling(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["upload"]["hd_video_calling"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["upload"]["hd_video_calling"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["upload"]["hd_video_calling"]["jitter_iat"] = udp_results["jitter_iat"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_high_VOIP(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["jitter_iat"] = udp_results["jitter_iat"]
data["SpeedTests"]["UDP"]["2way"]["high_VOIP"]["latency"] = udp_results["latency"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_low_VOIP(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["jitter_iat"] = udp_results["jitter_iat"]
data["SpeedTests"]["UDP"]["2way"]["low_VOIP"]["latency"] = udp_results["latency"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_update_gaming(self, udp_results):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
data["SpeedTests"]["UDP"]["2way"]["gaming"]["PLR"] = udp_results["PLR"]
data["SpeedTests"]["UDP"]["2way"]["gaming"]["jitter_lat"] = udp_results["jitter_lat"]
data["SpeedTests"]["UDP"]["2way"]["gaming"]["jitter_iat"] = udp_results["jitter_iat"]
data["SpeedTests"]["UDP"]["2way"]["gaming"]["latency"] = udp_results["latency"]
jsonFile2 = open(self.filename, "w+")
jsonFile2.write(json.dumps(data))
def json_upload(self,server_ip,port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = open(self.filename,'rb')
l = f.read(1024)
print l
s.connect((server_ip,port))
while(l):
s.send(l)
l= f.read(1024)
f.close()
#s.shutdown(socket.SHUT_WR)
        s.close()
def print_json(self):
jsonFile = open(self.filename,"r+")
data = json.load(jsonFile)
print data
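
# Minimal usage sketch (added; assumes a results/ directory exists relative
# to the working directory, since __init__ writes its JSON file there):
#   jp = jsonprocesser()
#   jp.json_update_tcp({'tcp_upload': 10.0, 'tcp_download': 50.0})
#   jp.print_json()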
| bsd-2-clause | -3,928,381,272,662,818,000 | 40.826087 | 126 | 0.516807 | false |
Zerknechterer/pyload | module/plugins/hoster/RapidgatorNet.py | 1 | 5675 | # -*- coding: utf-8 -*-
import pycurl
import re
from module.common.json_layer import json_loads
from module.network.HTTPRequest import BadHeader
from module.plugins.internal.AdsCaptcha import AdsCaptcha
from module.plugins.internal.ReCaptcha import ReCaptcha
from module.plugins.internal.SolveMedia import SolveMedia
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class RapidgatorNet(SimpleHoster):
__name__ = "RapidgatorNet"
__type__ = "hoster"
__version__ = "0.34"
__pattern__ = r'http://(?:www\.)?(rapidgator\.net|rg\.to)/file/\w+'
__config__ = [("use_premium", "bool", "Use premium account if available", True)]
__description__ = """Rapidgator.net hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("chrox", None),
("stickell", "[email protected]"),
("Walter Purcaro", "[email protected]")]
API_URL = "http://rapidgator.net/api/file"
COOKIES = [("rapidgator.net", "lang", "en")]
NAME_PATTERN = r'<title>Download file (?P<N>.*)</title>'
SIZE_PATTERN = r'File size:\s*<strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong>'
OFFLINE_PATTERN = r'>(File not found|Error 404)'
JSVARS_PATTERN = r'\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*\'?(.*?)\'?;'
PREMIUM_ONLY_PATTERN = r'You can download files up to|This file can be downloaded by premium only<'
ERROR_PATTERN = r'You have reached your (?:daily|hourly) downloads limit'
WAIT_PATTERN = r'(Delay between downloads must be not less than|Try again in).+'
LINK_FREE_PATTERN = r'return \'(http://\w+.rapidgator.net/.*)\';'
RECAPTCHA_PATTERN = r'"http://api\.recaptcha\.net/challenge\?k=(.*?)"'
ADSCAPTCHA_PATTERN = r'(http://api\.adscaptcha\.com/Get\.aspx[^"\']+)'
SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.script\?k=(.*?)"'
def setup(self):
if self.account:
self.sid = self.account.getAccountInfo(self.user).get('sid', None)
else:
self.sid = None
if self.sid:
self.premium = True
self.resumeDownload = self.multiDL = self.premium
self.chunkLimit = 1
def api_response(self, cmd):
try:
json = self.load('%s/%s' % (self.API_URL, cmd),
get={'sid': self.sid,
'url': self.pyfile.url}, decode=True)
self.logDebug("API:%s" % cmd, json, "SID: %s" % self.sid)
json = json_loads(json)
status = json['response_status']
msg = json['response_details']
except BadHeader, e:
self.logError("API: %s" % cmd, e, "SID: %s" % self.sid)
status = e.code
msg = e
if status == 200:
return json['response']
elif status == 423:
self.account.empty(self.user)
self.retry()
else:
self.account.relogin(self.user)
self.retry(wait_time=60)
def handlePremium(self, pyfile):
self.api_data = self.api_response('info')
self.api_data['md5'] = self.api_data['hash']
pyfile.name = self.api_data['filename']
pyfile.size = self.api_data['size']
self.link = self.api_response('download')['url']
def handleFree(self, pyfile):
jsvars = dict(re.findall(self.JSVARS_PATTERN, self.html))
self.logDebug(jsvars)
self.req.http.lastURL = pyfile.url
self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
url = "http://rapidgator.net%s?fid=%s" % (
jsvars.get('startTimerUrl', '/download/AjaxStartTimer'), jsvars['fid'])
jsvars.update(self.getJsonResponse(url))
self.wait(jsvars.get('secs', 45), False)
url = "http://rapidgator.net%s?sid=%s" % (
jsvars.get('getDownloadUrl', '/download/AjaxGetDownload'), jsvars['sid'])
jsvars.update(self.getJsonResponse(url))
self.req.http.lastURL = pyfile.url
self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With:"])
url = "http://rapidgator.net%s" % jsvars.get('captchaUrl', '/download/captcha')
self.html = self.load(url)
for _i in xrange(5):
m = re.search(self.LINK_FREE_PATTERN, self.html)
if m:
self.link = m.group(1)
break
else:
captcha = self.handleCaptcha()
if not captcha:
self.error(_("Captcha pattern not found"))
response, challenge = captcha.challenge()
self.html = self.load(url, post={'DownloadCaptchaForm[captcha]': "",
'adcopy_challenge' : challenge,
'adcopy_response' : response})
if "The verification code is incorrect" in self.html:
self.invalidCaptcha()
else:
self.correctCaptcha()
else:
self.error(_("Download link"))
def handleCaptcha(self):
for klass in (AdsCaptcha, ReCaptcha, SolveMedia):
inst = klass(self)
if inst.detect_key():
return inst
def getJsonResponse(self, url):
res = self.load(url, decode=True)
if not res.startswith('{'):
self.retry()
self.logDebug(url, res)
return json_loads(res)
getInfo = create_getInfo(RapidgatorNet)
| gpl-3.0 | 3,505,129,110,890,734,000 | 33.603659 | 103 | 0.555947 | false |
lwcook/horsetail-matching | horsetailmatching/weightedsum.py | 1 | 7051 | import pdb
import time
import math
import copy
import warnings
import numpy as np
from hm import HorsetailMatching
class WeightedSum(HorsetailMatching):
'''Class for using weighted sum of moments within an optimization.
The code is written such that all arguments that can be used at the
initialization of a WeightedSum object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Is a list of UncertainParameter objects, or a list of
functions that return samples of the each uncertainty.
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
        of size (num_quadrature_points). It should return a function that
        predicts the qoi at an arbitrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
        design variables; if False it will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
'''
def __init__(self, fqoi, prob_uncertainties, jac=False, samples_prob=1000,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False,
w1=1, w2=1):
self.fqoi = fqoi
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = []
self.jac = jac
self.samples_prob = samples_prob
self.samples_int = 1
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
self.w1 = w1
self.w2 = w2
##############################################################################
## Public Methods
##############################################################################
def evalMetric(self, x, w1=None, w2=None):
'''Evaluates the weighted sum metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param float w1: value to weight the mean by
:param float w2: value to weight the std by
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
'''
if w1 is None:
w1 = self.w1
if w2 is None:
w2 = self.w2
if self.verbose:
print('----------')
print('At design: ' + str(x))
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose: print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
if self.verbose: print('Evaluating metric')
return self._evalWeightedSumMetric(q_samples, grad_samples)
##############################################################################
## Private methods ##
##############################################################################
def _evalWeightedSumMetric(self, q_samples, grad_samples=None):
fjs = np.array(q_samples).flatten()
M = self.samples_prob
mean = (1./M)*np.sum(fjs)
var = (1./M)*np.sum([(fj - mean)**2 for fj in fjs])
ws = self.w1*mean + self.w2*np.sqrt(var)
if grad_samples is None:
return ws
else:
ndv = grad_samples.shape[2]
gradjs = grad_samples[0, :, :]
gradient = np.zeros(ndv)
for kdv in range(ndv):
meang, varg = 0., 0.
for j, fj in enumerate(fjs):
meang += (1./M)*float(gradjs[j, kdv])
varg += (1./M)*2*(fj - mean)*float(gradjs[j, kdv])
gradient[kdv] = meang + 0.5*(var**-0.5)*varg
return ws, gradient
def getHorsetail(self):
return ([0], [0]), ([0], [0]), [([0], [0])]
## Private utility functions
#def _finDiff(fobj, dv, f0=None, eps=10**-6):
#
# if f0 is None:
# f0 = fobj(dv)
#
# fbase = copy.copy(f0)
# fnew = fobj(dv + eps)
# return float((fnew - fbase)/eps)
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
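
# Illustrative sketch (added for clarity; not part of the original module).
# The statistic computed by _evalWeightedSumMetric above is
# w1*mean + w2*std of the sampled qoi values, using the population (1/M)
# variance. A standalone equivalent:
def _weighted_sum_sketch(q_samples, w1=1.0, w2=1.0):
    q = np.asarray(q_samples, dtype=float)
    mean = q.mean()
    var = np.mean((q - mean)**2)
    return w1*mean + w2*np.sqrt(var)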
| mit | 8,754,911,680,858,841,000 | 34.791878 | 80 | 0.600624 | false |
kislyuk/tweak | docs/conf.py | 1 | 9203 | # -*- coding: utf-8 -*-
#
# Tweak documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 17 11:55:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tweak'
copyright = u'2015, Andrey Kislyuk'
author = u'Andrey Kislyuk'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tweakdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Tweak.tex', u'Tweak Documentation',
u'Andrey Kislyuk', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tweak', u'Tweak Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Tweak', u'Tweak Documentation',
author, 'Tweak', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | 4,502,705,034,488,236,000 | 31.066202 | 79 | 0.706074 | false |
garthee/gnot | modules/pstemmer.py | 1 | 12097 | class PorterStemmer:
def __init__(self):
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
readjusted downwards as the stemming progresses. Zero termination is
not in fact used in the algorithm.
Note that only lower case sequences are stemmed. Forcing to lower case
should be done before stem(...) is called.
"""
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j - 1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
        restore an e at the end of a short word, e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i < (self.k0 + 2) or not self.cons(i) or self.cons(i - 1) or not self.cons(i - 2):
return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k - length + 1:self.k + 1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
length = len(s)
self.b = self.b[:self.j + 1] + s + self.b[self.j + length + 1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
self.setto("i")
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"):
self.setto("ate")
elif self.ends("bl"):
self.setto("ble")
elif self.ends("iz"):
self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == 'l' or ch == 's' or ch == 'z':
self.k = self.k + 1
elif (self.m() == 1 and self.cvc(self.k)):
self.setto("e")
def step1c(self):
"""step1c() turns terminal y to i when there is another vowel in the stem."""
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k + 1:]
def step2(self):
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
"""
if self.b[self.k - 1] == 'a':
if self.ends("ational"):
self.r("ate")
elif self.ends("tional"):
self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"):
self.r("ence")
elif self.ends("anci"):
self.r("ance")
elif self.b[self.k - 1] == 'e':
if self.ends("izer"): self.r("ize")
elif self.b[self.k - 1] == 'l':
if self.ends("bli"):
self.r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self.ends("abli"): self.r("able")
elif self.ends("alli"):
self.r("al")
elif self.ends("entli"):
self.r("ent")
elif self.ends("eli"):
self.r("e")
elif self.ends("ousli"):
self.r("ous")
elif self.b[self.k - 1] == 'o':
if self.ends("ization"):
self.r("ize")
elif self.ends("ation"):
self.r("ate")
elif self.ends("ator"):
self.r("ate")
elif self.b[self.k - 1] == 's':
if self.ends("alism"):
self.r("al")
elif self.ends("iveness"):
self.r("ive")
elif self.ends("fulness"):
self.r("ful")
elif self.ends("ousness"):
self.r("ous")
elif self.b[self.k - 1] == 't':
if self.ends("aliti"):
self.r("al")
elif self.ends("iviti"):
self.r("ive")
elif self.ends("biliti"):
self.r("ble")
elif self.b[self.k - 1] == 'g': # --DEPARTURE--
if self.ends("logi"): self.r("log")
# To match the published algorithm, delete this phrase
def step3(self):
"""step3() dels with -ic-, -full, -ness etc. similar strategy to step2."""
if self.b[self.k] == 'e':
if self.ends("icate"):
self.r("ic")
elif self.ends("ative"):
self.r("")
elif self.ends("alize"):
self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"): self.r("ic")
elif self.b[self.k] == 'l':
if self.ends("ical"):
self.r("ic")
elif self.ends("ful"):
self.r("")
elif self.b[self.k] == 's':
if self.ends("ness"): self.r("")
def step4(self):
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"):
pass
else:
return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"):
pass
elif self.ends("ence"):
pass
else:
return
elif self.b[self.k - 1] == 'e':
if self.ends("er"):
pass
else:
return
elif self.b[self.k - 1] == 'i':
if self.ends("ic"):
pass
else:
return
elif self.b[self.k - 1] == 'l':
if self.ends("able"):
pass
elif self.ends("ible"):
pass
else:
return
elif self.b[self.k - 1] == 'n':
if self.ends("ant"):
pass
elif self.ends("ement"):
pass
elif self.ends("ment"):
pass
elif self.ends("ent"):
pass
else:
return
elif self.b[self.k - 1] == 'o':
if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'):
pass
elif self.ends("ou"):
pass
# takes care of -ous
else:
return
elif self.b[self.k - 1] == 's':
if self.ends("ism"):
pass
else:
return
elif self.b[self.k - 1] == 't':
if self.ends("ate"):
pass
elif self.ends("iti"):
pass
else:
return
elif self.b[self.k - 1] == 'u':
if self.ends("ous"):
pass
else:
return
elif self.b[self.k - 1] == 'v':
if self.ends("ive"):
pass
else:
return
elif self.b[self.k - 1] == 'z':
if self.ends("ize"):
pass
else:
return
else:
return
if self.m() > 1:
self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k - 1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k - 1
def stem(self, p, i, j):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k + 1]
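
# Illustrative usage sketch (added; not part of the original module).
# stem() takes a lower-case word and the inclusive start/end offsets:
if __name__ == '__main__':
    _stemmer = PorterStemmer()
    for _word in ['caresses', 'ponies', 'meetings', 'relational', 'hopeful']:
        print(_word + ' -> ' + _stemmer.stem(_word, 0, len(_word) - 1))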
| mit | 3,138,866,654,744,761,300 | 31.783198 | 108 | 0.42432 | false |
eternalNight/ucore_app_go | misc/dashboard/godashboard/package.py | 1 | 14975 | # Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This is the server part of the package dashboard.
# It must be run by App Engine.
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import datetime
import logging
import os
import re
import sets
import urllib2
# local imports
from auth import auth
import toutf8
import const
template.register_template_library('toutf8')
# Storage model for package info recorded on server.
class Package(db.Model):
path = db.StringProperty()
web_url = db.StringProperty() # derived from path
count = db.IntegerProperty() # grand total
week_count = db.IntegerProperty() # rolling weekly count
day_count = db.TextProperty(default='') # daily count
last_install = db.DateTimeProperty()
# data contributed by gobuilder
info = db.StringProperty()
ok = db.BooleanProperty()
last_ok = db.DateTimeProperty()
def get_day_count(self):
counts = {}
if not self.day_count:
return counts
for d in str(self.day_count).split('\n'):
date, count = d.split(' ')
counts[date] = int(count)
return counts
def set_day_count(self, count):
days = []
for day, count in count.items():
days.append('%s %d' % (day, count))
days.sort(reverse=True)
days = days[:28]
self.day_count = '\n'.join(days)
def inc(self):
count = self.get_day_count()
today = str(datetime.date.today())
count[today] = count.get(today, 0) + 1
self.set_day_count(count)
self.update_week_count(count)
self.count += 1
def update_week_count(self, count=None):
if count is None:
count = self.get_day_count()
total = 0
today = datetime.date.today()
for i in range(7):
day = str(today - datetime.timedelta(days=i))
if day in count:
total += count[day]
self.week_count = total
# PackageDaily kicks off the daily package maintenance cron job
# and serves the associated task queue.
class PackageDaily(webapp.RequestHandler):
def get(self):
# queue a task to update each package with a week_count > 0
keys = Package.all(keys_only=True).filter('week_count >', 0)
for key in keys:
taskqueue.add(url='/package/daily', params={'key': key.name()})
def post(self):
# update a single package (in a task queue)
def update(key):
p = Package.get_by_key_name(key)
if not p:
return
p.update_week_count()
p.put()
key = self.request.get('key')
if not key:
return
db.run_in_transaction(update, key)
class Project(db.Model):
name = db.StringProperty(indexed=True)
descr = db.StringProperty()
web_url = db.StringProperty()
package = db.ReferenceProperty(Package)
category = db.StringProperty(indexed=True)
tags = db.ListProperty(str)
approved = db.BooleanProperty(indexed=True)
re_bitbucket = re.compile(r'^(bitbucket\.org/[a-z0-9A-Z_.\-]+/[a-zA-Z0-9_.\-]+)(/[a-z0-9A-Z_.\-/]+)?$')
re_googlecode = re.compile(r'^[a-z0-9\-]+\.googlecode\.com/(svn|hg|git)(/[a-z0-9A-Z_.\-/]+)?$')
re_github = re.compile(r'^github\.com/[a-z0-9A-Z_.\-]+(/[a-z0-9A-Z_.\-]+)+$')
re_launchpad = re.compile(r'^launchpad\.net/([a-z0-9A-Z_.\-]+(/[a-z0-9A-Z_.\-]+)?|~[a-z0-9A-Z_.\-]+/(\+junk|[a-z0-9A-Z_.\-]+)/[a-z0-9A-Z_.\-]+)(/[a-z0-9A-Z_.\-/]+)?$')
def vc_to_web(path):
if re_bitbucket.match(path):
m = re_bitbucket.match(path)
check_url = 'http://' + m.group(1) + '/?cmd=heads'
web = 'http://' + m.group(1) + '/'
elif re_github.match(path):
m = re_github_web.match(path)
check_url = 'https://raw.github.com/' + m.group(1) + '/' + m.group(2) + '/master/'
web = 'http://github.com/' + m.group(1) + '/' + m.group(2) + '/'
elif re_googlecode.match(path):
m = re_googlecode.match(path)
check_url = 'http://'+path
if not m.group(2): # append / after bare '/hg' or '/git'
check_url += '/'
web = 'http://code.google.com/p/' + path[:path.index('.')]
elif re_launchpad.match(path):
check_url = web = 'https://'+path
else:
return False, False
return web, check_url
re_bitbucket_web = re.compile(r'bitbucket\.org/([a-z0-9A-Z_.\-]+)/([a-z0-9A-Z_.\-]+)')
re_googlecode_web = re.compile(r'code.google.com/p/([a-z0-9\-]+)')
re_github_web = re.compile(r'github\.com/([a-z0-9A-Z_.\-]+)/([a-z0-9A-Z_.\-]+)')
re_launchpad_web = re.compile(r'launchpad\.net/([a-z0-9A-Z_.\-]+(/[a-z0-9A-Z_.\-]+)?|~[a-z0-9A-Z_.\-]+/(\+junk|[a-z0-9A-Z_.\-]+)/[a-z0-9A-Z_.\-]+)(/[a-z0-9A-Z_.\-/]+)?')
re_striphttp = re.compile(r'https?://(www\.)?')
def find_googlecode_vcs(path):
# Perform http request to path/hg or path/git to check if they're
# using mercurial or git. Otherwise, assume svn.
for vcs in ['git', 'hg']:
try:
response = urlfetch.fetch('http://'+path+vcs, deadline=1)
if response.status_code == 200:
return vcs
except: pass
return 'svn'
def web_to_vc(url):
url = re_striphttp.sub('', url)
m = re_bitbucket_web.match(url)
if m:
return 'bitbucket.org/'+m.group(1)+'/'+m.group(2)
m = re_github_web.match(url)
if m:
return 'github.com/'+m.group(1)+'/'+m.group(2)
m = re_googlecode_web.match(url)
if m:
path = m.group(1)+'.googlecode.com/'
vcs = find_googlecode_vcs(path)
return path + vcs
m = re_launchpad_web.match(url)
if m:
return m.group(0)
return False
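
# Illustrative examples of the mapping above (derived from the regexps; the
# googlecode vcs suffix depends on what find_googlecode_vcs detects):
#   vc_to_web('github.com/user/repo')
#     -> ('http://github.com/user/repo/',
#         'https://raw.github.com/user/repo/master/')
#   web_to_vc('http://github.com/user/repo') -> 'github.com/user/repo'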
MaxPathLength = 100
CacheTimeout = 3600
class PackagePage(webapp.RequestHandler):
def get(self):
if self.request.get('fmt') == 'json':
return self.json()
html = memcache.get('view-package')
if not html:
tdata = {}
q = Package.all().filter('week_count >', 0)
q.order('-week_count')
tdata['by_week_count'] = q.fetch(50)
q = Package.all()
q.order('-last_install')
tdata['by_time'] = q.fetch(20)
q = Package.all()
q.order('-count')
tdata['by_count'] = q.fetch(100)
path = os.path.join(os.path.dirname(__file__), 'package.html')
html = template.render(path, tdata)
memcache.set('view-package', html, time=CacheTimeout)
self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
self.response.out.write(html)
def json(self):
json = memcache.get('view-package-json')
if not json:
q = Package.all()
s = '{"packages": ['
sep = ''
for r in q.fetch(1000):
s += '%s\n\t{"path": "%s", "last_install": "%s", "count": "%s"}' % (sep, r.path, r.last_install, r.count)
sep = ','
s += '\n]}\n'
json = s
memcache.set('view-package-json', json, time=CacheTimeout)
self.response.set_status(200)
self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
self.response.out.write(json)
def can_get_url(self, url):
try:
urllib2.urlopen(urllib2.Request(url))
return True
except:
return False
def is_valid_package_path(self, path):
return (re_bitbucket.match(path) or
re_googlecode.match(path) or
re_github.match(path) or
re_launchpad.match(path))
def record_pkg(self, path):
# sanity check string
if not path or len(path) > MaxPathLength or not self.is_valid_package_path(path):
return False
# look in datastore
key = 'pkg-' + path
p = Package.get_by_key_name(key)
if p is None:
# not in datastore - verify URL before creating
web, check_url = vc_to_web(path)
if not web:
logging.error('unrecognized path: %s', path)
return False
if not self.can_get_url(check_url):
logging.error('cannot get %s', check_url)
return False
p = Package(key_name = key, path = path, count = 0, web_url = web)
if auth(self.request):
# builder updating package metadata
p.info = self.request.get('info')
p.ok = self.request.get('ok') == "true"
if p.ok:
p.last_ok = datetime.datetime.utcnow()
else:
# goinstall reporting an install
p.inc()
p.last_install = datetime.datetime.utcnow()
# update package object
p.put()
return True
def post(self):
path = self.request.get('path')
ok = db.run_in_transaction(self.record_pkg, path)
if ok:
self.response.set_status(200)
self.response.out.write('ok')
else:
logging.error('invalid path in post: %s', path)
self.response.set_status(500)
self.response.out.write('not ok')
class ProjectPage(webapp.RequestHandler):
def get(self):
admin = users.is_current_user_admin()
if self.request.path == "/project/login":
self.redirect(users.create_login_url("/project"))
elif self.request.path == "/project/logout":
self.redirect(users.create_logout_url("/project"))
elif self.request.path == "/project/edit" and admin:
self.edit()
elif self.request.path == "/project/assoc" and admin:
self.assoc()
else:
self.list()
def assoc(self):
projects = Project.all()
for p in projects:
if p.package:
continue
path = web_to_vc(p.web_url)
if not path:
continue
pkg = Package.get_by_key_name("pkg-"+path)
if not pkg:
self.response.out.write('no: %s %s<br>' % (p.web_url, path))
continue
p.package = pkg
p.put()
self.response.out.write('yes: %s %s<br>' % (p.web_url, path))
def post(self):
if self.request.path == "/project/edit":
self.edit(True)
else:
data = dict(map(lambda x: (x, self.request.get(x)), ["name","descr","web_url"]))
if reduce(lambda x, y: x or not y, data.values(), False):
data["submitMsg"] = "You must complete all the fields."
self.list(data)
return
p = Project.get_by_key_name("proj-"+data["name"])
if p is not None:
data["submitMsg"] = "A project by this name already exists."
self.list(data)
return
p = Project(key_name="proj-"+data["name"], **data)
p.put()
path = os.path.join(os.path.dirname(__file__), 'project-notify.txt')
mail.send_mail(
sender=const.mail_from,
to=const.mail_submit_to,
subject=const.mail_submit_subject,
body=template.render(path, {'project': p}))
self.list({"submitMsg": "Your project has been submitted."})
def list(self, additional_data={}):
cache_key = 'view-project-data'
tag = self.request.get('tag', None)
if tag:
cache_key += '-'+tag
data = memcache.get(cache_key)
admin = users.is_current_user_admin()
if admin or not data:
projects = Project.all().order('category').order('name')
if not admin:
projects = projects.filter('approved =', True)
projects = list(projects)
tags = sets.Set()
for p in projects:
for t in p.tags:
tags.add(t)
if tag:
projects = filter(lambda x: tag in x.tags, projects)
data = {}
data['tag'] = tag
data['tags'] = tags
data['projects'] = projects
data['admin']= admin
if not admin:
memcache.set(cache_key, data, time=CacheTimeout)
for k, v in additional_data.items():
data[k] = v
self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
path = os.path.join(os.path.dirname(__file__), 'project.html')
self.response.out.write(template.render(path, data))
def edit(self, save=False):
if save:
name = self.request.get("orig_name")
else:
name = self.request.get("name")
p = Project.get_by_key_name("proj-"+name)
if not p:
self.response.out.write("Couldn't find that Project.")
return
if save:
if self.request.get("do") == "Delete":
p.delete()
else:
pkg_name = self.request.get("package", None)
if pkg_name:
pkg = Package.get_by_key_name("pkg-"+pkg_name)
if pkg:
p.package = pkg.key()
for f in ['name', 'descr', 'web_url', 'category']:
setattr(p, f, self.request.get(f, None))
p.approved = self.request.get("approved") == "1"
p.tags = filter(lambda x: x, self.request.get("tags", "").split(","))
p.put()
memcache.delete('view-project-data')
self.redirect('/project')
return
# get all project categories and tags
cats, tags = sets.Set(), sets.Set()
for r in Project.all():
cats.add(r.category)
for t in r.tags:
tags.add(t)
self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
path = os.path.join(os.path.dirname(__file__), 'project-edit.html')
self.response.out.write(template.render(path, {
"taglist": tags, "catlist": cats, "p": p, "tags": ",".join(p.tags) }))
def redirect(self, url):
self.response.set_status(302)
self.response.headers.add_header("Location", url)
def main():
app = webapp.WSGIApplication([
('/package', PackagePage),
('/package/daily', PackageDaily),
('/project.*', ProjectPage),
], debug=True)
run_wsgi_app(app)
if __name__ == '__main__':
main()
| bsd-3-clause | -8,376,817,798,995,562,000 | 33.90676 | 169 | 0.54177 | false |
rearmlkp/Smart_Flash | Flashcard/urls.py | 1 | 2301 | """Flashcard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import API.views as views
import Web.views as web_views
# router.register(r'users', views.UserViewSet)
# router.register(r'groups', views.GroupViewSet)
urlpatterns = [
# Admin
url(r'^admin/', admin.site.urls),
# API Stuff
url(r'^cards/(?P<pk>[0-9]+)$', views.card_detail),
url(r'^login/$', views.login),
url(r'^register/$', views.create_user),
url(r'^decks/$', views.get_users_deck),
url(r'^decks/create/$', views.create_deck),
url(r'^decks/(?P<pk>[0-9]+)$', views.card_list),
url(r'^decks/edit/(?P<pk>[0-9]+)$', views.edit_deck),
url(r'^decks/delete/(?P<pk>[0-9]+)$', views.delete_deck),
url(r'^card/edit/(?P<pk>[0-9]+)$', views.edit_card),
url(r'^card/delete/(?P<pk>[0-9]+)$', views.delete_card),
url(r'^decks/review/(?P<pk>[0-9]+)$', views.review_today),
# Web Stuff
url(r'^web/$', web_views.index, name='index'),
url(r'^web/logout/$', web_views.logout, name='logout'),
url(r'^web/register/$', web_views.register, name='register'),
url(r'^web/deck/create$', web_views.deck_create, name='deck_create'),
url(r'^web/deck/edform$', web_views.deck_edit_delete, name='deck_edit_delete'),
url(r'^web/deck/(?P<pk>[0-9]+)$', web_views.deck_detail, name='deck_detail'),
url(r'^web/deck/(?P<pk>[0-9]+)/card/create$', web_views.create_card, name='card_create'),
url(r'^web/deck/review/(?P<pk>[0-9]+)$', web_views.review, name='review'),
url(r'^web/deck/(?P<pk>[0-9]+)/edform$', web_views.card_edit_delete, name='card_edit_delete'),
url(r'^api-auth/', include('rest_framework.urls')),
]
| gpl-3.0 | -7,804,049,766,153,713,000 | 41.611111 | 98 | 0.639722 | false |
daira/zcash | contrib/seeds/generate-seeds.py | 3 | 4418 | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
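# Illustrative mappings (addresses are hypothetical):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 + bytearray([1, 2, 3, 4])  (IPv4-mapped IPv6)
#   name_to_ipv6('0x04030201') -> the same 16 bytes; the little-endian hex form is
#                                 byte-reversed back into 1.2.3.4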
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 8233)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 18233)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| mit | -2,900,496,788,579,006,500 | 31.014493 | 98 | 0.574015 | false |
Vicaris/ModPro | moviepy/video/io/ffmpeg_tools.py | 1 | 2297 | """ Misc. bindings to ffmpeg and ImageMagick."""
import os
import sys
import subprocess as sp
from moviepy.tools import subprocess_call
from moviepy.config import get_setting
def ffmpeg_movie_from_frames(filename, folder, fps, digits=6, bitrate=3000):
    """
    Writes a movie out of the frames (picture files) in a folder.
    Almost deprecated.
    """
    s = "%" + "%02d" % digits + "d.png"
    cmd = [get_setting("FFMPEG_BINARY"), "-y", "-f", "image2",
           "-r", "%d" % fps,
           "-i", os.path.join(folder, s),
           "-b", "%dk" % bitrate,
           "-r", "%d" % fps,
           filename]
    subprocess_call(cmd)
def ffmpeg_extract_subclip(filename, t1, t2, targetname=None):
""" makes a new video file playing video file ``filename`` between
the times ``t1`` and ``t2``. """
    name, ext = os.path.splitext(filename)
    if not targetname:
        T1, T2 = [int(1000*t) for t in [t1, t2]]
        targetname = "%sSUB%d_%d%s" % (name, T1, T2, ext)
cmd = [get_setting("FFMPEG_BINARY"),"-y",
"-i", filename,
"-ss", "%0.2f"%t1,
"-t", "%0.2f"%(t2-t1),
"-vcodec", "copia", "-acodec", "copia", targetname]
subprocess_call(cmd)
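# Minimal usage sketch (file names are hypothetical):
#   ffmpeg_extract_subclip("movie.mp4", 10, 20, targetname="clip.mp4")
# extracts seconds 10-20 of movie.mp4 into clip.mp4 via stream copy.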
def ffmpeg_merge_video_audio(video,audio,output, vcodec='copy',
                             acodec='copy', ffmpeg_output=False,
verbose = True):
""" merges video file ``video`` and audio file ``audio`` into one
movie file ``output``. """
cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", audio,"-i", video,
"-vcodec", vcodec, "-acodec", acodec, output]
subprocess_call(cmd, verbose = verbose)
def ffmpeg_extract_audio(inputfile,output,bitrate=3000,fps=44100):
""" extract the sound from a video file and save it in ``output`` """
cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", inputfile, "-ab", "%dk"%bitrate,
"-ar", "%d"%fps, output]
subprocess_call(cmd)
def ffmpeg_resize(video,output,size):
    """ resizes ``video`` to new size ``size`` and writes the result
    in file ``output``. """
    cmd= [get_setting("FFMPEG_BINARY"), "-i", video, "-vf",
          "scale=%d:%d"%(size[0], size[1]),
         output]
    subprocess_call(cmd)
| mit | 703,182,229,391,548,000 | 32.289855 | 91 | 0.551589 | false |
prheenan/Research | Perkins/Projects/WetLab/Util/DilutionUtil.py | 1 | 13174 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
class DilutionObj:
def __init__(self,StockConc,StockVol,DesiredConc,AddVol,Name=""):
self.StockConc=StockConc
self.StockVol=StockVol
self.DesiredConc=DesiredConc
self.AddVol=AddVol
self.Name = Name
class SolutionObj:
def __init__(self,ArrayOfDilutionObjects):
self.arr = ArrayOfDilutionObjects
def GetVolumeToDilute(Concentration,Volume,DesiredConcentration):
"""
Gets the volume to dilute a sample with a given volume and concentration
Args:
Concentration: mass per volume of the system
Volume: volume of the system. Units of mass/Concentration
DesiredConcentration: desired concentration after the diltion
Returns:
Amount of additional volume to add to the system; total volume of
Volume + (<return of this funciton>) gives the desired concentration
"""
# figure out how much stuff we have
try:
ng = Concentration*Volume
except TypeError:
ng = np.array(Concentration)*np.array(Volume)
# what total volume do we need?
volumeNeeded = ng/DesiredConcentration
# how much do we need to add?
volumeToAdd = volumeNeeded-Volume
return volumeToAdd
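# Worked example (values are hypothetical): 10uL at 100ng/uL diluted to
# 25ng/uL holds 1000ng, so the total volume must be 1000/25 = 40uL,
# i.e. add 30uL of diluent:
#   GetVolumeToDilute(100., 10., 25.)  # -> 30.0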
def PrintDilutions(StockConcs,StockVols,ConcDesired,UnitVol=None,
UnitConc=None,**kwargs):
"""
Convenience wrapper: Print all the dilutions, given desired concentrations
stocks, etc.
Args:
StockConcs,StockVols,ConcDesired: see GetDilutionObj
UnitConc,UnitVol: see PrintVolumeDilutions
        **kwargs: passed directly to GetDilutionObj
Returns:
all the dilution objects
"""
if (UnitVol is None):
UnitVol = ["uL" for _ in ConcDesired]
if (UnitConc is None):
UnitConc = ["ng/uL" for _ in ConcDesired]
dilutionObj = GetDilutionObjects(StockConcs,StockVols,ConcDesired,**kwargs)
_PrintVolumeDilutions(dilutionObj,UnitVol=UnitVol,UnitConc=UnitConc)
return dilutionObj
def GetDilutionObjects(StockConcs,StockVols,ConcDesired,**kwargs):
"""
Args:
StockConcs,StockVols,ConcDesired: see GetDilutionObj
        **kwargs: passed directly to GetDilutionObj
Returns:
List of dilution objects
"""
dilutions = GetVolumeToDilute(StockConcs,StockVols,ConcDesired)
dilutionObj = [GetDilutionObj(StockConcs,StockVols,ConcDesired,d,i,
**kwargs)
for i,d in enumerate(dilutions)]
return dilutionObj
def GetFromArrayOrScalar(Array,idx):
"""
Tries to get Array[idx]; otherwise just returns Array
Args:
Array: array-like
idx: number
Returns:
relevant element of the array
"""
try:
if (len(Array) > 1):
return Array[idx]
else:
return Array[0]
except (TypeError,IndexError) as e:
return Array
def GetDilutionObj(StockConcs,StockVols,ConcDesired,VolToAdd,Idx,
StockName=""):
"""
Returns a Dilution object at a given index, given all the informaiton
Args:
StockConcs: array or scalar-like of stock concentrations
StockVols: array or scalar-like of stock volumes
ConcDesired: array or scalar-like of desired concentrations
VolToAdd: array or scalar-like of volumes to add
Idx: index within all the arrays we want
StockName: optional names of the stock
Returns:
DilutionObj to use
"""
DesiredConc = GetFromArrayOrScalar(ConcDesired,Idx)
StockVol = GetFromArrayOrScalar(StockVols,Idx)
StockConc = GetFromArrayOrScalar(StockConcs,Idx)
AddVol = GetFromArrayOrScalar(VolToAdd,Idx)
StockName = GetFromArrayOrScalar(StockName,Idx)
return DilutionObj(StockConc=StockConc,StockVol=StockVol,
DesiredConc=DesiredConc,
AddVol=AddVol,Name=StockName)
def _DilutionString(dilutionObj,UnitVol,UnitConc):
"""
Args:
dilutionObj: list of dilution objects
UnitVol: string, unit of volume
UnitConc: string, unit of Concentration
Returns:
String representation of dilution objects
"""
toRet = ""
n = len(dilutionObj)
for i,d in enumerate(dilutionObj):
stockConcStr = "({:4.1f}{:s}@{:4.1f}{:s})".\
format(float(d.StockVol),UnitVol[i],float(d.StockConc),
UnitConc[i])
volAddStr = "({:4.1f}{:s})".format(d.AddVol,UnitVol[i])
TotalVol = float(d.AddVol) + float(d.StockVol)
toRet += ("{: <4} (#{:03d}) {: <20}" +
"add {: <8} -> {:3.1f}{:6s} in {:3.1f}{:7s}").\
format(d.Name,i,stockConcStr,volAddStr,d.DesiredConc,
UnitConc[i],TotalVol,UnitVol[i])
if (i != n-1):
toRet += "\n"
return toRet
def _PrintVolumeDilutions(dilutionObj,**kwargs):
"""
Gets and prints all the dilution objects
Args:
dilutionObj: list of dilution objects
**kwargs: see DilutionString
"""
print(_DilutionString(dilutionObj,**kwargs))
def GetVolumesNeededByConcentration(StockConcs,ConcsDesired,TotalVolume,
AlreadyHaveMass=None):
"""
Given desired and stock concentrations and a final volume, gives the
volumes needed of each stock
Args:
StockConcs: index i refers to some species, same units as
ConcsDesired[i]
ConcsDesired: what we want in the volume
AlreadyHaveMass: if present, the mass already present in the buffer
we will use. Element [i] should have the same 'mass' units as
StockConcs[i]
Returns:
Array of volumes needed going from StockConcs to ConcsDesired in
TotalVolume (note that TotalVolume-sum(<Return of this function>) is
taken up by some unspecified buffer)
"""
if (AlreadyHaveMass is None):
AlreadyHaveMass = np.zeros_like(StockConcs)
StockArr = np.array(StockConcs)
TotalVolumeNeeded = np.array(ConcsDesired)*TotalVolume/StockArr
EffectiveVolumeAlreadyPresent = np.array(AlreadyHaveMass)/StockArr
return TotalVolumeNeeded - EffectiveVolumeAlreadyPresent
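# Worked example (values are hypothetical): to get 10ng/uL from a 100ng/uL
# stock in a 50uL final solution, we need 10*50/100 = 5uL of stock:
#   GetVolumesNeededByConcentration([100.], [10.], 50.)  # -> array([ 5.])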
def SeriallyDilute(Stock,DesiredConcentrations,DesiredVolumes,
dilution_concentration=0):
"""
Given a stock and desired concentraitons and desired volumes at each
concentration, returns the list of stocks, volumes, dilutions, and final
stocks
Args:
Stock: concentration, same units as elements of DesiredConcentrations
DesiredConcentrations: array or scalar, same units as stock. These
are the concentrations we want
DesiredVolumes: scalar or array volumes, in units of au/<Stock>,
we want for each dilution. Note that *actual* volume will be a bit
more, since we need something to serially dilute with. E.g., if
DesiredVolumes was 1L, we might need 10mL extra for 'downstream'
Dilutions
        dilution_concentration: the concentration of whatever is already in
            the diluent (i.e. if we aren't diluting with something at zero
            concentration). For example, if diluting 100mM NaCl stock with a
            10mM diluent, Stock would be 100 and dilution_concentration 10
Returns
Tuple of arrays, the elements are grouped from high to low
concentrations for each of:<What stocks we used, What volumes of stocks,
what volume we diluted with, what was the resulting stock>.
Note that the first and last elements are just Stock and DesiredVolumes
"""
NumConc = len(DesiredConcentrations)
MassNeededBelow = 0
VolumeStock = []
VolumeDilute = []
Stocks = []
ResultingStock = []
# work backwards with the last one first, determine what volume
    # and concentration is needed
for i in range(NumConc-1,-1,-1):
VolumeNeeded = GetFromArrayOrScalar(DesiredVolumes,i)
ConcNeeded = GetFromArrayOrScalar(DesiredConcentrations,i)
# what mass is needed 'below' us?
MassNeeded = ConcNeeded*VolumeNeeded
MassNeededBelow += MassNeeded
# determine what total volume of the final solution we need
# (we know the mass, and the concentration is specified)
V0 = MassNeededBelow/ConcNeeded
TmpStock = Stock if (i==0) \
else GetFromArrayOrScalar(DesiredConcentrations,i-1)
conc_diff = dilution_concentration - TmpStock
# We are solving the following system:
# c_stock * V_s + c_dilute * V_dilute = MassNeededBelow
# V_s + V_dilute = V0
VolStock = (dilution_concentration*V0-MassNeededBelow)/conc_diff
VolDilute = (MassNeededBelow-TmpStock*V0 )/conc_diff
# we use the stock 'above' what we need here
VolumeStock.append(VolStock)
VolumeDilute.append(VolDilute)
Stocks.append(TmpStock)
ResultingStock.append(ConcNeeded)
# reverse the arrays so we go big to small (natural order for dilution)
RetSanitize = lambda x: x[::-1]
RetArrs = Stocks,VolumeStock,VolumeDilute,ResultingStock
return [RetSanitize(a) for a in RetArrs]
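# Worked example (values are hypothetical). Starting from a 100ng/uL stock and
# asking for 10uL each at 10 and 1 ng/uL:
#   SeriallyDilute(100., [10., 1.], 10.)
# returns, up to float rounding, ([100., 10.], [1.1, 1.], [9.9, 9.], [10., 1.]):
# dilute 1.1uL of stock with 9.9uL diluent to get 11uL at 10ng/uL, then carry
# 1uL of that into 9uL diluent for 10uL at 1ng/uL, leaving the requested 10uL
# at 10ng/uL behind.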
def PrintSerialDilution(Stocks,VolumeStock,VolumeDilute,FinalStocks,
VolString="uL",ConcString="ng/uL",
BufferString="Buffer"):
"""
    Given the results of SeriallyDilute, prints the relevant dilution steps.
    Args:
        Stocks,VolumeStock,VolumeDilute,FinalStocks: output of SeriallyDilute
        VolString,ConcString: units for the volume and concentration
"""
for stock,VolStock,VolDilute,DilutedStock in \
zip(Stocks,VolumeStock,VolumeDilute,FinalStocks):
VolumeTotal = VolStock + VolDilute
StockStr = "{:5.3g}{:s} of {:5.3g}{:s} with {:5.3g}{:s} {:s}".\
format(VolStock,VolString,stock,ConcString,VolDilute,
VolString,BufferString)
ResultStr = "{:5.3g}{:s} of {:5.3g}{:s}".\
format(VolumeTotal,VolString,DilutedStock,ConcString)
print("{:s} gives {:s}".format(StockStr,ResultStr))
def StockVolumeNeededForSerialDilution(Stock,Volumes,Desired):
"""
Gets the total volume needed of the 'stock'
Args:
see PrintSerialSteps
"""
_,VolumeStock,_,_ = SeriallyDilute(Stock,Desired,Volumes)
return VolumeStock[0]
def PrintSerialSteps(Stock,Volumes,Desired,
ConcString="ng/uL",VolString="uL",BufferString="Buffer",
**kwargs):
"""
Given a stock concentration, desired volumes and concentrations, prints
out the steps needed to serially dilute
Args:
see PrintSerialDilution
"""
Stocks,VolumeStock,VolumeDilute,FinalStocks = \
SeriallyDilute(Stock,Desired,Volumes,**kwargs)
PrintSerialDilution(Stocks,VolumeStock,VolumeDilute,
FinalStocks,ConcString=ConcString,
VolString=VolString,BufferString=BufferString)
def PrintSolutionSteps(Stats,Volume,vol_units="uL",BufferName="buffer",
PostVolume=0):
"""
    Prints the steps needed to prepare the solution from the given stocks
Args:
Stats: List of Tuples; each element is <Name,Concentration Unit,
Stock Concentraiton, Desired concentration, mass present in solution
already>
PostVolume: if true, this is the volume to add after some step (e.g.
thawing). We store the solution at a higher concentration
"""
    # get the stocks, desired concentrations, and already-present concentrations
Stocks = [s[2] for s in Stats]
Desired = [s[3] for s in Stats]
Already = [s[4] for s in Stats]
Volumes = GetVolumesNeededByConcentration(Stocks,Desired,Volume,
AlreadyHaveMass=Already)
BufferVolume = Volume - sum(Volumes) - PostVolume
    # check that our buffer is reasonably non-negative. if it is very close
# to zero (less than 1% error), let it slide.
assert (BufferVolume > -Volume/100) , \
"Warning: cant make this solution. Need a negative volume of buffer. "+\
"Use more concentrated stocks"
print("In a total solution of {:.1f}{:s}...".format(Volume,vol_units))
for (name,conc_units,conc_stock,desired_conc,_),vol_stock in\
zip(Stats,Volumes):
print("\t{:.2f}{:s} of {:.2f}{:s} {:s} for {:.2f}{:s} in solution".\
format(vol_stock,vol_units,conc_stock,conc_units,name,
desired_conc,conc_units))
print("\tRemainder is ({:.1f}{:s}) of {:s}".format(BufferVolume,
vol_units,BufferName))
if (PostVolume > 1e-12):
print("\tTo use, add ({:.1f}{:s}) of {:s}".format(PostVolume,
vol_units,BufferName))
| gpl-3.0 | 7,359,071,448,070,813,000 | 38.921212 | 80 | 0.642781 | false |
dfm/emcee3 | emcee3/tests/unit/test_autocorr.py | 1 | 1109 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import pytest
import numpy as np
from ...autocorr import integrated_time, AutocorrError
__all__ = ["test_nd", "test_too_short"]
def get_chain(seed=1234, ndim=3, N=100000):
np.random.seed(seed)
a = 0.9
x = np.empty((N, ndim))
x[0] = np.zeros(ndim)
for i in range(1, N):
x[i] = x[i-1] * a + np.random.rand(ndim)
return x
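# For this AR(1)-like chain with coefficient a = 0.9, the integrated
# autocorrelation time is (1 + a) / (1 - a) = 19, which the tests below
# check to within 20%.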
def test_1d(seed=1234, ndim=1, N=150000, c=6):
x = get_chain(seed=seed, ndim=ndim, N=N)
tau, M = integrated_time(x, c=c, full_output=True)
assert np.all(M > c * tau)
assert np.all(np.abs(tau - 19.0) / 19. < 0.2)
def test_nd(seed=1234, ndim=3, N=150000):
x = get_chain(seed=seed, ndim=ndim, N=N)
tau = integrated_time(x)
assert np.all(np.abs(tau - 19.0) / 19. < 0.2)
def test_too_short(seed=1234, ndim=3, N=500):
x = get_chain(seed=seed, ndim=ndim, N=N)
with pytest.raises(AutocorrError):
integrated_time(x)
with pytest.raises(AutocorrError):
integrated_time(x, low=100)
tau = integrated_time(x, quiet=True) # NOQA
| mit | 4,308,948,867,629,914,000 | 26.04878 | 54 | 0.61046 | false |
bjoernricks/kaizen | kaizen/phase/phase.py | 1 | 2714 | # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# kaizen - Continuously improve, build and manage free software
#
# Copyright (C) 2011 Björn Ricks <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
NONE = "None"
DOWNLOADED = "Downloaded"
EXTRACTED = "Extracted"
PATCHED = "Patched"
CONFIGURED = "Configured"
BUILT = "Built"
DESTROOTED = "Destrooted"
ACTIVATED = "Activated"
class UnknownPhaseError(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return "Phase '%s' does not exist." % (self.name)
class Phase(object):
def __init__(self, name, value):
self.value = value
self.name = name
def __cmp__(self, other):
if self.value < other.value:
return -1
if self.value == other.value:
return 0
if self.value > other.value:
return 1
def __eq__(self, other):
if not isinstance(other, Phase):
return False
return self.value == other.value
def __neq__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.value
def __repr__(self):
return "<Phase name='%s' value='%s' id='%s'>" % (self.name, self.value,
id(self))
class Phases(object):
def __init__(self):
self.phases = dict()
self.phase_names = [
NONE,
DOWNLOADED,
EXTRACTED,
PATCHED,
CONFIGURED,
BUILT,
DESTROOTED,
ACTIVATED,
]
for i, name in enumerate(self.phase_names):
self.phases[name] = Phase(name, i)
def get(self, name):
if not name in self.phases:
raise UnknownPhaseError(name)
return self.phases[name]
phases_list = Phases()
| gpl-2.0 | -6,345,546,671,171,965,000 | 27.557895 | 79 | 0.56948 | false |
srinath-chakravarthy/ovito | tests/scripts/test_suite/wigner_seitz_modifier.py | 1 | 1252 | from ovito import *
from ovito.io import *
from ovito.modifiers import *
import numpy as np
node = import_file("../../files/NetCDF/sheared_aSi.nc")
modifier = WignerSeitzAnalysisModifier()
node.modifiers.append(modifier)
modifier.reference.load("../../files/NetCDF/sheared_aSi.nc")
dataset.anim.current_frame = 4
print("Parameter defaults:")
print(" eliminate_cell_deformation: {}".format(modifier.eliminate_cell_deformation))
modifier.eliminate_cell_deformation = True
print(" frame_offset: {}".format(modifier.frame_offset))
modifier.frame_offset = 0
print(" reference_frame: {}".format(modifier.reference_frame))
modifier.reference_frame = 0
print(" use_frame_offset: {}".format(modifier.use_frame_offset))
modifier.use_frame_offset = False
node.compute()
print("Output:")
print(" vacancy_count= {}".format(modifier.vacancy_count))
print(" interstitial_count= {}".format(modifier.interstitial_count))
print(" vacancy_count= {}".format(node.output.attributes['WignerSeitz.vacancy_count']))
print(" interstitial_count= {}".format(node.output.attributes['WignerSeitz.interstitial_count']))
print(node.output["Occupancy"].array)
assert(node.output.attributes['WignerSeitz.vacancy_count'] == 970)
assert(modifier.vacancy_count == 970)
| gpl-3.0 | -175,022,617,238,454,720 | 31.947368 | 98 | 0.749201 | false |
PopulationGenetics/pyucsc | ucsc/db.py | 1 | 3664 | """
UCSC Interface via SQLalchemy
=============================
"""
import os
import re
from sqlalchemy import sql
from sqlalchemy import orm
import sqlalchemy as sa
import logging; log = logging.getLogger(__name__)
import config
import model
Session = orm.sessionmaker()
session = Session()
initialized = False
meta = None
# Set up mappers
# ==============
class tables(object):
""" Namespace for tables """
pass
def abort_ro(*args,**kwargs):
return
def create_session(name, echo=False):
""" load UCSC table definitions and create session """
global initialized, meta, DBSNP
if initialized:
return
uri = config.get_database_uri(name)
log.info('connecting to UCSC at ' + uri)
engine = sa.create_engine(uri, echo=echo)
Session.configure(bind=engine)
conn = engine.connect()
# try:
# log.info('loading cached UCSC table definitions')
# table_file = os.path.join(os.path.split(__file__)[0], '.tables.pickle')
# meta = pickle.load(file(table_file))
# meta.bind = engine
# except IOError:
# print 'WARNING: could not load table metadata, please call cache_tables()'
meta = sa.MetaData()
meta.bind = conn
meta.reflect()
# populate tables namespace
for (name, table) in meta.tables.items():
if 'wgEncode' not in name:
setattr(tables, name, table)
# KGXref is one to one with knownGene, so we can safely always use this join
join_knowngene_xref = sql.join(tables.knownGene, tables.kgXref,
tables.kgXref.c.kgID==tables.knownGene.c.name
)
join_knowncanonical = join_knowngene_xref.join(tables.knownCanonical, # this join means known gene only returns canonical transcripts
tables.knownCanonical.c.transcript==tables.knownGene.c.name
)
# get the most recent snp table available
snp_tables = sorted([x for x in meta.tables if re.match('snp\d\d\d$', x)])
snp_table = snp_tables[-1]
DBSNP = meta.tables[snp_table]
model.Snp.table = DBSNP
orm.mapper(model.Snp, DBSNP, primary_key=DBSNP.c.name, properties={
'class_': DBSNP.c['class'],
})
if snp_table + 'Common' in meta.tables:
commonSnp = meta.tables[snp_table + 'Common']
model.CommonSnp.table = commonSnp
orm.mapper(model.CommonSnp, commonSnp, primary_key=commonSnp.c.name, properties={
'class_': commonSnp.c['class'],
})
# TODO: should remove this join?
orm.mapper(model.KnownGene, join_knowngene_xref, primary_key=tables.knownGene.c.name,
exclude_properties=[tables.knownCanonical.c.chrom]
)
orm.mapper(model.KnownCanonical, join_knowncanonical, primary_key=tables.knownGene.c.name,
exclude_properties=[tables.knownCanonical.c.chrom, tables.knownCanonical.c.transcript]
)
orm.mapper(model.CcdsGene, tables.ccdsGene, primary_key=tables.ccdsGene.c.name)
orm.mapper(model.RefGene, tables.refGene, primary_key=tables.refGene.c.name)
orm.mapper(model.ChainSelf, tables.chainSelf, primary_key=tables.chainSelf.c.id)
orm.mapper(model.ChainSelfLink, tables.chainSelfLink,
primary_key=[tables.chainSelfLink.c.qStart, tables.chainSelfLink.c.chainId],
properties={
'chain': orm.relationship(model.ChainSelf, backref='links',
primaryjoin=tables.chainSelfLink.c.chainId==tables.chainSelf.c.id,
foreign_keys=[tables.chainSelfLink.c.chainId],
lazy=False
),
}
)
# monkeypatch session to enforce readonly
session.flush = abort_ro
initialized = True
model.session = session
return session
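# Minimal usage sketch (the genome-build name is an assumption):
#   session = create_session('hg19')
#   gene = session.query(model.KnownGene).first()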
| bsd-3-clause | -1,658,897,240,787,379,700 | 31.424779 | 137 | 0.660753 | false |
mathieubenoit/GDSII_Generator | generateWaferMap.py | 1 | 2385 | #!/usr/bin/python
import os
import numpy
import gdspy
ld_mask_edge = {'layer': 300, 'datatype': 0}
ld_kerf = {'layer': 200, 'datatype': 0}
ld_acfmask = {'layer': 100, 'datatype': 0}
ld_topmetal= {'layer': 81, 'datatype': 0}
ld_po= {'layer': 27, 'datatype': 1}
def GenerateCell(chipX = 14100., chipY=16210.,leftKerf=85.,rightKerf=15.,topKerf=465.,botKerf=15.,narray_X=13,narray_Y=11,mask_width=254000.,wafer_offset_x=-570.0,wafer_offset_y=2595.0,wafer_radius=100000) :
#Extract existing die mask top cell from GDS
gdsii = gdspy.current_library.read_gds(infile='Timepix3_top_ACF_Nometal.GDS',layers=ld_acfmask)
die = gdspy.current_library.extract("Timepix3_top")
die_ref = gdspy.CellReference(die,origin=(leftKerf,botKerf))
#Create top reticle cell
pixel_cell = gdspy.Cell("Reticle_top")
# Create a kerf layer for visualization
kerfWidth = leftKerf+rightKerf+chipX
kerfHeight = topKerf+botKerf+chipY
Kerf = gdspy.Rectangle((0,0), (kerfWidth, kerfHeight),**ld_kerf)
# Add cells to the top cell
pixel_cell.add(Kerf)
pixel_cell.add(die_ref.get_polygonsets())
pixel_cell.add(die_ref.get_paths())
#Fill the Kerf with Resist
pixel_cell.add(gdspy.Rectangle((0,0), (leftKerf, kerfHeight),**ld_acfmask))
pixel_cell.add(gdspy.Rectangle((0,0), (kerfWidth, botKerf),**ld_acfmask))
pixel_cell.add(gdspy.Rectangle((0,kerfHeight), (kerfWidth, kerfHeight-topKerf),**ld_acfmask))
pixel_cell.add(gdspy.Rectangle((kerfWidth-rightKerf,0), (kerfWidth, kerfHeight-topKerf),**ld_acfmask))
wafer_cell = gdspy.Cell('Wafer_Top')
mask_edge = gdspy.Rectangle((-mask_width/2,-mask_width/2), (mask_width/2., mask_width/2.),**ld_mask_edge)
array_origin_x = -narray_X*(leftKerf+rightKerf+chipX)/2. + wafer_offset_x
array_origin_y = -narray_Y*(botKerf+topKerf+chipY)/2. + wafer_offset_y
wafer_edge = gdspy.Path(1,(wafer_radius,0))
wafer_edge.arc(wafer_radius,0,360,layer=400)
wafer_cell.add(wafer_edge)
print kerfWidth,kerfHeight
wafer_cell.add(gdspy.CellArray(pixel_cell,narray_X,narray_Y,spacing=(kerfWidth,kerfHeight),origin=(array_origin_x,array_origin_y)))
wafer_cell.add(mask_edge)
# View the resulting cell
gdspy.LayoutViewer(cells=[wafer_cell],depth=1)
gdspy.write_gds("wafer_mask.gds",cells=[wafer_cell,pixel_cell])
if __name__ == '__main__':
GenerateCell() | lgpl-3.0 | 4,824,642,096,970,110,000 | 35.707692 | 207 | 0.690985 | false |
matthew-brett/draft-statsmodels | scikits/statsmodels/sandbox/bspline.py | 1 | 20284 | '''
Bspines and smoothing splines.
General references:
Craven, P. and Wahba, G. (1978) "Smoothing noisy data with spline functions.
Estimating the correct degree of smoothing by
the method of generalized cross-validation."
Numerische Mathematik, 31(4), 377-403.
Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
Learning." Springer-Verlag. 536 pages.
Hutchison, M. and Hoog, F. "Smoothing noisy data with spline functions."
Numerische Mathematik, 47(1), 99-106.
'''
import numpy as np
import numpy.linalg as L
from scipy.linalg import solveh_banded
from scipy.optimize import golden
from models import _hbspline
# Issue warning regarding heavy development status of this module
import warnings
_msg = "The bspline code is technology preview and requires significant work\
on the public API and documentation. The API will likely change in the future"
warnings.warn(_msg, UserWarning)
def _band2array(a, lower=0, symmetric=False, hermitian=False):
"""
Take an upper or lower triangular banded matrix and return a
numpy array.
INPUTS:
       a -- a matrix in upper or lower triangular banded form
       lower -- is the matrix upper or lower triangular?
       symmetric -- if True, return the original result plus its transpose
       hermitian -- if True (and symmetric False), return the original
                    result plus its conjugate transpose
"""
n = a.shape[1]
r = a.shape[0]
_a = 0
if not lower:
for j in range(r):
_b = np.diag(a[r-1-j],k=j)[j:(n+j),j:(n+j)]
_a += _b
if symmetric and j > 0: _a += _b.T
elif hermitian and j > 0: _a += _b.conjugate().T
else:
for j in range(r):
_b = np.diag(a[j],k=j)[0:n,0:n]
_a += _b
if symmetric and j > 0: _a += _b.T
elif hermitian and j > 0: _a += _b.conjugate().T
_a = _a.T
return _a
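# Worked example of the banded storage handled above (values are hypothetical):
# the lower banded matrix [[1, 2, 3], [4, 5, 0]] (main diagonal first) expands to
#   _band2array(np.array([[1, 2, 3], [4, 5, 0]]), lower=1)
#   -> [[1, 0, 0],
#       [4, 2, 0],
#       [0, 5, 3]]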
def _upper2lower(ub):
"""
Convert upper triangular banded matrix to lower banded form.
INPUTS:
ub -- an upper triangular banded matrix
OUTPUTS: lb
lb -- a lower triangular banded matrix with same entries
as ub
"""
lb = np.zeros(ub.shape, ub.dtype)
nrow, ncol = ub.shape
for i in range(ub.shape[0]):
lb[i,0:(ncol-i)] = ub[nrow-1-i,i:ncol]
lb[i,(ncol-i):] = ub[nrow-1-i,0:i]
return lb
def _lower2upper(lb):
"""
Convert lower triangular banded matrix to upper banded form.
INPUTS:
lb -- a lower triangular banded matrix
OUTPUTS: ub
ub -- an upper triangular banded matrix with same entries
as lb
"""
ub = np.zeros(lb.shape, lb.dtype)
nrow, ncol = lb.shape
for i in range(lb.shape[0]):
ub[nrow-1-i,i:ncol] = lb[i,0:(ncol-i)]
ub[nrow-1-i,0:i] = lb[i,(ncol-i):]
return ub
def _triangle2unit(tb, lower=0):
"""
Take a banded triangular matrix and return its diagonal and the
unit matrix: the banded triangular matrix with 1's on the diagonal,
i.e. each row is divided by the corresponding entry on the diagonal.
INPUTS:
tb -- a lower triangular banded matrix
lower -- if True, then tb is assumed to be lower triangular banded,
in which case return value is also lower triangular banded.
OUTPUTS: d, b
d -- diagonal entries of tb
       b -- unit matrix: if lower is False, b is upper triangular
            banded and its rows have been divided by d,
            else if lower is True, b is lower triangular banded
            and its columns have been divided by d.
"""
if lower: d = tb[0].copy()
else: d = tb[-1].copy()
if lower: return d, (tb / d)
else:
l = _upper2lower(tb)
return d, _lower2upper(l / d)
def _trace_symbanded(a, b, lower=0):
"""
Compute the trace(ab) for two upper or banded real symmetric matrices
stored either in either upper or lower form.
INPUTS:
a, b -- two banded real symmetric matrices (either lower or upper)
lower -- if True, a and b are assumed to be the lower half
OUTPUTS: trace
trace -- trace(ab)
"""
if lower:
t = _zero_triband(a * b, lower=1)
return t[0].sum() + 2 * t[1:].sum()
else:
t = _zero_triband(a * b, lower=0)
return t[-1].sum() + 2 * t[:-1].sum()
def _zero_triband(a, lower=0):
"""
Explicitly zero out unused elements of a real symmetric banded matrix.
INPUTS:
       a -- a real symmetric banded matrix (either upper or lower half)
lower -- if True, a is assumed to be the lower half
"""
nrow, ncol = a.shape
if lower:
for i in range(nrow): a[i,(ncol-i):] = 0.
else:
for i in range(nrow): a[i,0:i] = 0.
return a
class BSpline(object):
'''
Bsplines of a given order and specified knots.
Implementation is based on description in Chapter 5 of
Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
Learning." Springer-Verlag. 536 pages.
INPUTS:
knots -- a sorted array of knots with knots[0] the lower boundary,
knots[1] the upper boundary and knots[1:-1] the internal
knots.
order -- order of the Bspline, default is 4 which yields cubic
splines
M -- number of additional boundary knots, if None it defaults
to order
coef -- an optional array of real-valued coefficients for the Bspline
of shape (knots.shape + 2 * (M - 1) - order,).
x -- an optional set of x values at which to evaluate the
Bspline to avoid extra evaluation in the __call__ method
'''
# FIXME: update parameter names, replace single character names
# FIXME: `order` should be actual spline order (implemented as order+1)
## FIXME: update the use of spline order in extension code (evaluate is recursively called)
# FIXME: eliminate duplicate M and m attributes (m is order, M is related to tau size)
def __init__(self, knots, order=4, M=None, coef=None, x=None):
knots = np.squeeze(np.unique(np.asarray(knots)))
if knots.ndim != 1:
raise ValueError, 'expecting 1d array for knots'
self.m = order
if M is None:
M = self.m
self.M = M
self.tau = np.hstack([[knots[0]]*(self.M-1), knots, [knots[-1]]*(self.M-1)])
self.K = knots.shape[0] - 2
if coef is None:
self.coef = np.zeros((self.K + 2 * self.M - self.m), np.float64)
else:
self.coef = np.squeeze(coef)
            if self.coef.shape != (self.K + 2 * self.M - self.m,):
                raise ValueError, 'coefficients of Bspline have incorrect shape'
if x is not None:
self.x = x
def _setx(self, x):
self._x = x
self._basisx = self.basis(self._x)
def _getx(self):
return self._x
x = property(_getx, _setx)
def __call__(self, *args):
"""
Evaluate the BSpline at a given point, yielding
a matrix B and return
B * self.coef
INPUTS:
args -- optional arguments. If None, it returns self._basisx,
the BSpline evaluated at the x values passed in __init__.
Otherwise, return the BSpline evaluated at the
first argument args[0].
OUTPUTS: y
y -- value of Bspline at specified x values
BUGS:
If self has no attribute x, an exception will be raised
because self has no attribute _basisx.
"""
if not args:
b = self._basisx.T
else:
x = args[0]
b = np.asarray(self.basis(x)).T
return np.squeeze(np.dot(b, self.coef))
def basis_element(self, x, i, d=0):
"""
Evaluate a particular basis element of the BSpline,
or its derivative.
INPUTS:
x -- x values at which to evaluate the basis element
i -- which element of the BSpline to return
d -- the order of derivative
OUTPUTS: y
y -- value of d-th derivative of the i-th basis element
of the BSpline at specified x values
"""
x = np.asarray(x, np.float64)
_shape = x.shape
if _shape == ():
x.shape = (1,)
x.shape = (np.product(_shape,axis=0),)
if i < self.tau.shape[0] - 1:
## TODO: OWNDATA flags...
v = _hbspline.evaluate(x, self.tau, self.m, d, i, i+1)
else:
return np.zeros(x.shape, np.float64)
if (i == self.tau.shape[0] - self.m):
v = np.where(np.equal(x, self.tau[-1]), 1, v)
v.shape = _shape
return v
def basis(self, x, d=0, lower=None, upper=None):
"""
Evaluate the basis of the BSpline or its derivative.
If lower or upper is specified, then only
the [lower:upper] elements of the basis are returned.
INPUTS:
x -- x values at which to evaluate the basis element
i -- which element of the BSpline to return
d -- the order of derivative
lower -- optional lower limit of the set of basis
elements
upper -- optional upper limit of the set of basis
elements
OUTPUTS: y
y -- value of d-th derivative of the basis elements
of the BSpline at specified x values
"""
x = np.asarray(x)
_shape = x.shape
if _shape == ():
x.shape = (1,)
x.shape = (np.product(_shape,axis=0),)
if upper is None:
upper = self.tau.shape[0] - self.m
if lower is None:
lower = 0
upper = min(upper, self.tau.shape[0] - self.m)
lower = max(0, lower)
d = np.asarray(d)
if d.shape == ():
v = _hbspline.evaluate(x, self.tau, self.m, int(d), lower, upper)
else:
if d.shape[0] != 2:
raise ValueError, "if d is not an integer, expecting a jx2 \
array with first row indicating order \
of derivative, second row coefficient in front."
v = 0
for i in range(d.shape[1]):
v += d[1,i] * _hbspline.evaluate(x, self.tau, self.m, d[0,i], lower, upper)
v.shape = (upper-lower,) + _shape
if upper == self.tau.shape[0] - self.m:
v[-1] = np.where(np.equal(x, self.tau[-1]), 1, v[-1])
return v
def gram(self, d=0):
"""
Compute Gram inner product matrix, storing it in lower
triangular banded form.
The (i,j) entry is
G_ij = integral b_i^(d) b_j^(d)
where b_i are the basis elements of the BSpline and (d) is the
d-th derivative.
If d is a matrix then, it is assumed to specify a differential
operator as follows: the first row represents the order of derivative
with the second row the coefficient corresponding to that order.
For instance:
[[2, 3],
[3, 1]]
represents 3 * f^(2) + 1 * f^(3).
INPUTS:
d -- which derivative to apply to each basis element,
if d is a matrix, it is assumed to specify
a differential operator as above
OUTPUTS: gram
gram -- the matrix of inner products of (derivatives)
of the BSpline elements
"""
d = np.squeeze(d)
if np.asarray(d).shape == ():
self.g = _hbspline.gram(self.tau, self.m, int(d), int(d))
else:
d = np.asarray(d)
if d.shape[0] != 2:
raise ValueError, "if d is not an integer, expecting a jx2 \
array with first row indicating order \
of derivative, second row coefficient in front."
if d.shape == (2,):
d.shape = (2,1)
self.g = 0
for i in range(d.shape[1]):
for j in range(d.shape[1]):
self.g += d[1,i]* d[1,j] * _hbspline.gram(self.tau, self.m, int(d[0,i]), int(d[0,j]))
self.g = self.g.T
self.d = d
return np.nan_to_num(self.g)
class SmoothingSpline(BSpline):
penmax = 30.
method = "target_df"
target_df = 5
default_pen = 1.0e-03
optimize = True
'''
A smoothing spline, which can be used to smooth scatterplots, i.e.
a list of (x,y) tuples.
See fit method for more information.
'''
def fit(self, y, x=None, weights=None, pen=0.):
"""
Fit the smoothing spline to a set of (x,y) pairs.
INPUTS:
y -- response variable
x -- if None, uses self.x
weights -- optional array of weights
pen -- constant in front of Gram matrix
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
ALGORITHM:
Formally, this solves a minimization:
fhat = ARGMIN_f SUM_i=1^n (y_i-f(x_i))^2 + pen * int f^(2)^2
int is integral. pen is lambda (from Hastie)
See Chapter 5 of
Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
Learning." Springer-Verlag. 536 pages.
for more details.
TODO:
Should add arbitrary derivative penalty instead of just
second derivative.
"""
banded = True
if x is None:
x = self._x
bt = self._basisx.copy()
else:
bt = self.basis(x)
if pen == 0.: # can't use cholesky for singular matrices
banded = False
if x.shape != y.shape:
raise ValueError, 'x and y shape do not agree, by default x are \
the Bspline\'s internal knots'
if pen >= self.penmax:
pen = self.penmax
if weights is not None:
self.weights = weights
else:
self.weights = 1.
_w = np.sqrt(self.weights)
bt *= _w
# throw out rows with zeros (this happens at boundary points!)
mask = np.flatnonzero(1 - np.alltrue(np.equal(bt, 0), axis=0))
bt = bt[:,mask]
y = y[mask]
self.df_total = y.shape[0]
bty = np.squeeze(np.dot(bt, _w * y))
self.N = y.shape[0]
if not banded:
self.btb = np.dot(bt, bt.T)
_g = _band2array(self.g, lower=1, symmetric=True)
self.coef, _, self.rank = L.lstsq(self.btb + pen*_g, bty)[0:3]
self.rank = min(self.rank, self.btb.shape[0])
del(_g)
else:
self.btb = np.zeros(self.g.shape, np.float64)
nband, nbasis = self.g.shape
for i in range(nbasis):
for k in range(min(nband, nbasis-i)):
self.btb[k,i] = (bt[i] * bt[i+k]).sum()
bty.shape = (1,bty.shape[0])
self.pen = pen
self.chol, self.coef = solveh_banded(self.btb +
pen*self.g,
bty, lower=1)
self.coef = np.squeeze(self.coef)
self.resid = y * self.weights - np.dot(self.coef, bt)
self.pen = pen
del(bty); del(mask); del(bt)
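    # Minimal usage sketch (assumes the compiled _hbspline extension is
    # importable; knot placement and penalty are illustrative):
    #   knots = np.linspace(0., 1., 11)
    #   ss = SmoothingSpline(knots, order=4, x=x)
    #   ss.gram(d=2)           # penalize the integrated squared 2nd derivative
    #   ss.fit(y, pen=1e-3)
    #   yhat = ss(x)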
def smooth(self, y, x=None, weights=None):
if self.method == "target_df":
if hasattr(self, 'pen'):
self.fit(y, x=x, weights=weights, pen=self.pen)
else:
self.fit_target_df(y, x=x, weights=weights, df=self.target_df)
elif self.method == "optimize_gcv":
self.fit_optimize_gcv(y, x=x, weights=weights)
def gcv(self):
"""
Generalized cross-validation score of current fit.
Craven, P. and Wahba, G. "Smoothing noisy data with spline functions.
Estimating the correct degree of smoothing by
the method of generalized cross-validation."
Numerische Mathematik, 31(4), 377-403.
"""
norm_resid = (self.resid**2).sum()
return norm_resid / (self.df_total - self.trace())
def df_resid(self):
"""
Residual degrees of freedom in the fit.
self.N - self.trace()
where self.N is the number of observations of last fit.
"""
return self.N - self.trace()
def df_fit(self):
"""
How many degrees of freedom used in the fit?
self.trace()
"""
return self.trace()
def trace(self):
"""
Trace of the smoothing matrix S(pen)
TODO: addin a reference to Wahba, and whoever else I used.
"""
if self.pen > 0:
_invband = _hbspline.invband(self.chol.copy())
tr = _trace_symbanded(_invband, self.btb, lower=1)
return tr
else:
return self.rank
def fit_target_df(self, y, x=None, df=None, weights=None, tol=1.0e-03,
apen=0, bpen=1.0e-03):
"""
Fit smoothing spline with approximately df degrees of freedom
used in the fit, i.e. so that self.trace() is approximately df.
Uses binary search strategy.
In general, df must be greater than the dimension of the null space
of the Gram inner product. For cubic smoothing splines, this means
that df > 2.
INPUTS:
y -- response variable
x -- if None, uses self.x
df -- target degrees of freedom
weights -- optional array of weights
tol -- (relative) tolerance for convergence
apen -- lower bound of penalty for binary search
bpen -- upper bound of penalty for binary search
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
"""
df = df or self.target_df
olddf = y.shape[0] - self.m
if hasattr(self, "pen"):
self.fit(y, x=x, weights=weights, pen=self.pen)
curdf = self.trace()
if np.fabs(curdf - df) / df < tol:
return
if curdf > df:
apen, bpen = self.pen, 2 * self.pen
else:
apen, bpen = 0., self.pen
while True:
curpen = 0.5 * (apen + bpen)
self.fit(y, x=x, weights=weights, pen=curpen)
curdf = self.trace()
if curdf > df:
apen, bpen = curpen, 2 * curpen
else:
apen, bpen = apen, curpen
if apen >= self.penmax:
raise ValueError, "penalty too large, try setting penmax \
higher or decreasing df"
if np.fabs(curdf - df) / df < tol:
break
def fit_optimize_gcv(self, y, x=None, weights=None, tol=1.0e-03,
brack=(-100,20)):
"""
Fit smoothing spline trying to optimize GCV.
Try to find a bracketing interval for scipy.optimize.golden
based on bracket.
It is probably best to use target_df instead, as it is
sometimes difficult to find a bracketing interval.
INPUTS:
y -- response variable
x -- if None, uses self.x
df -- target degrees of freedom
weights -- optional array of weights
tol -- (relative) tolerance for convergence
brack -- an initial guess at the bracketing interval
OUTPUTS: None
The smoothing spline is determined by self.coef,
subsequent calls of __call__ will be the smoothing spline.
"""
def _gcv(pen, y, x):
self.fit(y, x=x, pen=np.exp(pen))
a = self.gcv()
return a
        a = golden(_gcv, args=(y,x), brack=brack, tol=tol)
| bsd-3-clause | -464,578,479,505,752,400 | 29.640483 | 105 | 0.543581 | false |
quantopian/nose_xunit_gevent | nose_xunit_gevent.py | 1 | 6235 | """
Xunit for the nose_gevented_multiprocess plugin
The xunit plugin works by keeping ongoing stats on the test run as it
progresses, with hooks that are run before/after each test to update
the stats.
Unfortunately, when those hooks are called in subprocesses, it doesn't
work.
There's a nose_xunitmp plugin which claims to fix this (I haven't
verified that it does one way or the other), but it doesn't work with
the nose_gevented_multiprocess plugin, because it relies on
multiprocessing.Manager, but nose_gevented_multiprocess uses popen,
not multiprocessing, so that's no good.
This plugin fixes that by using temporary directories to collect stats
and error reports among the multiple test processes, before finally
aggregating them all at the end.
I doubt my implementation is particularly "pythonic," and maybe I
could have come up with a better scheme than temporary directories for
interprocess communication, but this works well enough for us.
"""
import codecs
import cPickle as pickle
import os
import tempfile
import uuid
from nose.plugins.base import Plugin
from nose.plugins.xunit import Xunit
from nose.pyversion import force_unicode
envvar_name = '_nose_xunit_gevent_dirs'
stat_names = ('errors', 'failures', 'passes', 'skipped')
class OnDiskCounter(object):
"""Creates a temporary directory in which to store updates to the
counter. Supports addition and value retrieval."""
def __init__(self, directory=None):
self.do_cleanup = not directory
self.directory = directory or tempfile.mkdtemp()
def __iadd__(self, y):
if not isinstance(y, (int, OnDiskCounter)):
raise TypeError
fn = '{0}/{1}'.format(self.directory, uuid.uuid1())
pickle.dump(int(y), open(fn, "w"))
return self
def __add__(self, y):
if not isinstance(y, (int, OnDiskCounter)):
raise TypeError
return int(self) + int(y)
def __radd__(self, y):
if not isinstance(y, (int, OnDiskCounter)):
raise TypeError
return int(self) + int(y)
def __int__(self):
val = 0
for f in sorted(os.listdir(self.directory)):
val += pickle.load(open('{0}/{1}'.format(self.directory, f)))
return val
def __str__(self):
return str(int(self))
def __repr__(self):
return str(int(self))
def __del__(self):
if not self.do_cleanup:
return
for f in os.listdir(self.directory):
os.remove('{0}/{1}'.format(self.directory, f))
os.rmdir(self.directory)
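# Minimal usage sketch of the counter (a temporary directory is created on
# demand and cleaned up when the counter is garbage-collected):
#   c = OnDiskCounter()
#   c += 2
#   c += 3
#   int(c)  # -> 5, recomputed by summing the pickled increments on disk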
class OnDiskList(object):
"""Creates a temporary directory in which to store updates to the
list. Supports append and retrieval."""
def __init__(self, directory=None):
self.do_cleanup = not directory
self.directory = directory or tempfile.mkdtemp()
def append(self, item):
fn = '{0}/{1}'.format(self.directory, uuid.uuid1())
pickle.dump(item, open(fn, "w"))
def __iter__(self):
for f in sorted(os.listdir(self.directory)):
yield pickle.load(open('{0}/{1}'.format(self.directory, f)))
def __str__(self):
return str([i for i in self])
def __repr__(self):
return repr([i for i in self])
def __del__(self):
if not self.do_cleanup:
return
for f in os.listdir(self.directory):
os.remove('{0}/{1}'.format(self.directory, f))
os.rmdir(self.directory)
class XunitGevent(Xunit):
"""Test results in XUnit XML format when nose_gevented_multiprocess
is in use."""
name = 'xunit-gevent'
score = 2000
error_report_filename = None
error_report_file = None
def options(self, parser, env):
"""Sets additional command line options."""
Plugin.options(self, parser, env)
parser.add_option(
'--xunit-gevent-file', action='store',
dest='xunit_gevent_file', metavar="FILE",
default=env.get('NOSE_XUNIT_GEVENT_FILE', 'nosetests.xml'),
help=("Path to xml file to store the xunit report in. "
"Default is nosetests.xml in the working directory "
"[NOSE_XUNIT_GEVENT_FILE]"))
def configure(self, options, config):
"""Configures the xunit plugin."""
Plugin.configure(self, options, config)
self.config = config
if self.enabled:
try:
dirs = os.environ[envvar_name].split(',')
except KeyError:
dirs = [None for name in range(len(stat_names) + 1)]
self.stats = {name: OnDiskCounter(dirs.pop(0))
for name in stat_names}
self.errorlist = OnDiskList(directory=dirs.pop(0))
os.environ[envvar_name] = ','.join(
[self.stats[s].directory for s in stat_names] +
[self.errorlist.directory])
self.error_report_filename = options.xunit_gevent_file
def report(self, stream):
"""Writes an Xunit-formatted XML file
The file includes a report of test errors and failures.
"""
self.error_report_file = codecs.open(self.error_report_filename, 'w',
self.encoding, 'replace')
self.stats['encoding'] = self.encoding
self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+ self.stats['passes'] + self.stats['skipped'])
self.error_report_file.write(
u'<?xml version="1.0" encoding="%(encoding)s"?>'
u'<testsuite name="nosetests" tests="%(total)d" '
u'errors="%(errors)d" failures="%(failures)d" '
u'skip="%(skipped)d">' % self.stats)
self.error_report_file.write(u''.join([
force_unicode(error)
for error
in self.errorlist
]))
self.error_report_file.write(u'</testsuite>')
self.error_report_file.close()
if self.config.verbosity > 1:
stream.writeln("-" * 70)
stream.writeln("XML: %s" % self.error_report_file.name)
# So that temporary directories are cleaned up
del self.stats
del self.errorlist
| apache-2.0 | -3,738,876,490,507,157,000 | 32.702703 | 78 | 0.6085 | false |
CrispyMcToast/bkup | src/fs/Scanner.py | 1 | 4092 | #!/usr/bin/python
import threading
import os
import hashlib
import time
MAX_THREADS = 5
thread_count = 0
tc_lock = threading.Lock()
def inc_count():
global thread_count
tc_lock.acquire()
thread_count += 1
tc_lock.release()
def dec_count():
global thread_count
tc_lock.acquire()
thread_count -= 1
tc_lock.release()
def get_count():
global thread_count
tc_lock.acquire()
count = thread_count
tc_lock.release()
return count
hash_lock = threading.Lock()
scanned_hash = {}
def merge_hash(addative):
hash_lock.acquire()
scanned_hash.update(addative)
hash_lock.release()
def get_hash():
    # TODO: this returns a reference to the shared dict rather than a copy;
    # callers must treat the result as read-only.
hash_lock.acquire()
h = scanned_hash
hash_lock.release()
return h
class Scanner(threading.Thread):
def __init__(self, base_path, master=True, appendable=""):
threading.Thread.__init__(self)
self.runnable = threading.Event()
self.base_path = base_path
self.total_processed = 0
self.hashed_files = {}
self.subthreads = []
self.thread_lock = threading.Lock()
self.appendable = appendable
self.exit = False
self.master = master
self.complete = False
def run(self):
self.runnable.set()
inc_count()
self.scan()
dec_count()
while self.master and get_count() != 0:
time.sleep(1)
self.complete = True
def finished(self):
c = self.complete
self.thread_lock.acquire()
        for s in self.subthreads:
            c = c & s.finished()
self.thread_lock.release()
return c
def get_total(self):
return self.total_processed
def scan(self):
path = ""
for root, subdir, files in os.walk(self.base_path):
path = root
self.total_processed += 1
self.hashed_files[self.appendable+"/"] = 0
while get_count() < MAX_THREADS and len(subdir) > 0:
appended_path = self.appendable+"/"+subdir[0]
s = Scanner(root+"/"+subdir[0], master=False,
appendable=appended_path)
self.thread_lock.acquire()
self.subthreads.append(s)
self.thread_lock.release()
s.start()
del subdir[0]
for f in files:
try:
self.runnable.wait()
if self.exit:
return
fpath = path + "/" + f
if not os.path.islink(fpath):
h = self.hash(fpath)
filep = self.remove_base(fpath)
self.total_processed += 1
self.hashed_files[self.appendable+"/"+filep] = h
except PermissionError as e:
#ignore
continue
except OSError as e:
#ignore
continue
merge_hash(self.hashed_files)
self.hashed_files={}
def remove_base(self, path):
return path[len(self.base_path)+1:]
    def hash(self, path, blocksize=65536):
        hasher = hashlib.sha256()
        with open(path, "rb") as f:
            buf = f.read(blocksize)
            while len(buf) > 0:
                hasher.update(buf)
                buf = f.read(blocksize)
        return hasher.hexdigest()
def pause(self):
self.thread_lock.acquire()
for s in self.subthreads:
s.pause()
self.thread_lock.release()
self.runnable.clear()
def unpause(self):
self.thread_lock.acquire()
for s in self.subthreads:
s.unpause()
self.thread_lock.release()
self.runnable.set()
def stop(self):
self.thread_lock.acquire()
for s in self.subthreads:
s.stop()
self.thread_lock.release()
self.exit = True
self.runnable.clear()
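# Illustrative usage (hypothetical, not part of the original module):
#
#   scanner = Scanner('/srv/data')    # master thread; subdirectories are
#   scanner.start()                   # handed to up to MAX_THREADS workers
#   while not scanner.finished():
#       time.sleep(1)
#   hashes = get_hash()   # {relative/path: sha256 hexdigest, dirs map to 0}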
| gpl-3.0 | -2,186,930,800,156,176,000 | 23.357143 | 72 | 0.516129 | false |
Nettacker/Nettacker | lib/payload/shellcode/stack/engine.py | 1 | 3693 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
from core.alert import error
from core.compatible import version
def shellcoder(shellcode):
n = 0
xshellcode = '\\x'
for w in shellcode:
n += 1
xshellcode += str(w)
if n == 2:
n = 0
xshellcode += str('\\x')
return xshellcode[:-2]
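# Example of the transformation above: shellcoder('31c0') returns '\x31\xc0'
# (each pair of hex digits is prefixed with '\x'; the trailing prefix left by
# the loop is removed by the [:-2] slice).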
def st(data):
    if version() == 2:
        return str(binascii.b2a_hex(data[::-1]))
    if version() == 3:
        return (binascii.b2a_hex(data[::-1].encode('latin-1'))).decode('latin-1')
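# Example: st('AB') reverses the input and hex-encodes it, returning '4241' -
# the little-endian byte order expected by the push instructions built below.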
def generate(data, register, gtype):
length = len(data)
    if gtype == 'int':
        flag_8 = True
        try:
            data = hex(int(data, 8))
        except ValueError:
            flag_8 = False
        if flag_8 is False:
            try:
                data = hex(int(data, 16))
            except ValueError:
                error('hex or digit required!\nExit\n')
if gtype == 'string':
data = st(data)
if length <= 3:
if gtype == 'string':
data = str('0x') + str(data)
if len(data) % 2 != 0:
data = data.replace('0x', '0x0')
if len(data) == 8:
data = data + '90\npop %s\nshr $0x8,%s\npush %s\n' % (
register, register, register)
if len(data) == 6:
data = data + '9090\npop %s\nshr $0x10,%s\npush %s\n' % (
register, register, register)
if len(data) == 4:
data = data + '909090\npop %s\nshr $0x10,%s\nshr $0x8,%s\npush %s\n' % (
register, register, register, register)
data = str('push $') + str(data)
if length >= 4:
if gtype == 'int':
data = data[2:]
stack_content = data
shr_counter = len(stack_content) % 8
shr = None
if shr_counter == 2:
shr = '\npop %s\nshr $0x10,%s\nshr $0x8,%s\npush %s\n' % (
register, register, register, register)
stack_content = stack_content[0:2] + '909090' + stack_content[2:]
if shr_counter == 4:
shr = '\npop %s\nshr $0x10,%s\npush %s\n' % (register, register,
register)
stack_content = stack_content[0:4] + '9090' + stack_content[4:]
if shr_counter == 6:
shr = '\npop %s\nshr $0x8,%s\npush %s\n' % (register, register,
register)
stack_content = stack_content[0:6] + '90' + stack_content[6:]
zshr = shr
m = int(len(stack_content))
n = int(len(stack_content) / 8)
file_shellcode = ''
        if (len(stack_content) % 8) == 0:
            # Push the padded string one 4-byte (8 hex digit) chunk at a
            # time; prepending each instruction keeps the emitted pushes in
            # the original chunk order.
            while (n != 0):
                zx = m - 8
                file_shellcode = 'push $0x' + str(stack_content[
                    zx:m]) + '\n' + file_shellcode
                m -= 8
                n = n - 1
if zshr is None:
file_z = file_shellcode
if zshr is not None:
rep1 = file_shellcode[:16]
rep2 = rep1 + zshr
file_z = file_shellcode.replace(rep1, rep2)
data = file_z
return data
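# Illustrative call (hypothetical values): generate('/bin/sh', '%eax', 'string')
# emits AT&T-syntax push (and, when padding was needed, shr) instructions that
# rebuild the hex-encoded string on the stack.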
| gpl-3.0 | -6,920,994,318,472,593,000 | 33.839623 | 84 | 0.426753 | false |
destijl/grr | grr/gui/http_api.py | 1 | 21177 | #!/usr/bin/env python
"""HTTP API logic that ties API call handlers with HTTP routes."""
import json
import time
import traceback
import urllib2
# pylint: disable=g-bad-import-order,unused-import
from grr.gui import django_lib
# pylint: enable=g-bad-import-order,unused-import
from django import http
from werkzeug import exceptions as werkzeug_exceptions
from werkzeug import routing
from google.protobuf import json_format
from google.protobuf import symbol_database
import logging
from grr.gui import api_auth_manager
from grr.gui import api_call_handler_base
from grr.gui import api_call_router
from grr.gui import api_value_renderers
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import log
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import utils
from grr.lib.aff4_objects import users as aff4_users
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import api_pb2
class Error(Exception):
pass
class PostRequestParsingError(Error):
pass
class UnsupportedHttpMethod(Error):
pass
class AdditionalArgsProcessingError(Error):
pass
class UnexpectedResultTypeError(Error):
pass
class ApiCallRouterNotFoundError(Error):
pass
class RouterMatcher(object):
"""Matches requests to routers (and caches them)."""
def __init__(self):
self._routing_maps_cache = utils.FastStore()
def _BuildHttpRoutingMap(self, router_cls):
"""Builds a werkzeug routing map out of a given router class."""
if not issubclass(router_cls, api_call_router.ApiCallRouter):
raise ValueError("Router has to be an instance of ApiCallRouter.")
routing_map = routing.Map()
# Note: we traverse methods of the base class (ApiCallRouter) to avoid
# potential problems caused by child router classes using the @Http
# annotation (thus adding additional unforeseen HTTP paths/methods). We
# don't want the HTTP API to depend on a particular router implementation.
for _, metadata in router_cls.GetAnnotatedMethods().items():
for http_method, path, unused_options in metadata.http_methods:
routing_map.add(
routing.Rule(
path, methods=[http_method], endpoint=metadata))
# This adds support for the next version of the API that uses
        # standardized JSON protobuf serialization.
routing_map.add(
routing.Rule(
path.replace("/api/", "/api/v2/"),
methods=[http_method],
endpoint=metadata))
return routing_map
def _GetRoutingMap(self, router):
"""Returns a routing map for a given router instance."""
try:
routing_map = self._routing_maps_cache.Get(router.__class__)
except KeyError:
routing_map = self._BuildHttpRoutingMap(router.__class__)
self._routing_maps_cache.Put(router.__class__, routing_map)
return routing_map
def _SetField(self, args, type_info, value):
"""Sets fields on the arg rdfvalue object."""
if hasattr(type_info, "enum"):
try:
coerced_obj = type_info.enum[value.upper()]
except KeyError:
# A bool is an enum but serializes to "1" / "0" which are both not in
# enum or reverse_enum.
coerced_obj = type_info.type.FromSerializedString(value)
else:
coerced_obj = type_info.type.FromSerializedString(value)
args.Set(type_info.name, coerced_obj)
def _GetArgsFromRequest(self, request, method_metadata, route_args):
"""Builds args struct out of HTTP request."""
format_mode = GetRequestFormatMode(request, method_metadata)
if request.method in ["GET", "HEAD"]:
if method_metadata.args_type:
unprocessed_request = request.GET
if hasattr(unprocessed_request, "dict"):
unprocessed_request = unprocessed_request.dict()
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
elif type_info.name in unprocessed_request:
self._SetField(args, type_info, unprocessed_request[type_info.name])
else:
args = None
elif request.method in ["POST", "DELETE", "PATCH"]:
try:
args = method_metadata.args_type()
for type_info in args.type_infos:
if type_info.name in route_args:
self._SetField(args, type_info, route_args[type_info.name])
if request.META["CONTENT_TYPE"].startswith("multipart/form-data;"):
payload = json.loads(request.POST["_params_"])
args.FromDict(payload)
for name, fd in request.FILES.items():
args.Set(name, fd.read())
elif format_mode == JsonMode.PROTO3_JSON_MODE:
# NOTE: Arguments rdfvalue has to be a protobuf-based RDFValue.
args_proto = args.protobuf()
json_format.Parse(request.body or "{}", args_proto)
args.ParseFromString(args_proto.SerializeToString())
else:
payload = json.loads(request.body or "{}")
if payload:
args.FromDict(payload)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while parsing POST request %s (%s): %s",
request.path, request.method, e)
raise PostRequestParsingError(e)
else:
raise UnsupportedHttpMethod("Unsupported method: %s." % request.method)
return args
def MatchRouter(self, request):
"""Returns a router for a given HTTP request."""
router = api_auth_manager.API_AUTH_MGR.GetRouterForUser(request.user)
routing_map = self._GetRoutingMap(router)
matcher = routing_map.bind("%s:%s" % (request.environ["SERVER_NAME"],
request.environ["SERVER_PORT"]))
try:
match = matcher.match(request.path, request.method)
except werkzeug_exceptions.NotFound:
raise ApiCallRouterNotFoundError("No API router was found for (%s) %s" %
(request.path, request.method))
router_method_metadata, route_args_dict = match
return (router, router_method_metadata,
self._GetArgsFromRequest(request, router_method_metadata,
route_args_dict))
class JSONEncoderWithRDFPrimitivesSupport(json.JSONEncoder):
"""Custom JSON encoder that encodes handlers output.
Custom encoder is required to facilitate usage of primitive values -
booleans, integers and strings - in handlers responses.
If handler references an RDFString, RDFInteger or and RDFBOol when building a
response, it will lead to JSON encoding failure when response encoded,
unless this custom encoder is used. Another way to solve this issue would be
to explicitly call api_value_renderers.RenderValue on every value returned
from the renderer, but it will make the code look overly verbose and dirty.
"""
def default(self, obj):
if isinstance(obj, (rdfvalue.RDFInteger, rdfvalue.RDFBool,
rdfvalue.RDFString)):
return obj.SerializeToDataStore()
return json.JSONEncoder.default(self, obj)
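# Illustrative (hypothetical) usage: with this encoder,
#   json.dumps({'n': rdfvalue.RDFInteger(5)},
#              cls=JSONEncoderWithRDFPrimitivesSupport)
# succeeds, whereas the default JSONEncoder would raise TypeError.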
class JsonMode(object):
"""Enum class for various JSON encoding modes."""
PROTO3_JSON_MODE = 0
GRR_JSON_MODE = 1
GRR_ROOT_TYPES_STRIPPED_JSON_MODE = 2
GRR_TYPE_STRIPPED_JSON_MODE = 3
def GetRequestFormatMode(request, method_metadata):
"""Returns JSON format mode corresponding to a given request and method."""
if request.path.startswith("/api/v2/"):
return JsonMode.PROTO3_JSON_MODE
if hasattr(request, "GET") and request.GET.get("strip_type_info", ""):
return JsonMode.GRR_TYPE_STRIPPED_JSON_MODE
for http_method, unused_url, options in method_metadata.http_methods:
if (http_method == request.method and
options.get("strip_root_types", False)):
return JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE
return JsonMode.GRR_JSON_MODE
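# Illustrative mapping (assumed request shapes):
#   GET /api/v2/clients                  -> JsonMode.PROTO3_JSON_MODE
#   GET /api/clients?strip_type_info=1   -> JsonMode.GRR_TYPE_STRIPPED_JSON_MODE
#   matched route with strip_root_types  -> JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE
#   anything else                        -> JsonMode.GRR_JSON_MODE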
class HttpRequestHandler(object):
"""Handles HTTP requests."""
@staticmethod
def BuildToken(request, execution_time):
"""Build an ACLToken from the request."""
# The request.GET dictionary will also be filled on HEAD calls.
if request.method in ["GET", "HEAD"]:
reason = request.GET.get("reason", "")
elif request.method in ["POST", "DELETE", "PATCH"]:
# The header X-GRR-REASON is set in api-service.js, which django converts
# to HTTP_X_GRR_REASON.
reason = utils.SmartUnicode(
urllib2.unquote(request.META.get("HTTP_X_GRR_REASON", "")))
# We assume that request.user contains the username that we can trust.
# No matter what authentication method is used, the WebAuthManager is
    # responsible for authenticating the user and setting request.user to
# a correct value (see gui/webauth.py).
#
# The token that's built here will be later used to find an API router,
# get the ApiCallHandler from the router, and then to call the handler's
# Handle() method. API router will be responsible for all the ACL checks.
token = access_control.ACLToken(
username=request.user,
reason=reason,
process="GRRAdminUI",
expiry=rdfvalue.RDFDatetime.Now() + execution_time)
for field in ["REMOTE_ADDR", "HTTP_X_FORWARDED_FOR"]:
remote_addr = request.META.get(field, "")
if remote_addr:
token.source_ips.append(remote_addr)
return token
def _FormatResultAsJson(self, result, format_mode=None):
if result is None:
return dict(status="OK")
if format_mode == JsonMode.PROTO3_JSON_MODE:
return json.loads(json_format.MessageToJson(result.AsPrimitiveProto()))
elif format_mode == JsonMode.GRR_ROOT_TYPES_STRIPPED_JSON_MODE:
result_dict = {}
for field, value in result.ListSetFields():
if isinstance(field, (rdf_structs.ProtoDynamicEmbedded,
rdf_structs.ProtoEmbedded,
rdf_structs.ProtoList)):
result_dict[field.name] = api_value_renderers.RenderValue(value)
else:
result_dict[field.name] = api_value_renderers.RenderValue(value)[
"value"]
return result_dict
elif format_mode == JsonMode.GRR_TYPE_STRIPPED_JSON_MODE:
rendered_data = api_value_renderers.RenderValue(result)
return api_value_renderers.StripTypeInfo(rendered_data)
elif format_mode == JsonMode.GRR_JSON_MODE:
return api_value_renderers.RenderValue(result)
else:
raise ValueError("Invalid format_mode: %s", format_mode)
@staticmethod
def CallApiHandler(handler, args, token=None):
"""Handles API call to a given handler with given args and token."""
result = handler.Handle(args, token=token)
expected_type = handler.result_type
if expected_type is None:
expected_type = None.__class__
if result.__class__ != expected_type:
raise UnexpectedResultTypeError("Expected %s, but got %s." %
(expected_type.__name__,
result.__class__.__name__))
return result
def __init__(self, router_matcher=None):
self._router_matcher = router_matcher or RouterMatcher()
def _BuildResponse(self,
status,
rendered_data,
method_name=None,
headers=None,
token=None,
no_audit_log=False):
"""Builds HTTPResponse object from rendered data and HTTP status."""
response = http.HttpResponse(
status=status, content_type="application/json; charset=utf-8")
response["Content-Disposition"] = "attachment; filename=response.json"
response["X-Content-Type-Options"] = "nosniff"
if token and token.reason:
response["X-GRR-Reason"] = utils.SmartStr(token.reason)
if method_name:
response["X-API-Method"] = method_name
if no_audit_log:
response["X-No-Log"] = "True"
for key, value in (headers or {}).items():
response[key] = value
response.write(")]}'\n") # XSSI protection
# To avoid IE content sniffing problems, escape the tags. Otherwise somebody
# may send a link with malicious payload that will be opened in IE (which
# does content sniffing and doesn't respect Content-Disposition header) and
    # IE will treat the document as html and execute arbitrary JS that was
# passed with the payload.
str_data = json.dumps(
rendered_data, cls=JSONEncoderWithRDFPrimitivesSupport)
response.write(str_data.replace("<", r"\u003c").replace(">", r"\u003e"))
return response
def _BuildStreamingResponse(self, binary_stream, method_name=None):
"""Builds HTTPResponse object for streaming."""
response = http.StreamingHttpResponse(
streaming_content=binary_stream.GenerateContent(),
content_type="binary/octet-stream")
response["Content-Disposition"] = ("attachment; filename=%s" %
binary_stream.filename)
if method_name:
response["X-API-Method"] = method_name
if binary_stream.content_length:
response["Content-Length"] = binary_stream.content_length
return response
def HandleRequest(self, request):
"""Handles given HTTP request."""
impersonated_username = config_lib.CONFIG["AdminUI.debug_impersonate_user"]
if impersonated_username:
logging.info("Overriding user as %s", impersonated_username)
request.user = config_lib.CONFIG["AdminUI.debug_impersonate_user"]
if not aff4_users.GRRUser.IsValidUsername(request.user):
return self._BuildResponse(
403, dict(message="Invalid username: %s" % request.user))
try:
router, method_metadata, args = self._router_matcher.MatchRouter(request)
except access_control.UnauthorizedAccess as e:
logging.exception("Access denied to %s (%s): %s", request.path,
request.method, e)
additional_headers = {
"X-GRR-Unauthorized-Access-Reason": utils.SmartStr(e.message),
"X-GRR-Unauthorized-Access-Subject": utils.SmartStr(e.subject)
}
return self._BuildResponse(
403,
dict(
message="Access denied by ACL: %s" % utils.SmartStr(e.message),
subject=utils.SmartStr(e.subject)),
headers=additional_headers)
except ApiCallRouterNotFoundError as e:
return self._BuildResponse(404, dict(message=e.message))
except werkzeug_exceptions.MethodNotAllowed as e:
return self._BuildResponse(405, dict(message=e.message))
except Error as e:
logging.exception("Can't match URL to router/method: %s", e)
return self._BuildResponse(
500, dict(
message=str(e), traceBack=traceback.format_exc()))
# SetUID() is called here so that ACL checks done by the router do not
# clash with datastore ACL checks.
# TODO(user): increase token expiry time.
token = self.BuildToken(request, 60).SetUID()
handler = None
try:
# ACL checks are done here by the router. If this method succeeds (i.e.
# does not raise), then handlers run without further ACL checks (they're
# free to do some in their own implementations, though).
handler = getattr(router, method_metadata.name)(args, token=token)
if handler.args_type != method_metadata.args_type:
raise RuntimeError("Handler args type doesn't match "
"method args type: %s vs %s" %
(handler.args_type, method_metadata.args_type))
binary_result_type = (
api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE)
if (handler.result_type != method_metadata.result_type and
not (handler.result_type is None and
method_metadata.result_type == binary_result_type)):
raise RuntimeError("Handler result type doesn't match "
"method result type: %s vs %s" %
(handler.result_type, method_metadata.result_type))
# HEAD method is only used for checking the ACLs for particular API
# methods.
if request.method == "HEAD":
# If the request would return a stream, we add the Content-Length
# header to the response.
if (method_metadata.result_type ==
method_metadata.BINARY_STREAM_RESULT_TYPE):
binary_stream = handler.Handle(args, token=token)
headers = None
if binary_stream.content_length:
headers = {"Content-Length": binary_stream.content_length}
return self._BuildResponse(
200, {"status": "OK"},
method_name=method_metadata.name,
headers=headers,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
else:
return self._BuildResponse(
200, {"status": "OK"},
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
if (method_metadata.result_type ==
method_metadata.BINARY_STREAM_RESULT_TYPE):
binary_stream = handler.Handle(args, token=token)
return self._BuildStreamingResponse(
binary_stream, method_name=method_metadata.name)
else:
format_mode = GetRequestFormatMode(request, method_metadata)
result = self.CallApiHandler(handler, args, token=token)
rendered_data = self._FormatResultAsJson(
result, format_mode=format_mode)
return self._BuildResponse(
200,
rendered_data,
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except access_control.UnauthorizedAccess as e:
logging.exception("Access denied to %s (%s) with %s: %s", request.path,
request.method, method_metadata.name, e)
additional_headers = {
"X-GRR-Unauthorized-Access-Reason": utils.SmartStr(e.message),
"X-GRR-Unauthorized-Access-Subject": utils.SmartStr(e.subject)
}
return self._BuildResponse(
403,
dict(
message="Access denied by ACL: %s" % e.message,
subject=utils.SmartStr(e.subject)),
headers=additional_headers,
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except api_call_handler_base.ResourceNotFoundError as e:
return self._BuildResponse(
404,
dict(message=e.message),
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except NotImplementedError as e:
return self._BuildResponse(
501,
dict(message=e.message),
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error while processing %s (%s) with %s: %s",
request.path, request.method,
handler.__class__.__name__, e)
return self._BuildResponse(
500,
dict(
message=str(e), traceBack=traceback.format_exc()),
method_name=method_metadata.name,
no_audit_log=method_metadata.no_audit_log_required,
token=token)
def RenderHttpResponse(request):
"""Renders HTTP response to a given HTTP request."""
start_time = time.time()
response = HTTP_REQUEST_HANDLER.HandleRequest(request)
total_time = time.time() - start_time
log.LOGGER.LogHttpApiCall(request, response)
method_name = response.get("X-API-Method", "unknown")
if response.status_code == 200:
status = "SUCCESS"
elif response.status_code == 403:
status = "FORBIDDEN"
elif response.status_code == 404:
status = "NOT_FOUND"
elif response.status_code == 501:
status = "NOT_IMPLEMENTED"
else:
status = "SERVER_ERROR"
if request.method == "HEAD":
metric_name = "api_access_probe_latency"
else:
metric_name = "api_method_latency"
stats.STATS.RecordEvent(
metric_name, total_time, fields=(method_name, "http", status))
return response
HTTP_REQUEST_HANDLER = None
class HttpApiInitHook(registry.InitHook):
"""Register HTTP API handlers."""
def RunOnce(self):
global HTTP_REQUEST_HANDLER
HTTP_REQUEST_HANDLER = HttpRequestHandler()
db = symbol_database.Default()
# Register api_pb2.DESCRIPTOR in the database, so that all API-related
# protos are recognized when Any messages are unpacked.
db.RegisterFileDescriptor(api_pb2.DESCRIPTOR)
stats.STATS.RegisterEventMetric(
"api_method_latency",
fields=[("method_name", str), ("protocol", str), ("status", str)])
stats.STATS.RegisterEventMetric(
"api_access_probe_latency",
fields=[("method_name", str), ("protocol", str), ("status", str)])
| apache-2.0 | -6,772,753,926,901,431,000 | 35.893728 | 80 | 0.651934 | false |
bgroff/kala-app | django_kala/auth/views/settings/avatar.py | 1 | 2011 | from django.conf import settings
from django.contrib import messages
from auth.forms.settings.avatar import AvatarForm
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView
from PIL import Image
from io import BytesIO
size = (32, 32)
class AvatarView(LoginRequiredMixin, TemplateView):
template_name = 'accounts/settings/avatar.html'
def get_context_data(self, **kwargs):
return {
'form': self.form,
'user': self.user,
}
def dispatch(self, request, pk, *args, **kwargs):
self.user = get_object_or_404(get_user_model().objects.all(), pk=pk)
if not request.user.is_superuser and request.user != self.user:
raise PermissionDenied(_('You do not have permission to edit this user.'))
self.form = AvatarForm(request.POST or None, request.FILES or None)
return super(AvatarView, self).dispatch(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        if self.form.is_valid():
            try:
                avatar_content = request.FILES['avatar'].read()
                avatar = Image.open(BytesIO(avatar_content))
                avatar = avatar.resize(size)
                avatar_out = BytesIO()
                avatar.save(avatar_out, format='PNG')
                avatar_out.seek(0)
                manager = settings.PLATFORM_MANAGER()
                manager.upload_avatar(avatar_out, self.user)
            except Exception as exception:
                messages.error(request, _('The avatar could not be updated: {0}.').format(exception))
            else:
                messages.success(request, _('The avatar has been updated.'))
            return redirect(reverse('users:avatar', args=[self.user.pk]))
return self.render_to_response(self.get_context_data())
| mit | -1,904,576,512,794,497,300 | 34.910714 | 86 | 0.652909 | false |
errx/django | tests/cache/tests.py | 1 | 82056 | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
from __future__ import unicode_literals
import os
import re
import copy
import shutil
import tempfile
import threading
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import (cache, caches, CacheKeyWarning,
InvalidCacheBackendError, DEFAULT_CACHE_ALIAS)
from django.db import connection, router, transaction
from django.core.cache.utils import make_template_fragment_key
from django.http import HttpResponse, StreamingHttpResponse
from django.middleware.cache import (FetchFromCacheMiddleware,
UpdateCacheMiddleware, CacheMiddleware)
from django.template import Template
from django.template.response import TemplateResponse
from django.test import TestCase, TransactionTestCase, RequestFactory, override_settings
from django.test.utils import IgnorePendingDeprecationWarningsMixin
from django.utils import six
from django.utils import timezone
from django.utils import translation
from django.utils.cache import (patch_vary_headers, get_cache_key,
learn_cache_key, patch_cache_control, patch_response_headers)
from django.utils.encoding import force_text
from django.views.decorators.cache import cache_page
try: # Use the same idiom as in cache backends
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpickable(object):
def __getstate__(self):
raise pickle.PickleError()
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertEqual(cache.get("key"), None)
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertEqual(result, True)
self.assertEqual(cache.get("addkey1"), None)
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertEqual(cache.get("does_not_exist"), None)
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), None)
cache.delete("key1")
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertEqual(cache.has_key("hello1"), False)
self.assertEqual(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in cache, False)
self.assertEqual("goodbye2" in cache, False)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr, 'answer')
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr, 'answer')
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), None)
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(cache.get("expire1"), None)
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), None)
self.assertEqual(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), None)
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
cache.set_many({'a': 1, 'b': 2})
cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1')
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.incr_version, 'answer')
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
self.assertRaises(ValueError, cache.decr_version, 'answer')
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
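# For example: custom_key_func('answer', 'prefix', 2) -> 'CUSTOM-prefix-2-answer'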
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `params` are test specific overrides and `_caches_settings_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
setting = dict((k, base.copy()) for k in _caches_setting_base.keys())
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
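# Illustrative result (assumed inputs): with base={'BACKEND': 'x'} and
# KEY_PREFIX='p' passed via params, the returned 'v2' entry is
# {'BACKEND': 'x', 'VERSION': 2, 'KEY_PREFIX': 'p'} - params win over
# _caches_setting_base, which wins over base.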
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def setUp(self):
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertEqual(result, False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertEqual(cache.get("does_not_exist"), None)
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set('a', 'a')
cache.set('b', 'b')
cache.set('c', 'c')
cache.set('d', 'd')
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set("key1", "spam")
cache.set("key2", "eggs")
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertEqual(cache.has_key("hello1"), True)
self.assertEqual(cache.has_key("goodbye1"), False)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in cache, True)
self.assertEqual("goodbye2" in cache, False)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
self.assertRaises(ValueError, cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(cache.get("expire1"), None)
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertEqual(cache.has_key("expire3"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.set("key3", "ham")
cache.delete_many(["key1", "key2"])
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set("key1", "spam")
cache.set("key2", "eggs")
cache.clear()
self.assertEqual(cache.get("key1"), None)
self.assertEqual(cache.get("key2"), None)
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
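        # e.g. memcached would read 2592001 (30 days + 1 second) as the epoch
        # timestamp Jan 31, 1970 - already in the past - so without the
        # backend's conversion these keys would expire immediately.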
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
'''
Passing in None into timeout results in a value that is cached forever
'''
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_zero_timeout(self):
'''
Passing in zero into timeout results in a value that is not cached
'''
cache.set('key1', 'eggs', 0)
self.assertEqual(cache.get('key1'), None)
cache.add('key2', 'ham', 0)
self.assertEqual(cache.get('key2'), None)
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertEqual(cache.get('key3'), None)
self.assertEqual(cache.get('key4'), None)
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
        # Mimic a custom ``make_key`` method being defined, since the default
        # one will never trigger the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached does not allow whitespace or control characters in keys
cache.set('key with spaces', 'value')
self.assertEqual(len(w), 2)
self.assertIsInstance(w[0].message, CacheKeyWarning)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# memcached limits key length to 250
cache.set('a' * 251, 'value')
self.assertEqual(len(w), 1)
self.assertIsInstance(w[0].message, CacheKeyWarning)
finally:
cache.key_func = old_func
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertEqual(cache.get('answer1', version=2), None)
self.assertEqual(caches['v2'].get('answer1'), None)
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertEqual(caches['v2'].get('answer1', version=2), None)
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertEqual(cache.get('answer2'), None)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertEqual(cache.get('answer3'), None)
self.assertEqual(cache.get('answer3', version=1), None)
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertEqual(caches['v2'].get('answer3', version=1), None)
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertEqual(cache.get('answer4', version=2), None)
self.assertEqual(caches['v2'].get('answer4'), None)
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertEqual(caches['v2'].get('answer4', version=2), None)
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertEqual(cache.get('answer2', version=1), None)
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), None)
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), None)
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertEqual(cache.get('answer1', version=1), None)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), None)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), None)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), None)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1),
{'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']),
{'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2),
{'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']),
{'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2),
{'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1),
{'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.get('answer', version=3), None)
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), None)
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2', version=3), None)
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertEqual(caches['v2'].get('answer2'), None)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), None)
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
self.assertRaises(ValueError, cache.incr_version, 'does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertEqual(cache.get('answer'), None)
self.assertEqual(cache.get('answer', version=1), None)
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertEqual(cache.get('answer', version=2), None)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertEqual(caches['v2'].get('answer2', version=1), None)
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertEqual(caches['v2'].get('answer2'), None)
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertEqual(caches['v2'].get('answer2', version=2), None)
self.assertRaises(ValueError, cache.decr_version, 'does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(caches['custom_key'].get('answer1'), None)
self.assertEqual(caches['custom_key2'].get('answer1'), None)
caches['custom_key'].set('answer2', 42)
self.assertEqual(cache.get('answer2'), None)
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpickable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode('utf-8'))
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.add('unpickable', Unpickable())
def test_set_fail_on_pickleerror(self):
"See https://code.djangoproject.com/ticket/21200"
with self.assertRaises(pickle.PickleError):
cache.set('unpickable', Unpickable())
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super calls needs to happen first for the settings override.
super(DBCacheTests, self).setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super(DBCacheTests, self).tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0, interactive=False)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
stdout = six.StringIO()
management.call_command(
'createcachetable',
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
stdout = six.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=stdout
)
self.assertEqual(stdout.getvalue(),
"Cache table 'test cache table' created.\n")
def test_clear_commits_transaction(self):
# Ensure the database transaction is committed (#19896)
cache.set("key1", "spam")
cache.clear()
transaction.rollback()
self.assertEqual(cache.get("key1"), None)
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter(object):
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
multi_db = True
def test_createcachetable_observes_database_router(self):
old_routers = router.routers
try:
router.routers = [DBCacheRouter()]
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable',
database='default',
verbosity=0, interactive=False)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create the table
# 3: create the index
with self.assertNumQueries(3, using='other'):
management.call_command('createcachetable',
database='other',
verbosity=0, interactive=False)
finally:
router.routers = old_routers
class PicklingSideEffect(object):
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
if self.cache._lock.active_writers:
self.locked = True
return {}
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super(LocMemCacheTests, self).setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Check that multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertEqual(caches['other'].get('value'), None)
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
memcached_params = {}
for _cache_params in settings.CACHES.values():
if _cache_params['BACKEND'].startswith('django.core.cache.backends.memcached.'):
memcached_params = _cache_params
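# An illustrative settings entry that would satisfy the check above
# (added for demonstration; the address is hypothetical):
#
# CACHES = {
#     'memcached': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': '127.0.0.1:11211',
#     },
# }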
@unittest.skipUnless(memcached_params, "memcached not available")
@override_settings(CACHES=caches_setting_for_tests(base=memcached_params))
class MemcachedCacheTests(BaseCacheTests, TestCase):
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
        step (for speed reasons); we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, cache.set, 'a' * 251, 'value')
# Explicitly display a skipped test if no configured cache uses MemcachedCache
@unittest.skipUnless(
memcached_params.get('BACKEND') == 'django.core.cache.backends.memcached.MemcachedCache',
"cache with python-memcached library not available")
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key, cache in settings.CACHES.items():
if cache['BACKEND'] == 'django.core.cache.backends.memcached.MemcachedCache':
self.assertEqual(caches[cache_key]._cache.pickleProtocol,
pickle.HIGHEST_PROTOCOL)
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super(FileBasedCacheTests, self).setUp()
self.dirname = tempfile.mkdtemp()
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
def tearDown(self):
shutil.rmtree(self.dirname)
super(FileBasedCacheTests, self).tearDown()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
        self.assertTrue(os.path.exists(self.dirname))
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class GetCacheTests(IgnorePendingDeprecationWarningsMixin, TestCase):
def test_simple(self):
from django.core.cache import caches, get_cache
self.assertIsInstance(
caches[DEFAULT_CACHE_ALIAS],
get_cache('default').__class__
)
cache = get_cache(
'django.core.cache.backends.dummy.DummyCache',
**{'TIMEOUT': 120}
)
self.assertEqual(cache.default_timeout, 120)
self.assertRaises(InvalidCacheBackendError, get_cache, 'does_not_exist')
def test_close(self):
from django.core import signals
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_deprecated(self):
from django.core.cache import get_cache
from django.core import signals
cache = get_cache('cache.closeable_cache.CacheClass')
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(TestCase):
"""Tests that verify that settings having Cache arguments with a TIMEOUT
set to `None` will create Caches that will set non-expiring keys.
This fixes ticket #22085.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined inside the __init__() method of the
:class:`django.core.cache.backends.base.BaseCache` type.
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIs(None, cache.default_timeout)
self.assertEqual(None, cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertNotEqual(None, cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
    def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertEqual(None, cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class CacheUtils(TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.host = 'www.example.com'
self.path = '/cache/test/'
self.factory = RequestFactory(HTTP_HOST=self.host)
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertTrue(get_cache_key(request1) != get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, set(['private'])),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, set(['private'])),
('private', {'public': True}, set(['public'])),
('public', {'public': True}, set(['public'])),
('public', {'private': True}, set(['private'])),
('must-revalidate,max-age=60,private', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
('must-revalidate,max-age=60,public', {'private': True}, set(['must-revalidate', 'max-age=60', 'private'])),
('must-revalidate,max-age=60', {'public': True}, set(['must-revalidate', 'max-age=60', 'public'])),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertNotEqual(get_cache_data, None)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=(
('en', 'English'),
('es', 'Spanish'),
),
)
class CacheI18nTest(TestCase):
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
# This is tightly coupled to the implementation,
# but it's the most straightforward way to test the key.
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = force_text(timezone.get_current_timezone_name(), errors='ignore')
tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_with_non_ascii_tzname(self):
# Regression test for #17476
class CustomTzName(timezone.UTC):
name = ''
def tzname(self, dt):
return self.name
request = self.factory.get(self.path)
response = HttpResponse()
with timezone.override(CustomTzName()):
CustomTzName.name = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
CustomTzName.name = 'Hora estándar de Argentina' # unicode
sanitized_name = 'Hora_estndar_de_Argentina'
self.assertIn(sanitized_name, learn_cache_key(request, response),
"Cache keys should include the time zone name when time zones are active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertEqual(get_cache_data, None)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data, None)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertNotEqual(get_cache_data, None)
self.assertEqual(get_cache_data.content, en_message.encode())
# Check that we use etags
self.assertTrue(get_cache_data.has_header('ETag'))
# Check that we can disable etags
with self.settings(USE_ETAGS=False):
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertFalse(get_cache_data.has_header('ETag'))
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_ETAGS=True,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# This test passes on Python < 3.3 even without the corresponding code
# in UpdateCacheMiddleware, because pickling a StreamingHttpResponse
# fails (http://bugs.python.org/issue14288). LocMemCache silently
# swallows the exception and doesn't store the response in cache.
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(TestCase):
def setUp(self):
super(CacheMiddlewareTest, self).setUp()
self.factory = RequestFactory()
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super(CacheMiddlewareTest, self).tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
self.assertEqual(as_view_decorator.cache_alias, 'default') # Value of DEFAULT_CACHE_ALIAS from django.core.cache
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertEqual(result, None)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertEqual(result, None)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertNotEqual(result, None)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
        # ... but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
        # ... even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(TestCase):
"""
Tests various headers w/ TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
def setUp(self):
self.path = '/cache/test/'
self.factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = TemplateResponse(HttpResponse(), Template("This is a test"))
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = TemplateResponse(HttpResponse(), Template("This is a test"))
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# Verify that a specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = TemplateResponse(HttpResponse(), Template("This is a test"))
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
# Verify that the querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
@override_settings(USE_ETAGS=False)
def test_without_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertFalse(response.has_header('ETag'))
@override_settings(USE_ETAGS=True)
def test_with_etag(self):
response = TemplateResponse(HttpResponse(), Template("This is a test"))
self.assertFalse(response.has_header('ETag'))
patch_response_headers(response)
self.assertFalse(response.has_header('ETag'))
response = response.render()
self.assertTrue(response.has_header('ETag'))
class TestEtagWithAdmin(TestCase):
# See https://code.djangoproject.com/ticket/16003
urls = "admin_views.urls"
def test_admin(self):
with self.settings(USE_ETAGS=False):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertFalse(response.has_header('ETag'))
with self.settings(USE_ETAGS=True):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 302)
self.assertTrue(response.has_header('ETag'))
class TestMakeTemplateFragmentKey(TestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key,
'template.cache.foo.900150983cd24fb0d6963f7d28e17f72')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key,
'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key,
'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469')
class CacheHandlerTest(TestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertTrue(cache1 is cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertFalse(c[0] is c[1])
| bsd-3-clause | 8,306,885,958,215,502,000 | 38.638473 | 121 | 0.623275 | false |
jamespcole/home-assistant | homeassistant/components/modbus/switch.py | 1 | 7154 | """Support for Modbus switches."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_COMMAND_OFF, CONF_COMMAND_ON, CONF_NAME, CONF_SLAVE, STATE_ON)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.restore_state import RestoreEntity
from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_COIL = 'coil'
CONF_COILS = 'coils'
CONF_REGISTER = 'register'
CONF_REGISTER_TYPE = 'register_type'
CONF_REGISTERS = 'registers'
CONF_STATE_OFF = 'state_off'
CONF_STATE_ON = 'state_on'
CONF_VERIFY_REGISTER = 'verify_register'
CONF_VERIFY_STATE = 'verify_state'
DEPENDENCIES = ['modbus']
REGISTER_TYPE_HOLDING = 'holding'
REGISTER_TYPE_INPUT = 'input'
REGISTERS_SCHEMA = vol.Schema({
vol.Required(CONF_COMMAND_OFF): cv.positive_int,
vol.Required(CONF_COMMAND_ON): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING):
vol.In([REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]),
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_STATE_OFF): cv.positive_int,
vol.Optional(CONF_STATE_ON): cv.positive_int,
vol.Optional(CONF_VERIFY_REGISTER): cv.positive_int,
vol.Optional(CONF_VERIFY_STATE, default=True): cv.boolean,
})
COILS_SCHEMA = vol.Schema({
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
})
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_COILS, CONF_REGISTERS),
PLATFORM_SCHEMA.extend({
vol.Optional(CONF_COILS): [COILS_SCHEMA],
vol.Optional(CONF_REGISTERS): [REGISTERS_SCHEMA],
}))
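# Illustrative configuration.yaml sketch (added for demonstration; the keys
# mirror COILS_SCHEMA and REGISTERS_SCHEMA above, the values are
# hypothetical):
#
# switch:
#   - platform: modbus
#     coils:
#       - name: Fan
#         slave: 1
#         coil: 0
#     registers:
#       - name: Pump
#         slave: 1
#         register: 16
#         command_on: 1
#         command_off: 0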
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Read configuration and create Modbus devices."""
switches = []
if CONF_COILS in config:
for coil in config.get(CONF_COILS):
hub_name = coil.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
switches.append(ModbusCoilSwitch(
hub, coil.get(CONF_NAME), coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
if CONF_REGISTERS in config:
for register in config.get(CONF_REGISTERS):
hub_name = register.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
switches.append(ModbusRegisterSwitch(
hub,
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_COMMAND_ON),
register.get(CONF_COMMAND_OFF),
register.get(CONF_VERIFY_STATE),
register.get(CONF_VERIFY_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_STATE_ON),
register.get(CONF_STATE_OFF)))
add_entities(switches)
class ModbusCoilSwitch(ToggleEntity, RestoreEntity):
"""Representation of a Modbus coil switch."""
def __init__(self, hub, name, slave, coil):
"""Initialize the coil switch."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._is_on = None
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._is_on = state.state == STATE_ON
@property
def is_on(self):
"""Return true if switch is on."""
return self._is_on
@property
def name(self):
"""Return the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Set switch on."""
self._hub.write_coil(self._slave, self._coil, True)
def turn_off(self, **kwargs):
"""Set switch off."""
self._hub.write_coil(self._slave, self._coil, False)
def update(self):
"""Update the state of the switch."""
result = self._hub.read_coils(self._slave, self._coil, 1)
try:
self._is_on = bool(result.bits[0])
except AttributeError:
_LOGGER.error(
'No response from hub %s, slave %s, coil %s',
self._hub.name, self._slave, self._coil)
class ModbusRegisterSwitch(ModbusCoilSwitch):
"""Representation of a Modbus register switch."""
# pylint: disable=super-init-not-called
def __init__(self, hub, name, slave, register, command_on, command_off,
verify_state, verify_register, register_type, state_on,
state_off):
"""Initialize the register switch."""
self._hub = hub
self._name = name
self._slave = slave
self._register = register
self._command_on = command_on
self._command_off = command_off
self._verify_state = verify_state
self._verify_register = (
verify_register if verify_register else self._register)
self._register_type = register_type
if state_on is not None:
self._state_on = state_on
else:
self._state_on = self._command_on
if state_off is not None:
self._state_off = state_off
else:
self._state_off = self._command_off
self._is_on = None
def turn_on(self, **kwargs):
"""Set switch on."""
self._hub.write_register(self._slave, self._register, self._command_on)
if not self._verify_state:
self._is_on = True
def turn_off(self, **kwargs):
"""Set switch off."""
self._hub.write_register(
self._slave, self._register, self._command_off)
if not self._verify_state:
self._is_on = False
def update(self):
"""Update the state of the switch."""
if not self._verify_state:
return
value = 0
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._slave, self._register, 1)
else:
result = self._hub.read_holding_registers(
self._slave, self._register, 1)
try:
value = int(result.registers[0])
except AttributeError:
_LOGGER.error(
"No response from hub %s, slave %s, register %s",
self._hub.name, self._slave, self._verify_register)
if value == self._state_on:
self._is_on = True
elif value == self._state_off:
self._is_on = False
else:
_LOGGER.error(
"Unexpected response from hub %s, slave %s "
"register %s, got 0x%2x",
self._hub.name, self._slave, self._verify_register, value)
| apache-2.0 | -460,554,690,327,611,300 | 32.745283 | 79 | 0.599385 | false |
google/mobly | tests/mobly/controllers/android_device_lib/callback_handler_test.py | 1 | 5977 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from mobly.controllers.android_device_lib import callback_handler
from mobly.controllers.android_device_lib import jsonrpc_client_base
MOCK_CALLBACK_ID = "1-0"
MOCK_RAW_EVENT = {
'callbackId': '2-1',
'name': 'AsyncTaskResult',
'time': 20460228696,
'data': {
'exampleData': "Here's a simple event.",
'successful': True,
'secretNumber': 12
}
}
class CallbackHandlerTest(unittest.TestCase):
"""Unit tests for mobly.controllers.android_device_lib.callback_handler.
"""
def test_timeout_value(self):
self.assertGreaterEqual(jsonrpc_client_base._SOCKET_READ_TIMEOUT,
callback_handler.MAX_TIMEOUT)
def test_callback_id_property(self):
mock_event_client = mock.Mock()
handler = callback_handler.CallbackHandler(callback_id=MOCK_CALLBACK_ID,
event_client=mock_event_client,
ret_value=None,
method_name=None,
ad=mock.Mock())
self.assertEqual(handler.callback_id, MOCK_CALLBACK_ID)
with self.assertRaisesRegex(AttributeError, "can't set attribute"):
handler.callback_id = 'ha'
def test_event_dict_to_snippet_event(self):
mock_event_client = mock.Mock()
mock_event_client.eventWaitAndGet = mock.Mock(return_value=MOCK_RAW_EVENT)
handler = callback_handler.CallbackHandler(callback_id=MOCK_CALLBACK_ID,
event_client=mock_event_client,
ret_value=None,
method_name=None,
ad=mock.Mock())
event = handler.waitAndGet('ha')
self.assertEqual(event.name, MOCK_RAW_EVENT['name'])
self.assertEqual(event.creation_time, MOCK_RAW_EVENT['time'])
self.assertEqual(event.data, MOCK_RAW_EVENT['data'])
self.assertEqual(event.callback_id, MOCK_RAW_EVENT['callbackId'])
def test_wait_and_get_timeout(self):
mock_event_client = mock.Mock()
java_timeout_msg = ('com.google.android.mobly.snippet.event.'
'EventSnippet$EventSnippetException: timeout.')
mock_event_client.eventWaitAndGet = mock.Mock(
side_effect=jsonrpc_client_base.ApiError(mock.Mock(), java_timeout_msg))
handler = callback_handler.CallbackHandler(callback_id=MOCK_CALLBACK_ID,
event_client=mock_event_client,
ret_value=None,
method_name=None,
ad=mock.Mock())
expected_msg = 'Timed out after waiting .*s for event "ha" .*'
with self.assertRaisesRegex(callback_handler.TimeoutError, expected_msg):
handler.waitAndGet('ha')
def test_wait_for_event(self):
mock_event_client = mock.Mock()
mock_event_client.eventWaitAndGet = mock.Mock(return_value=MOCK_RAW_EVENT)
handler = callback_handler.CallbackHandler(callback_id=MOCK_CALLBACK_ID,
event_client=mock_event_client,
ret_value=None,
method_name=None,
ad=mock.Mock())
def some_condition(event):
return event.data['successful']
    event = handler.waitForEvent('AsyncTaskResult', some_condition, 0.01)
    self.assertTrue(some_condition(event))
def test_wait_for_event_negative(self):
mock_event_client = mock.Mock()
mock_event_client.eventWaitAndGet = mock.Mock(return_value=MOCK_RAW_EVENT)
handler = callback_handler.CallbackHandler(callback_id=MOCK_CALLBACK_ID,
event_client=mock_event_client,
ret_value=None,
method_name=None,
ad=mock.Mock())
expected_msg = (
'Timed out after 0.01s waiting for an "AsyncTaskResult" event that'
' satisfies the predicate "some_condition".')
def some_condition(event):
return False
with self.assertRaisesRegex(callback_handler.TimeoutError, expected_msg):
handler.waitForEvent('AsyncTaskResult', some_condition, 0.01)
def test_wait_for_event_max_timeout(self):
"""waitForEvent should not raise the timeout exceed threshold error.
"""
mock_event_client = mock.Mock()
mock_event_client.eventWaitAndGet = mock.Mock(return_value=MOCK_RAW_EVENT)
handler = callback_handler.CallbackHandler(callback_id=MOCK_CALLBACK_ID,
event_client=mock_event_client,
ret_value=None,
method_name=None,
ad=mock.Mock())
def some_condition(event):
return event.data['successful']
big_timeout = callback_handler.MAX_TIMEOUT * 2
# This line should not raise.
event = handler.waitForEvent('AsyncTaskResult',
some_condition,
timeout=big_timeout)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 958,428,415,450,603,900 | 42.948529 | 80 | 0.575205 | false |
jgardner1/Python-Metrics | setup.py | 1 | 1247 | from setuptools import setup, find_packages
import sys, os
version = '0.2'
setup(name='pymetrics',
version=version,
description="A metrics library to time and count what happens during a process.",
long_description="""\
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Pylons',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Utilities',
],
keywords='metrics timer',
author='Jonathan Gardner',
author_email='[email protected]',
url='https://github.com/jgardner1/Python-Metrics',
license='GNU Affero General Public License v3',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| agpl-3.0 | 4,747,061,692,150,908,000 | 32.702703 | 87 | 0.590217 | false |
nke001/attention-lvcsr | libs/Theano/theano/gof/cc.py | 1 | 71042 | """
Defines Linkers that deal with C implementations.
"""
from __future__ import print_function
# Python imports
from copy import copy
import os
import sys
import logging
import numpy
import theano
from theano import config
from theano.compat import PY3
from theano.compat import izip
from six import string_types, reraise
from six.moves import StringIO, xrange
# Note that we need to do this before importing cutils, since when there is
# no theano cache dir initialized yet, importing cutils may require compilation
# of cutils_ext.
from theano.configparser import AddConfigVar, StrParam
# gof imports
from theano.gof import graph
from theano.gof import link
from theano.gof import utils
from theano.gof import cmodule
from theano.gof.compilelock import get_lock, release_lock
from theano.gof.callcache import CallCache
AddConfigVar('gcc.cxxflags',
"Extra compiler flags for gcc",
StrParam(""))
_logger = logging.getLogger("theano.gof.cc")
run_cthunk = None # Will be imported only when needed.
def get_module_cache(init_args=None):
"""
:param init_args: If not None, the (k, v) pairs in this dictionary will
be forwarded to the ModuleCache constructor as keyword arguments.
"""
return cmodule.get_module_cache(config.compiledir, init_args=init_args)
_persistent_module_cache = None
def get_persistent_module_cache():
global _persistent_module_cache
if _persistent_module_cache is None:
_persistent_module_cache = CallCache(os.path.join(config.compiledir,
'persistent_cache'))
return _persistent_module_cache
class CodeBlock:
"""WRITEME
Represents a computation unit composed of declare, behavior, and cleanup.
@ivar declare: C code that declares variables for use by the computation
@ivar behavior: C code that performs the computation
@ivar cleanup: C code that cleans up things allocated or incref-ed
in behavior
"""
def __init__(self, declare, behavior, cleanup, sub):
"""
Initialize a L{CodeBlock} with templatized declare, behavior
and cleanup. The sub parameter will be used in the other
arguments' templates. sub should contain a key called 'id'
that maps to an identifier for this block.
The identifier will be used to determine the failure code and
a label to jump to. It should also contain a key called
'failure_var' that contains the name of the variable that
contains the error code.
"""
self.declare = declare
self.behavior = behavior
# the dummy is because gcc throws an error when a label's
# right next to a closing brace (maybe there's an ignore flag
# for that...)
# we need the label even if cleanup is empty because the
# behavior block jumps there on failure
self.cleanup = ("__label_%(id)i:\n" % sub + cleanup +
"\ndouble __DUMMY_%(id)i;\n" % sub) # % sub
def failure_code(sub):
"""Code contained in sub['fail'], usually substituted for %(fail)s.
    It sets information about the current error, then jumps to the code
    that actually handles the failure, which is defined in struct_gen().
"""
return '''{
%(failure_var)s = %(id)s;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_%(id)i;}''' % sub
def failure_code_init(sub):
"Code for failure in the struct init."
return '''{
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
return %(id)d;
}''' % sub
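# Illustration (added; hypothetical sub values): failure_code({'id': 4,
# 'failure_var': '__failure'}) renders roughly to
#
#     {
#     __failure = 4;
#     if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, ...); }
#     goto __label_4;}
#
# i.e. it records which block failed in the shared failure variable and
# jumps to that block's cleanup label generated by CodeBlock above.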
def code_gen(blocks):
"""WRITEME From a list of L{CodeBlock} instances, returns a string
that executes them all in sequence. eg for C{(decl1, task1,
cleanup1)} and C{(decl2, task2, cleanup2)} the returned string
will be of the form::
decl1
decl2
{
task1
{
task2
cleanup2
}
cleanup1
}
"""
decl = ""
head = ""
tail = ""
for block in blocks:
decl += block.declare
head = head + ("\n{\n%s" % block.behavior)
tail = ("%s\n}\n" % block.cleanup) + tail
return decl + head + tail
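# A hedged sketch (added for illustration; never called): two blocks nest
# exactly as code_gen's docstring describes; all names are hypothetical.
def _code_gen_sketch():
    sub1 = {'id': 1, 'failure_var': '__failure'}
    sub2 = {'id': 2, 'failure_var': '__failure'}
    b1 = CodeBlock("decl1;\n", "task1;\n", "cleanup1;\n", sub1)
    b2 = CodeBlock("decl2;\n", "task2;\n", "cleanup2;\n", sub2)
    # Result: decl1 decl2 { task1 { task2 __label_2: cleanup2 }
    #                       __label_1: cleanup1 }
    return code_gen([b1, b2])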
def struct_gen(args, struct_builders, blocks, sub):
"""WRITEME
Generates a struct conforming to the following specifications:
* args -> all of the PyObject* type, stored in the struct
they represent the storage and must be length 1 python lists.
* struct_builders -> list of L{CodeBlock} instances such that
* declarations are in the struct
* behavior is in the constructor
* cleanup is in the destructor
* blocks -> list of CodeBlock instances such that
* declarations, behavior and cleanup are in the run()
method of the struct
* sub -> dictionary used to template the struct.
* failure_var -> must contain a variable name to use for
the failure code.
In a nutshell, this returns code for a struct that represents
a function with state. The state's initialization and destruction
are handled by struct_builders and the actual behavior of the
function is handled by blocks.
"""
struct_decl = ""
struct_init_head = ""
struct_init_tail = ""
struct_cleanup = ""
for block in struct_builders:
# decl are declarations that go in the struct
# init_head are in the constructor
# init_tail and cleanup do the same thing, but the former will
# be executed if any step in the constructor fails and the
# latter only at destruction time.
struct_decl += block.declare
struct_init_head = struct_init_head + ("\n%s" % block.behavior)
struct_cleanup += block.cleanup
behavior = code_gen(blocks)
# declares the storage
storage_decl = "\n".join(["PyObject* %s;" % arg for arg in args])
# in the constructor, sets the storage to the arguments
storage_set = "\n".join(["this->%s = %s;" % (arg, arg) for arg in args])
# increments the storage's refcount in the constructor
storage_incref = "\n".join(["Py_XINCREF(%s);" % arg for arg in args])
# decrements the storage's refcount in the destructor
storage_decref = "\n".join(["Py_XDECREF(this->%s);" % arg for arg in args])
args_names = ", ".join(args)
args_decl = ", ".join(["PyObject* %s" % arg for arg in args])
# The following code stores the exception data in __ERROR, which
# is a special field of the struct. __ERROR is a list of length 3
# that holds the type, the value and the traceback. After storing
# the error, we return the failure code so we know which code
# block failed.
do_return = """
if (%(failure_var)s) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return %(failure_var)s;
""" % sub
sub = dict(sub)
sub.update(locals())
# TODO: add some error checking to make sure storage_<x> are
# 1-element lists and __ERROR is a 3-elements list.
struct_code = """
namespace {
struct %(name)s {
PyObject* __ERROR;
%(storage_decl)s
%(struct_decl)s
%(name)s() {}
~%(name)s(void) {
cleanup();
}
int init(PyObject* __ERROR, %(args_decl)s) {
%(storage_incref)s
%(storage_set)s
%(struct_init_head)s
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
%(struct_cleanup)s
%(storage_decref)s
}
int run(void) {
int %(failure_var)s = 0;
%(behavior)s
%(do_return)s
}
};
}
""" % sub
return struct_code
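# Illustrative call shape for struct_gen (added; all names hypothetical):
#
#     struct_gen(args=['storage_x', 'storage_out'],
#                struct_builders=[...],  # CodeBlocks run at init()/cleanup()
#                blocks=[...],           # CodeBlocks run on each run() call
#                sub={'name': 'MyStruct', 'failure_var': '__failure'})
#
# The generated struct's init() increfs and stores the storage lists, run()
# executes code_gen(blocks), and on failure the Python exception triple is
# stashed in the __ERROR list before the failing block's id is returned.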
# The get_<x> functions complete the return value of r.get_<x>()
# with handling of the py_<name> variable.
def get_nothing(r, name, sub):
"""WRITEME"""
return ""
def get_c_declare(r, name, sub):
"""Wrapper around c_declare that declares py_name"""
# The declaration will be used by the Apply node that
# is computing it (`r.owner`), and by each of the clients.
# If some of these have `check_input=True` in their `.op`,
# it means they need `r`'s dtype to be declared, so
# we have to pass `check_input=True` to `c_declare`.
if ((any([getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
if not isinstance(c, string_types)]) or
(r.owner and
getattr(r.owner.op, 'check_input', config.check_input)))):
c_declare = r.type.c_declare(name, sub, True)
else:
c_declare = r.type.c_declare(name, sub, False)
pre = """
PyObject* py_%(name)s;
""" % locals()
return pre + c_declare
def get_c_init(r, name, sub):
"""Wrapper around c_init that initializes py_name to Py_None"""
pre = "" """
py_%(name)s = Py_None;
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + r.type.c_init(name, sub)
def get_c_extract(r, name, sub):
"""Wrapper around c_extract that initializes py_name from storage."""
# `c_extract` is called when getting the value of an apply node's
# input from the compute map, before being used by its clients.
# If one of the clients has `check_input=True`, we need to perform
# checks on the variable.
# However that code is not used by C code of the apply node creating
# this variable, so there is no need to check `r.owner.op.check_input`.
if any([getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
if not isinstance(c, string_types)]):
        # check_broadcast is just a hack to remove only the broadcast
        # check on the old GPU back-end. This check isn't done in the
        # new GPU back-end or on the CPU.
if any([getattr(c.op, 'check_broadcast', True)
for (c, _) in r.clients
if not isinstance(c, string_types)]):
c_extract = r.type.c_extract(name, sub, True)
else:
try:
c_extract = r.type.c_extract(
name, sub, True,
check_broadcast=False)
except TypeError as e:
c_extract = r.type.c_extract(name, sub, True)
else:
c_extract = r.type.c_extract(name, sub, False)
pre = """
py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + c_extract
def get_c_extract_out(r, name, sub):
"""Wrapper around c_extract_out that initializes py_name from storage."""
# `c_extract_out` is used to extract an output variable from
# the compute map, to be used as pre-allocated memory for `r`
# before its value gets computed.
# If the node producing `r` has `check_inputs=True`, it may
# also perform type checks on the initial value of the output,
# so we need to pass `check_input=True` to `c_extract_out`.
# However, that code is not used by potential clients of `r`,
# so we do not need to check them.
check_input = getattr(r.owner.op, 'check_input', config.check_input)
    # check_broadcast is just a hack to remove only the broadcast
    # check on the old GPU back-end. This check isn't done in the
    # new GPU back-end or on the CPU.
if getattr(r.owner.op, 'check_broadcast', True):
c_extract = r.type.c_extract_out(name, sub, check_input)
else:
try:
c_extract = r.type.c_extract_out(name, sub, check_input,
check_broadcast=False)
except TypeError as e:
c_extract = r.type.c_extract_out(name, sub, check_input)
pre = """
py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + c_extract
def get_c_cleanup(r, name, sub):
"""Wrapper around c_cleanup that decrefs py_name"""
post = """
{Py_XDECREF(py_%(name)s);}
""" % locals()
return r.type.c_cleanup(name, sub) + post
def get_c_sync(r, name, sub):
"""Wrapper around c_sync that syncs py_name with storage."""
return """
if (!%(failure_var)s) {
%(sync)s
PyObject* old = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
PyList_SET_ITEM(storage_%(name)s, 0, py_%(name)s);
{Py_XDECREF(old);}
}
""" % dict(sync=r.type.c_sync(name, sub), name=name, **sub)
def apply_policy(policy, r, name, sub):
"""WRITEME
@param policy: list of functions that map a L{Variable} to a string,
or a single such function
@type r: L{Variable}
@return: C{policy[0](r) + policy[1](r) + ...}
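    A minimal sketch with hypothetical policies (not part of the linker):
        def p1(r, name, sub): return "A;"
        def p2(r, name, sub): return "B;"
        apply_policy((p1, p2), r, name, sub)  # -> "A;B;"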
"""
if isinstance(policy, (list, tuple)):
ret = ""
for sub_policy in policy:
ret += sub_policy(r, name, sub)
return ret
return policy(r, name, sub)
def struct_variable_codeblocks(variable, policies, id, symbol_table, sub):
"""WRITEME
variable -> a Variable
policies -> a pair of tuples ((declare_policy, behavior_policy,
cleanup_policy), -- at construction
(declare_policy, behavior_policy,
cleanup_policy)) -- at execution
the first list will produce an element of the
'struct_builders' argument in struct_gen the second
list will produce an element of the 'blocks' argument
in struct_gen
id -> the id assigned to this variable's task in the computation
symbol_table -> a dict that maps variables to variable names. It
is not read by this function but a variable name for the
variable is computed and added to the table.
sub -> dictionary for use by L{CodeBlock}.
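    For example, the variable assigned id 4 is named "V4" in the
    generated code; the returned struct_builder uses task id `id` and
    the returned block uses `id + 1` (a sketch of the convention that
    code_gen below relies upon).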
"""
name = "V%i" % id
symbol_table[variable] = name
sub = dict(sub)
# sub['name'] = name
sub['id'] = id
sub['fail'] = failure_code_init(sub)
sub['py_ptr'] = "py_%s" % name
sub['stor_ptr'] = "storage_%s" % name
# struct_declare, struct_behavior, struct_cleanup, sub)
struct_builder = CodeBlock(*[apply_policy(policy, variable, name, sub)
for policy in policies[0]] + [sub])
sub['id'] = id + 1
sub['fail'] = failure_code(sub)
sub['py_ptr'] = "py_%s" % name
sub['stor_ptr'] = "storage_%s" % name
# run_declare, run_behavior, run_cleanup, sub)
block = CodeBlock(*[apply_policy(policy, variable, name, sub)
for policy in policies[1]] + [sub])
return struct_builder, block
class CLinker(link.Linker):
"""WRITEME
Creates C code for an fgraph, compiles it and returns callables
through make_thunk and make_function that make use of the compiled
code.
no_recycling can contain a list of Variables that belong to the fgraph.
If a Variable is in no_recycling, CLinker will clear the output storage
associated to it during the computation (to avoid reusing it).
"""
def __init__(self, schedule=None):
self.fgraph = None
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
"""WRITEME"""
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)().accept(fgraph, no_recycling)
# raise Exception("Cannot accept from a Linker that is already"
# " tied to another FunctionGraph.")
self.fgraph = fgraph
self.fetch_variables()
self.no_recycling = no_recycling
return self
def fetch_variables(self):
"""WRITEME
Fills the inputs, outputs, variables, orphans,
temps and node_order fields.
"""
fgraph = self.fgraph
self.inputs = fgraph.inputs
self.outputs = fgraph.outputs
self.node_order = self.schedule(fgraph)
# list(fgraph.variables)
# We need to include the unused inputs in our variables,
# otherwise we can't pass them to the module.
self.variables = [var for var in self.inputs if not len(var.clients)]
self.variables += graph.variables(self.inputs, self.outputs)
# This adds a hidden input which is the context for each node
# that needs it
self.contexts = dict()
for node in self.node_order:
ctx = node.run_context()
if ctx is not graph.NoContext:
# try to avoid creating more than one variable for the
# same context.
if ctx in self.contexts:
var = self.contexts[ctx]
assert var.type == node.context_type
var.clients.append((node, 'context'))
else:
var = graph.Constant(node.context_type, ctx)
var.clients = [(node, 'context')]
self.contexts[ctx] = var
self.variables.append(var)
# The orphans field is listified to ensure a consistent order.
# list(fgraph.orphans.difference(self.outputs))
self.orphans = list(r for r in self.variables
if isinstance(r, graph.Constant) and
r not in self.inputs)
self.temps = list(set(self.variables).difference(
self.inputs).difference(self.outputs).difference(self.orphans))
self.consts = []
def code_gen(self):
"""WRITEME
Generates code for a struct that does the computation of the fgraph and
stores it in the struct_code field of the instance.
If reuse_storage is True, outputs and temporaries will be stored in
the struct so they can be reused each time a function returned by
make_function is called, which means that the output of a call will
be invalidated by the next. If reuse_storage is False, that problem
is avoided.
This method caches its computations.
"""
if getattr(self, 'struct_code', False):
return self.struct_code
no_recycling = self.no_recycling
self.consts = []
c_support_code_apply = []
c_init_code_apply = []
symbol = {}
# (init_)tasks contains a list of pairs (Op/Variable, task_name)
# e.g. (x, 'get') or (x+y, 'code')
init_tasks = []
tasks = []
# (init_)blocks contain CodeBlock instances. There is a direct
        # correspondence with (init_)tasks.
init_blocks = []
blocks = []
failure_var = "__failure"
id = 1
for variable in self.variables:
sub = dict(failure_var=failure_var)
# it might be possible to inline constant variables as C literals
# policy = [[what to declare in the struct,
# what to do at construction,
# what to do at destruction],
# [what to declare in each run,
# what to do at the beginning of each run,
# what to do at the end of each run]]
if variable in self.inputs:
# We need to extract the new inputs at each run
# they do not need to be relayed to Python, so we don't sync.
# If the variable is both an input and an output, there is
# no need to synchronize either, it is already up-to-date.
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_extract, get_c_cleanup]]
elif variable in self.orphans:
if not isinstance(variable, graph.Constant):
raise TypeError("All orphans to CLinker must be Constant"
" instances.", variable)
if isinstance(variable, graph.Constant):
try:
symbol[variable] = ("(" + variable.type.c_literal(
variable.data) + ")")
self.consts.append(variable)
self.orphans.remove(variable)
continue
except (utils.MethodNotDefined, NotImplementedError):
pass
                # orphans are not inputs so we'll just fetch them
# when we initialize the struct and assume they stay
# the same
policy = [[get_c_declare, get_c_extract, get_c_cleanup],
[get_nothing, get_nothing, get_nothing]]
elif variable in self.temps:
                # temps don't need to be extracted from Python, so we
                # call c_init rather than c_extract; they do not need
                # to be relayed to Python, so we don't sync.
if variable.type.c_is_simple() or variable in no_recycling:
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_init, get_c_cleanup]]
else:
# it is useful for complex temps to reuse storage
# at each run, so we only clean up in the
# destructor
policy = [[get_c_declare, get_c_init, get_c_cleanup],
[get_nothing, get_nothing, get_nothing]]
elif variable in self.outputs:
if variable.type.c_is_simple() or variable in no_recycling:
# Do not extract output from Python
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_init,
(get_c_sync, get_c_cleanup)]]
else:
# We try to use the output that is pre-allocated.
# The linker will usually just reuse the storage
# from last run, but in the first execution,
# it will be None.
# We clean-up at each run to enable garbage collection
# in the Linker.
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_extract_out,
(get_c_sync, get_c_cleanup)]]
else:
raise Exception("what the fuck")
builder, block = struct_variable_codeblocks(variable, policy,
id, symbol, sub)
# each Variable generates two CodeBlocks, one to
# declare/initialize/destroy struct variables and the
# other to declare/extract/cleanup each time the function
# is run.
# Typically, only one of the two actually does anything
# (see all the possible combinations above)
init_tasks.append((variable, 'init', id))
init_blocks.append(builder)
tasks.append((variable, 'get', id + 1))
blocks.append(block)
id += 2
for node_num, node in enumerate(self.node_order):
sub = dict(failure_var=failure_var)
ctx = node.run_context()
if ctx is not graph.NoContext:
context_var = symbol[self.contexts[ctx]]
# The placeholder will be replaced by a hash of the entire
# code (module + support code) in DynamicModule.code.
# This ensures that, when defining functions in support code,
# we cannot have two different functions, in different modules,
# that have the same name.
# It was problematic, in particular, on Mac OS X (10.6 and 10.7)
# when defining CUDA kernels (with Cuda 4.2 and 5.0). See gh-1172.
name = "node_<<<<HASH_PLACEHOLDER>>>>_%i" % node_num
isyms = [symbol[r] for r in node.inputs]
osyms = [symbol[r] for r in node.outputs]
# Make the CodeBlock for c_code
sub['id'] = id
sub['fail'] = failure_code(sub)
if ctx is not graph.NoContext:
sub['context'] = context_var
sub_struct = dict()
sub_struct['id'] = id + 1
sub_struct['fail'] = failure_code_init(sub)
if ctx is not graph.NoContext:
# Since context inputs are always constants they are
# guaranteed to be available in the struct init code.
sub_struct['context'] = context_var
struct_support = ""
struct_init = ""
struct_cleanup = ""
op = node.op
# type-specific support code
try:
c_support_code_apply.append(op.c_support_code_apply(node,
name))
except utils.MethodNotDefined:
pass
else:
# The following will be executed if the "try" block succeeds
assert isinstance(c_support_code_apply[-1], string_types), (
str(node.op) +
" didn't return a string for c_support_code_apply")
try:
c_init_code_apply.append(op.c_init_code_apply(node, name))
except utils.MethodNotDefined:
pass
else:
assert isinstance(c_init_code_apply[-1], string_types), (
str(node.op) +
" didn't return a string for c_init_code_apply")
try:
struct_init = op.c_init_code_struct(node, name, sub_struct)
assert isinstance(struct_init, string_types), (
str(node.op) +
" didn't return a string for c_init_code_struct")
except utils.MethodNotDefined:
pass
try:
struct_support = op.c_support_code_struct(node, name)
assert isinstance(struct_support, string_types), (
str(node.op) +
" didn't return a string for c_support_code_struct")
except utils.MethodNotDefined:
pass
try:
struct_cleanup = op.c_cleanup_code_struct(node, name)
assert isinstance(struct_cleanup, string_types), (
str(node.op) +
" didn't return a string for c_cleanup_code_struct")
except utils.MethodNotDefined:
pass
# emit c_code
try:
behavior = op.c_code(node, name, isyms, osyms, sub)
except utils.MethodNotDefined:
raise NotImplementedError("%s cannot produce C code" % op)
assert isinstance(behavior, string_types), (
str(node.op) + " didn't return a string for c_code")
            # Add the Op class name as a comment, to make the generated
            # C code easier to read. Note that this prevents different Ops
            # that generate the same C code from being merged, but that is
            # not expected to happen.
behavior = ("// Op class " + node.op.__class__.__name__ + "\n" +
behavior)
try:
cleanup = op.c_code_cleanup(node, name, isyms, osyms, sub)
except utils.MethodNotDefined:
cleanup = ""
_logger.info('compiling un-versioned Apply %s', str(node))
blocks.append(CodeBlock("", behavior, cleanup, sub))
tasks.append((node, 'code', id))
id += 1
init_blocks.append(CodeBlock(struct_support, struct_init,
struct_cleanup, {'id': id}))
init_tasks.append((node, 'init', id))
id += 1
# List of arg names for use in struct_gen. Note the call to
# uniq: duplicate inputs must only be passed once because they
# are mapped to the same name. Duplicates are defined by (a
# is b), rather than (a==b) since Constant instances can
# compare equal to equivalent Constant instances.
args = []
args += ["storage_%s" % symbol[variable] for variable
in utils.uniq(self.inputs + self.outputs + self.orphans)]
# <<<<HASH_PLACEHOLDER>>>> will be replaced by a hash of the whole
# code in the file, including support code, in DynamicModule.code.
struct_name = '__struct_compiled_op_%s' % '<<<<HASH_PLACEHOLDER>>>>'
struct_code = struct_gen(args, init_blocks, blocks,
dict(failure_var=failure_var,
name=struct_name))
self.struct_code = struct_code
self.struct_name = struct_name
self.args = args
self.r2symbol = symbol
self.init_blocks = init_blocks
self.init_tasks = init_tasks
self.blocks = blocks
self.tasks = tasks
all_info = self.inputs + self.outputs + self.orphans
self.c_support_code_apply = c_support_code_apply
self.c_init_code_apply = c_init_code_apply
if (self.init_tasks, self.tasks) != self.get_init_tasks():
print("init_tasks\n", self.init_tasks, file=sys.stderr)
print(self.get_init_tasks()[0], file=sys.stderr)
print("tasks\n", self.tasks, file=sys.stderr)
print(self.get_init_tasks()[1], file=sys.stderr)
assert (self.init_tasks, self.tasks) == self.get_init_tasks()
# List of indices that should be ignored when passing the arguments
# (basically, everything that the previous call to uniq eliminated)
self.dupidx = [i for i, x in enumerate(all_info)
if all_info.count(x) > 1 and all_info.index(x) != i]
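        # For example, if all_info == [x, y, x], then dupidx == [2]:
        # the second occurrence of x is skipped when arguments are passed.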
return self.struct_code
def support_code(self):
"""WRITEME
Returns a list of support code strings that are needed by
one or more Variables or Ops. The support code from Variables is
added before the support code from Ops.
This might contain duplicates.
"""
ret = []
# generic support code
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret.append(x.c_support_code())
except utils.MethodNotDefined:
pass
return ret
def compile_args(self):
"""WRITEME
Returns a list of compile args that are needed by one
or more Variables or Ops.
This might contain duplicates.
"""
ret = ["-O3"]
        # These are the flags that -ffast-math would activate. They are
        # listed explicitly because FillMissing must disable some of them;
        # passing -ffast-math itself would disable all the other flags at
        # the same time.
ret += ["-fno-math-errno",
# "-funsafe-math-optimizations",
# "-fno-signaling-nans",
# "-fcx-limited-range",
# "-fno-rounding-math",
# "-ffinite-math-only",
                # the generated code declares labels even when they are
                # not used; we could instead use a gcc attribute on only
                # those labels
"-Wno-unused-label",
"-Wno-unused-variable", # idem as the precedent
"-Wno-write-strings", # generated by our code generator...
]
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_compile_args()
except utils.MethodNotDefined:
pass
c_compiler = self.c_compiler()
        ret = utils.uniq(ret)  # remove duplicates
# The args set by the compiler include the user flags. We do not want
# to reorder them
ret += c_compiler.compile_args()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
for i in x.c_no_compile_args():
try:
ret.remove(i)
except ValueError:
pass # in case the value is not there
except utils.MethodNotDefined:
pass
return ret
def headers(self):
"""WRITEME
Returns a list of headers that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_headers()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def init_code(self):
"""
Return a list of code snippets that have to be inserted
in the module initialization code.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_init_code()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def c_compiler(self):
c_compiler = None
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
if hasattr(x, 'c_compiler'):
x_compiler = x.c_compiler()
else:
continue
if c_compiler is None:
c_compiler = x_compiler
else:
if x_compiler and (x_compiler != c_compiler):
raise Exception('Nodes have requested specific'
' different compilers',
(c_compiler, x_compiler))
if (c_compiler is None):
return cmodule.GCC_compiler
else:
return c_compiler
def header_dirs(self):
"""WRITEME
        Returns a list of header directories that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_header_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def libraries(self):
"""WRITEME
Returns a list of libraries that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_libraries()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def lib_dirs(self):
"""WRITEME
Returns a list of lib directories that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_lib_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def __compile__(self, input_storage=None,
output_storage=None, keep_lock=False):
"""WRITEME
Compiles this linker's fgraph.
@type input_storage: list or None
@param input_storage: list of lists of length 1. In order to use
the thunk returned by __compile__, the inputs must be put in
that storage. If None, storage will be allocated.
@param output_storage: list of lists of length 1. The thunk returned
by __compile__ will put the variables of the computation in these
lists. If None, storage will be allocated.
Returns: thunk, input_storage, output_storage, error_storage
"""
error_storage = [None, None, None]
if input_storage is None:
input_storage = tuple([None] for variable in self.inputs)
if output_storage is None:
map = {}
output_storage = []
# Initialize the map with the inputs, as some outputs may
# be inputs as well.
for i, variable in enumerate(self.inputs):
map[variable] = input_storage[i]
for variable in self.outputs:
if variable not in map:
map[variable] = [None]
output_storage.append(map[variable])
input_storage = tuple(input_storage)
output_storage = tuple(output_storage)
thunk = self.cthunk_factory(error_storage,
input_storage,
output_storage,
keep_lock=keep_lock)
return (thunk,
[link.Container(input, storage) for input, storage in
izip(self.fgraph.inputs, input_storage)],
[link.Container(output, storage, True) for output, storage in
izip(self.fgraph.outputs, output_storage)],
error_storage)
def get_init_tasks(self):
init_tasks = []
tasks = []
id = 1
for v in self.variables:
if v in self.consts:
continue
if v in self.orphans and isinstance(v, graph.Constant):
try:
# constant will be inlined, no need to get
v.type.c_literal(v.data)
continue
except (utils.MethodNotDefined, NotImplementedError):
pass
init_tasks.append((v, 'init', id))
tasks.append((v, 'get', id + 1))
id += 2
for node in self.node_order:
tasks.append((node, 'code', id))
init_tasks.append((node, 'init', id + 1))
id += 2
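        # Sketch of the numbering this reproduces: variables take
        # ('init', 1)/('get', 2), ('init', 3)/('get', 4), ... pairs,
        # after which nodes take ('code', id)/('init', id + 1) pairs,
        # mirroring the ids assigned in code_gen above.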
return init_tasks, tasks
def make_thunk(self, input_storage=None, output_storage=None,
keep_lock=False):
"""WRITEME
Compiles this linker's fgraph and returns a function to perform the
computations, as well as lists of storage cells for both the
inputs and outputs.
@type input_storage: list or None
@param input_storage: list of lists of length 1. In order to use
the thunk returned by __compile__, the inputs must be put in
that storage. If None, storage will be allocated.
@param output_storage: list of lists of length 1. The thunk returned
by __compile__ will put the variables of the computation in these
lists. If None, storage will be allocated.
Returns: thunk, input_storage, output_storage
The return values can be used as follows:
f, istor, ostor = clinker.make_thunk()
istor[0].data = first_input
istor[1].data = second_input
f()
first_output = ostor[0].data
"""
init_tasks, tasks = self.get_init_tasks()
cthunk, in_storage, out_storage, error_storage = self.__compile__(
input_storage, output_storage,
keep_lock=keep_lock)
res = _CThunk(cthunk, init_tasks, tasks, error_storage)
res.nodes = self.node_order
return res, in_storage, out_storage
def cmodule_key(self):
"""Return a complete hashable signature of the module we compiled.
This function must have the property that no two programs that
compute different things yield the same key.
The key returned by this function is of the form (version, signature)
The signature has the following form:
{{{
'CLinker.cmodule_key', compilation args, libraries,
header_dirs, numpy ABI version, config md5,
(op0, input_signature0, output_signature0),
(op1, input_signature1, output_signature1),
...
(opK, input_signatureK, output_signatureK),
}}}
The signature is a tuple, some elements of which are sub-tuples.
The outer tuple has a brief header, containing the compilation options
passed to the compiler, the libraries to link against, an md5 hash
of theano.config (for all config options where "in_c_key" is True).
It is followed by elements for every node in the
topological ordering of `self.fgraph`.
If the Op of any Apply in the FunctionGraph does not have
c_code_cache_ok()==True, then this function raises a KeyError
exception.
Input Signature
---------------
Each input signature is a tuple with an element for each input
to the corresponding Apply node. Each element identifies the
type of the node input, and the nature of that input in the
graph.
The nature of a typical variable is encoded by integer pairs
``((a,b),c)``:
``a`` is the topological position of the input's owner
(-1 for graph inputs),
``b`` is the index of the variable in the owner's output list.
``c`` is a flag indicating whether the variable is in the
no_recycling set.
If a variable is also a graph output, then its position in the
outputs list is also bundled with this tuple (after the b).
The nature of a Constant instance is defined as its signature,
together with two integers: the topological position of the
first Apply using that Constant instance, and the lowest index
into that Apply's inputs that refers to that Constant. (These
two integers are a surrogate for the id() of the Constant.
The integers are important because merge-able constants have
the same signature, but require separate containers in C
code.) The membership in no_recycling is also included in the
signature.
Output Signature
----------------
The outputs of a node are entirely determined by the node's Op
and the nature of the inputs, but the set of outputs that may
be re-used by the computation (the elements of
self.no_recycling) can affect the code that is generated.
The format of each Op's output signature is a (version, no_recycle)
pair, where version is incremented if codegen() changes how it
handles the outputs, and no_recycle is simply a list of
booleans, indicating whether each output is in the
no_recycling set. Older versions of compiled modules only have the
no_recycle list.
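        For example, a variable nature ``((2, 0), False)`` reads: output 0
        of the Apply node at topological position 2, not in the
        no_recycling set; an output signature ``(1, (False, True))`` means
        version 1 with only the second output in no_recycling.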
"""
return self.cmodule_key_(self.fgraph, self.no_recycling,
compile_args=self.compile_args(),
libraries=self.libraries(),
header_dirs=self.header_dirs(),
c_compiler=self.c_compiler(),
)
def cmodule_key_(self, fgraph, no_recycling, compile_args=None,
libraries=None, header_dirs=None, insert_config_md5=True,
c_compiler=None):
"""
Do the actual computation of cmodule_key in a static method
to allow it to be reused in scalar.Composite.__eq__
"""
if compile_args is None:
compile_args = []
if libraries is None:
libraries = []
if header_dirs is None:
header_dirs = []
order = self.schedule(fgraph)
# set of variables that have been computed by nodes we have
# seen 'so far' in the loop below
fgraph_computed_set = set()
fgraph_inputs_dict = dict((i, (-1, pos)) for pos, i in
enumerate(fgraph.inputs))
constant_ids = dict()
op_pos = {} # Apply -> topological position
# First we put the header, compile_args, library names and config md5
# into the signature.
sig = ['CLinker.cmodule_key'] # will be cast to tuple on return
if compile_args is not None:
            # We must sort it, as iteration order over a set is not
            # guaranteed: two sets with the same content can yield
            # different orders depending on insertion order. Sets are
            # used to remove duplicate elements.
args = sorted(compile_args)
args = tuple(args)
sig.append(args)
if libraries is not None:
# see comments for compile_args
args = sorted(libraries)
args = tuple(args)
sig.append(args)
if header_dirs is not None:
args = sorted(header_dirs)
args = tuple(args)
sig.append(args)
# We must always add the numpy ABI version here as
        # DynamicModule always adds the include <numpy/arrayobject.h>
sig.append('NPY_ABI_VERSION=0x%X' %
numpy.core.multiarray._get_ndarray_c_version())
if c_compiler:
sig.append('c_compiler_str=' + c_compiler.version_str())
# IMPORTANT: The 'md5' prefix is used to isolate the compilation
# parameters from the rest of the key. If you want to add more key
# elements, they should be before this md5 hash if and only if they
# can lead to a different compiled file with the same source code.
if insert_config_md5:
sig.append('md5:' + theano.configparser.get_config_md5())
else:
sig.append('md5: <omitted>')
error_on_play = [False]
def in_sig(i, topological_pos, i_idx):
            # assert that every input to every node is one of:
# - an fgraph input
# - an output from a node in the FunctionGraph
# - a Constant
# It is important that a variable (i)
# yield a 'position' that reflects its role in code_gen()
if isinstance(i, graph.Constant): # orphans
if id(i) not in constant_ids:
isig = (i.signature(), topological_pos, i_idx)
# If the Theano constant provides a strong hash
# (no collision for transpose, 2, 1, 0, -1, -2,
# 2 element swapped...) we put this hash in the signature
# instead of the value. This makes the key file much
# smaller for big constant arrays. Before this, we saw key
# files up to 80M.
if hasattr(isig[0], "theano_hash"):
isig = (isig[0].theano_hash(), topological_pos, i_idx)
try:
hash(isig)
except Exception:
# generic constants don't have a hashable signature
error_on_play[0] = True
return None
constant_ids[id(i)] = isig
else:
isig = constant_ids[id(i)]
# print 'SIGNATURE', i.signature()
# return i.signature()
elif i in fgraph_inputs_dict: # inputs
isig = fgraph_inputs_dict[i]
else:
if i.owner is None:
assert all(all(out is not None for out in o.outputs)
for o in order)
assert all(input.owner is None for input in fgraph.inputs)
                    raise Exception('Input is neither an fgraph input, a'
                                    ' Constant, nor the output of a node',
                                    (i, type(i), i.clients, fgraph))
if i in fgraph.outputs:
isig = (op_pos[i.owner], # outputs
i.owner.outputs.index(i),
fgraph.outputs.index(i))
else:
isig = (op_pos[i.owner], i.owner.outputs.index(i)) # temps
return (isig, i in no_recycling)
version = []
for node_pos, node in enumerate(order):
try:
# Pure Ops do not have a c_code_cache_version_apply ...
version.append(node.op.c_code_cache_version_apply(node))
except AttributeError:
pass
for i in node.inputs:
version.append(i.type.c_code_cache_version())
for o in node.outputs:
version.append(o.type.c_code_cache_version())
# add the signature for this node
sig.append((
node.op,
tuple((i.type, in_sig(i, node_pos, ipos))
for ipos, i in enumerate(node.inputs)),
(1, # Increment if cmodule change its handling of outputs
tuple(o in no_recycling for o in node.outputs))))
if error_on_play[0]:
# if one of the signatures is not hashable
# then bypass the cache mechanism and
# compile fresh every time
return None
op_pos[node] = node_pos
fgraph_computed_set.update(node.outputs)
        # Add unused inputs to the key
for ipos, var in [(i, var) for i, var in enumerate(fgraph.inputs)
if not len(var.clients)]:
sig.append((var.type, in_sig(var, -1, ipos)))
        # crystallize the signature and version
sig = tuple(sig)
version = tuple(version)
for v in version:
if not v:
# one of the ops or types here is unversioned,
# so this fgraph is entirely unversioned
return ((), sig)
return version, sig
def get_src_code(self):
mod = self.get_dynamic_module()
return mod.code()
def compile_cmodule(self, location=None):
"""
This compiles the source code for this linker and returns a
loaded module.
"""
if location is None:
location = cmodule.dlimport_workdir(config.compiledir)
mod = self.get_dynamic_module()
c_compiler = self.c_compiler()
libs = self.libraries()
preargs = self.compile_args()
compiler_name = c_compiler.__name__
if compiler_name == 'NVCC_compiler' and config.lib.amdlibm:
            # This lib does not work correctly with nvcc in device code,
            # nor with newer versions of g++ such as 4.5.1.
# example of errors: "/usr/lib/gcc/x86_64-redhat-linux/4.5.1/
# include/mmintrin.h(49): error: identifier
# "__builtin_ia32_emms" is undefined"
if '<amdlibm.h>' in mod.includes:
mod.includes.remove('<amdlibm.h>')
if '-DREPLACE_WITH_AMDLIBM' in preargs:
preargs.remove('-DREPLACE_WITH_AMDLIBM')
if 'amdlibm' in libs:
libs.remove('amdlibm')
# We want to compute the code without the lock
src_code = mod.code()
get_lock()
try:
_logger.debug("LOCATION %s", str(location))
module = c_compiler.compile_str(
module_name=mod.code_hash,
src_code=src_code,
location=location,
include_dirs=self.header_dirs(),
lib_dirs=self.lib_dirs(),
libs=libs,
preargs=preargs)
except Exception as e:
e.args += (str(self.fgraph),)
raise
finally:
release_lock()
return module
def get_dynamic_module(self):
"""Return a cmodule.DynamicModule instance full of the code
for our fgraph.
This method is cached on the first call so it can be called
multiple times without penalty.
"""
if not hasattr(self, '_mod'):
self.code_gen()
mod = cmodule.DynamicModule()
# The code of instantiate
# the 1 is for error_storage
code = self.instantiate_code(1 + len(self.args))
instantiate = cmodule.ExtFunction('instantiate', code,
method=cmodule.METH_VARARGS)
# ['error_storage'] + argnames,
# local_dict = d,
# global_dict = {})
# Static methods that can run and destroy the struct built by
# instantiate.
if PY3:
static = """
static int {struct_name}_executor({struct_name} *self) {{
return self->run();
}}
static void {struct_name}_destructor(PyObject *capsule) {{
{struct_name} *self = ({struct_name} *)PyCapsule_GetContext(capsule);
delete self;
}}
""".format(struct_name=self.struct_name)
else:
static = """
static int %(struct_name)s_executor(%(struct_name)s* self) {
return self->run();
}
static void %(struct_name)s_destructor(void* executor, void* self) {
delete ((%(struct_name)s*)self);
}
""" % dict(struct_name=self.struct_name)
# We add all the support code, compile args, headers and libs we need.
for support_code in self.support_code() + self.c_support_code_apply:
mod.add_support_code(support_code)
mod.add_support_code(self.struct_code)
mod.add_support_code(static)
mod.add_function(instantiate)
for header in self.headers():
mod.add_include(header)
for init_code_block in self.init_code() + self.c_init_code_apply:
mod.add_init_code(init_code_block)
self._mod = mod
return self._mod
def cthunk_factory(self, error_storage, in_storage, out_storage,
keep_lock=False):
"""WRITEME
error_storage -> list of length 3
in_storage -> list of lists of length 1, one per input
out_storage -> list of lists of length 1, one per output
Returns a thunk that points to an instance of a C struct that
can carry on the computation of this linker's fgraph. That thunk,
when executed, will fetch its inputs from in_storage, put its
outputs in out_storage and if an error occurs will put the
type, value and traceback of the exception in error_storage.
"""
try:
key = self.cmodule_key()
except KeyError:
key = None
if key is None:
# If we can't get a key, then forget the cache mechanism.
module = self.compile_cmodule()
else:
module = get_module_cache().module_from_key(
key=key, lnk=self, keep_lock=keep_lock)
vars = self.inputs + self.outputs + self.orphans
# List of indices that should be ignored when passing the arguments
# (basically, everything that the previous call to uniq eliminated)
dupidx = [i for i, x in enumerate(vars)
if vars.count(x) > 1 and vars.index(x) != i]
out_storage = [x for i, x in enumerate(out_storage)
if (i + len(in_storage)) not in dupidx]
in_storage = [x for i, x in enumerate(in_storage) if i not in dupidx]
orphd = [[orphan.data] for orphan in self.orphans]
ret = module.instantiate(error_storage,
*(in_storage + out_storage + orphd))
return ret
def instantiate_code(self, n_args):
code = StringIO()
struct_name = self.struct_name
print("static PyObject * instantiate(PyObject * self, PyObject *argtuple) {", file=code)
print(' assert(PyTuple_Check(argtuple));', file=code)
print(' if (%(n_args)i != PyTuple_Size(argtuple)){ ' % locals(), file=code)
print(' PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected %(n_args)i, got %%i", (int)PyTuple_Size(argtuple));' % locals(), file=code)
print(' return NULL;', file=code)
print(' }', file=code)
print(' %(struct_name)s* struct_ptr = new %(struct_name)s();' % locals(), file=code)
print(' if (struct_ptr->init(', ','.join('PyTuple_GET_ITEM(argtuple, %i)' % n for n in xrange(n_args)), ') != 0) {', file=code)
print(' delete struct_ptr;', file=code)
print(' return NULL;', file=code)
print(' }', file=code)
if PY3:
print("""\
PyObject* thunk = PyCapsule_New((void*)(&{struct_name}_executor), NULL, {struct_name}_destructor);
if (thunk != NULL && PyCapsule_SetContext(thunk, struct_ptr) != 0) {{
PyErr_Clear();
Py_DECREF(thunk);
thunk = NULL;
}}
""".format(**locals()), file=code)
else:
print(' PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&%(struct_name)s_executor), struct_ptr, %(struct_name)s_destructor);' % locals(), file=code)
print(" return thunk; }", file=code)
return code.getvalue()
class _CThunk(object):
"""
A thunk with a C implementation
"""
def __init__(self, cthunk, init_tasks, tasks, error_storage):
"""
Parameters
----------
cthunk: the CObject pointer used by run_cthunk
init_tasks: WRITEME
tasks: WRITEME
error_storage: WRITEME
"""
global run_cthunk
if run_cthunk is None:
# Lazy import to avoid compilation when importing theano.
from theano.gof.cutils import run_cthunk # noqa
self.cthunk = cthunk
self.init_tasks = init_tasks
self.tasks = tasks
self.error_storage = error_storage
def find_task(self, failure_code):
"""
Maps a failure code to the task that is associated to it.
"""
failure_code -= 1
n = len(self.init_tasks)
# note that the failure code is distributed in two lists
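        # e.g. (after the -= 1 above) failure code 1 -> init_tasks[0],
        # 2 -> tasks[0], 3 -> init_tasks[1], 4 -> tasks[1], ...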
if failure_code < 2 * n:
return [self.init_tasks, self.tasks][
failure_code % 2][failure_code // 2]
else:
return self.tasks[failure_code - n]
def __call__(self):
failure = run_cthunk(self.cthunk)
if failure:
task, taskname, id = self.find_task(failure)
try:
trace = task.trace
except AttributeError:
trace = ()
try:
exc_type, _exc_value, exc_trace = self.error_storage
if task in self.nodes:
self.position_of_error = self.nodes.index(task)
                # this can be used to retrieve the location where the Op was declared
exc_value = exc_type(_exc_value)
exc_value.__thunk_trace__ = trace
except Exception:
                print(('ERROR retrieving error_storage. '
                       'Was the error set in the c code?'),
                      end=' ', file=sys.stderr)
print(self.error_storage, file=sys.stderr)
raise
reraise(exc_type, exc_value, exc_trace)
class OpWiseCLinker(link.LocalLinker):
"""WRITEME
Uses CLinker on the individual Ops that comprise an fgraph and loops
    over them in Python. This is slower than a compiled version of
the whole fgraph, but saves on compilation time because small changes
in the computation graph won't necessarily trigger any recompilation,
only local changes in the Variables or Ops that are used.
If fallback_on_perform is True, OpWiseCLinker will use an op's
perform method if no C version can be generated.
no_recycling can contain a list of Variables that belong to the fgraph.
If a Variable is in no_recycling, CLinker will clear the output storage
associated to it prior to computation (to avoid reusing it).
:note: This is in a sense the 'default' linker for Theano. The
overhead of using the OpWiseCLinker as compared with the CLinker
is only noticeable for graphs of very small tensors (such as 20
elements or less)
"""
__cache__ = {}
def __init__(self,
fallback_on_perform=True,
allow_gc=None,
nice_errors=True,
schedule=None):
if allow_gc is None:
allow_gc = config.allow_gc
self.fgraph = None
self.fallback_on_perform = fallback_on_perform
self.nice_errors = nice_errors
self.allow_gc = allow_gc
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)(
fallback_on_perform=self.fallback_on_perform,
allow_gc=self.allow_gc,
nice_errors=self.nice_errors
).accept(fgraph, no_recycling)
# raise Exception("Cannot accept from a Linker that is
# already tied to another FunctionGraph.")
self.fgraph = fgraph
self.no_recycling = no_recycling
return self
def make_all(self, profiler=None, input_storage=None, output_storage=None):
        # The lock will be acquired when we compile the first piece of
        # C code. We keep the lock until the compilation of the whole
        # function is finished, so no lock is needed at all when all the
        # C code has already been compiled.
orig_n_lock = getattr(get_lock, "n_lock", 0)
try:
fgraph = self.fgraph
order = self.schedule(fgraph)
no_recycling = self.no_recycling
input_storage, output_storage, storage_map = link.map_storage(
fgraph, order, input_storage, output_storage)
if self.allow_gc:
computed, last_user = link.gc_helper(order)
post_thunk_old_storage = []
else:
post_thunk_old_storage = None
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
thunks = []
for node in order:
                # Make sure we use the C version of the code whenever
                # possible.
                # Some ops don't have the _op_use_c_code property, for
                # example ifelse (or any op that comes with its own
                # make_thunk).
old_value = getattr(node.op, '_op_use_c_code', False)
try:
if theano.config.cxx:
node.op._op_use_c_code = True
thunks += [node.op.make_thunk(node,
storage_map,
compute_map,
no_recycling)]
thunks[-1].inputs = [storage_map[v] for v in node.inputs]
thunks[-1].outputs = [storage_map[v] for v in node.outputs]
finally:
node.op._op_use_c_code = old_value
for node in order:
if self.allow_gc:
post_thunk_old_storage.append(
[storage_map[input] for input in node.inputs
if ((input in computed) and
(input not in fgraph.outputs) and
node == last_user[input])])
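            # Sketch of the effect: if input V is last consumed by node k,
            # its storage cell is listed for node k above and is cleared
            # right after thunk k runs, releasing the memory early.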
if no_recycling is True:
no_recycling = list(storage_map.values())
no_recycling = utils.difference(no_recycling, input_storage)
else:
no_recycling = [storage_map[r]
for r in no_recycling if r not in fgraph.inputs]
f = link.streamline(fgraph, thunks, order,
post_thunk_old_storage,
no_recycling=no_recycling,
nice_errors=self.nice_errors)
f.allow_gc = self.allow_gc
finally:
# Release lock on compilation directory.
if getattr(get_lock, "n_lock", 0) > orig_n_lock:
release_lock()
assert get_lock.n_lock == orig_n_lock
return (f,
[link.Container(input, storage)
for input, storage in izip(fgraph.inputs, input_storage)],
[link.Container(output, storage, True)
for output, storage in izip(fgraph.outputs, output_storage)],
thunks,
order)
def _default_checker(x, y):
"""WRITEME
Default checker for DualLinker. This checks that the
variables contain the same data using ==.
"""
if x[0] != y[0]:
raise Exception("Output mismatch.",
{'performlinker': x[0], 'clinker': y[0]})
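# A hypothetical tolerance-based checker for DualLinker (a sketch; it is
# not used anywhere by default, and the name is illustrative only):
#
#     def allclose_checker(x, y):
#         import numpy
#         if not numpy.allclose(x[0], y[0]):
#             raise Exception("Output mismatch.",
#                             {'performlinker': x[0], 'clinker': y[0]})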
class DualLinker(link.Linker):
"""WRITEME
Runs the fgraph in parallel using PerformLinker and CLinker.
The thunk/function produced by DualLinker uses PerformLinker as the
"main" implementation: the inputs and outputs are fed to/taken from
the Ops' perform. However, DualLinker also instantiates a copy of
the fgraph on which it runs OpWiseCLinker. At each step, the variables
of perform and of the C implementation are verified using a checker
function.
"""
def __init__(self, checker=_default_checker, schedule=None):
"""
Initialize a DualLinker.
The checker argument must be a function that takes two lists
of length 1. The first one passed will contain the output
computed by PerformLinker and the second one the output
computed by OpWiseCLinker. The checker should compare the data
fields of the two variables to see if they match. By default,
DualLinker uses ==. A custom checker can be provided to
compare up to a certain error tolerance.
If a mismatch occurs, the checker should raise an exception to
halt the computation. If it does not, the computation will
carry on and errors will snowball. The checker can sidestep
the problem by fiddling with the data, but it should be
careful not to share data between the two outputs (or inplace
operations that use them will interfere).
no_recycling can contain a list of Variables that belong to the fgraph.
If a Variable is in no_recycling, CLinker will clear the output storage
associated to it during the computation (to avoid reusing it).
"""
self.fgraph = None
self.checker = checker
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)(self.checker).accept(fgraph, no_recycling)
self.fgraph = fgraph
self.no_recycling = no_recycling
return self
def make_thunk(self, **kwargs):
fgraph = self.fgraph
no_recycling = self.no_recycling
_f, i1, o1, thunks1, order1 = (
link.PerformLinker(schedule=self.schedule).accept(
fgraph, no_recycling=no_recycling).make_all(**kwargs))
kwargs.pop('input_storage', None)
_f, i2, o2, thunks2, order2 = (
OpWiseCLinker(schedule=self.schedule).accept(
fgraph, no_recycling=no_recycling).make_all(**kwargs))
def f():
for input1, input2 in izip(i1, i2):
# Set the inputs to be the same in both branches.
# The copy is necessary in order for inplace ops not to
# interfere.
input2.storage[0] = copy(input1.storage[0])
for thunk1, thunk2, node1, node2 in izip(thunks1, thunks2,
order1, order2):
for output, storage in izip(node1.outputs, thunk1.outputs):
if output in no_recycling:
storage[0] = None
for output, storage in izip(node2.outputs, thunk2.outputs):
if output in no_recycling:
storage[0] = None
try:
thunk1()
thunk2()
for output1, output2 in izip(thunk1.outputs,
thunk2.outputs):
self.checker(output1, output2)
except Exception:
link.raise_with_op(node1)
return f, i1, o1
class HideC(object):
def __hide(*args):
raise utils.MethodNotDefined()
c_code = __hide
c_code_cleanup = __hide
c_headers = __hide
c_header_dirs = __hide
c_libraries = __hide
c_lib_dirs = __hide
c_support_code = __hide
c_support_code_apply = __hide
c_compile_args = __hide
c_no_compile_args = __hide
c_init_code = __hide
c_init_code_apply = __hide
c_init_code_struct = __hide
c_support_code_struct = __hide
c_cleanup_code_struct = __hide
def c_code_cache_version(self):
return ()
def c_code_cache_version_apply(self, node):
return self.c_code_cache_version()
| mit | -3,159,184,381,697,750,500 | 38.380266 | 168 | 0.556333 | false |
Chaffelson/whoville | whoville/cloudbreak/apis/v1proxyconfigs_api.py | 1 | 47332 | # coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest off the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing Hadoop clusters of arbitrary sizes to be spun up on different cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V1proxyconfigsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_private_proxy_config(self, name, **kwargs):
"""
delete private proxy configuration by name
        A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It is applied to the package manager and Ambari as well.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_proxy_config(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_private_proxy_config_with_http_info(name, **kwargs)
else:
(data) = self.delete_private_proxy_config_with_http_info(name, **kwargs)
return data
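    # Synchronous usage sketch (the client construction is hypothetical):
    #     api = V1proxyconfigsApi()
    #     resp = api.delete_private_proxy_config('my-proxy')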
def delete_private_proxy_config_with_http_info(self, name, **kwargs):
"""
delete private proxy configuration by name
        A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It is applied to the package manager and Ambari as well.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_proxy_config_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_private_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_private_proxy_config`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/user/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_proxy_config(self, id, **kwargs):
"""
delete proxy configuration by id
        A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It is applied to the package manager and Ambari as well.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_proxy_config(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_proxy_config_with_http_info(id, **kwargs)
else:
(data) = self.delete_proxy_config_with_http_info(id, **kwargs)
return data
def delete_proxy_config_with_http_info(self, id, **kwargs):
"""
delete proxy configuration by id
        A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It is applied to the package manager and Ambari as well.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_proxy_config_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_proxy_config`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_public_proxy_config(self, name, **kwargs):
"""
delete public (owned) or private proxy configuration by name
        A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It is applied to the package manager and Ambari as well.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_proxy_config(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_public_proxy_config_with_http_info(name, **kwargs)
else:
(data) = self.delete_public_proxy_config_with_http_info(name, **kwargs)
return data
def delete_public_proxy_config_with_http_info(self, name, **kwargs):
"""
delete public (owned) or private proxy configuration by name
        A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It is applied to the package manager and Ambari as well.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_proxy_config_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_public_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_public_proxy_config`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/account/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_private_proxy_config(self, name, **kwargs):
"""
retrieve a private proxy configuration by name
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_proxy_config(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_private_proxy_config_with_http_info(name, **kwargs)
else:
(data) = self.get_private_proxy_config_with_http_info(name, **kwargs)
return data
def get_private_proxy_config_with_http_info(self, name, **kwargs):
"""
retrieve a private proxy configuration by name
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_proxy_config_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_private_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_private_proxy_config`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/user/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_privates_proxy_config(self, **kwargs):
"""
retrieve private proxy configurations
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_proxy_config(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[ProxyConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_privates_proxy_config_with_http_info(**kwargs)
else:
(data) = self.get_privates_proxy_config_with_http_info(**kwargs)
return data
def get_privates_proxy_config_with_http_info(self, **kwargs):
"""
retrieve private proxy configurations
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_proxy_config_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[ProxyConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_privates_proxy_config" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/user', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ProxyConfigResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_proxy_config(self, id, **kwargs):
"""
retrieve proxy configuration by id
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_proxy_config(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_proxy_config_with_http_info(id, **kwargs)
else:
(data) = self.get_proxy_config_with_http_info(id, **kwargs)
return data
def get_proxy_config_with_http_info(self, id, **kwargs):
"""
retrieve proxy configuration by id
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_proxy_config_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_proxy_config`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_public_proxy_config(self, name, **kwargs):
"""
retrieve a public or private (owned) proxy configuration by name
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_proxy_config(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_public_proxy_config_with_http_info(name, **kwargs)
else:
(data) = self.get_public_proxy_config_with_http_info(name, **kwargs)
return data
def get_public_proxy_config_with_http_info(self, name, **kwargs):
"""
retrieve a public or private (owned) proxy configuration by name
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_proxy_config_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_public_proxy_config" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `get_public_proxy_config`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/account/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_publics_proxy_config(self, **kwargs):
"""
retrieve public and private (owned) proxy configurations
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_proxy_config(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[ProxyConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_publics_proxy_config_with_http_info(**kwargs)
else:
(data) = self.get_publics_proxy_config_with_http_info(**kwargs)
return data
def get_publics_proxy_config_with_http_info(self, **kwargs):
"""
retrieve public and private (owned) proxy configurations
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_proxy_config_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[ProxyConfigResponse]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_publics_proxy_config" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/account', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[ProxyConfigResponse]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_private_proxy_config(self, **kwargs):
"""
create proxy configuration as private resource
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_proxy_config(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ProxyConfigRequest body:
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_private_proxy_config_with_http_info(**kwargs)
else:
(data) = self.post_private_proxy_config_with_http_info(**kwargs)
return data
def post_private_proxy_config_with_http_info(self, **kwargs):
"""
create proxy configuration as private resource
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_proxy_config_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ProxyConfigRequest body:
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_private_proxy_config" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/user', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def post_public_proxy_config(self, **kwargs):
"""
create proxy configuration as public resource
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_proxy_config(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ProxyConfigRequest body:
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_public_proxy_config_with_http_info(**kwargs)
else:
(data) = self.post_public_proxy_config_with_http_info(**kwargs)
return data
def post_public_proxy_config_with_http_info(self, **kwargs):
"""
create proxy configuration as public resource
A proxy configuration describes a connection to an external proxy server which provides internet access to cluster members. It's applied for the package manager and Ambari too
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_proxy_config_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ProxyConfigRequest body:
:return: ProxyConfigResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method post_public_proxy_config" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v1/proxyconfigs/account', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ProxyConfigResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
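# --- Illustrative only (not part of the generated module): a hypothetical
# end-to-end flow over the proxy-config endpoints above. The `ApiClient` and
# `ProxyconfigsApi` names, and the `ProxyConfigRequest` fields, are
# assumptions about the surrounding package, not verified API.
if __name__ == '__main__':
    client = ApiClient(host='https://cloudbreak.example.com')
    api = ProxyconfigsApi(api_client=client)
    body = ProxyConfigRequest(name='demo-proxy',
                              server_host='proxy.example.com',
                              server_port=3128,
                              protocol='http')
    created = api.post_private_proxy_config(body=body)    # create (private)
    fetched = api.get_private_proxy_config(created.name)  # read back by name
    api.delete_public_proxy_config(fetched.name)          # delete by name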
| apache-2.0 | 6,885,332,943,980,451,000 | 42.866543 | 984 | 0.561269 | false |
quantifiedcode/checkmate | checkmate/management/commands/sync.py | 1 | 2051 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import BaseCommand
from collections import defaultdict
import sys
import os
import random
import os.path
import copy
import json
import time
import pprint
import hashlib
import logging
logger = logging.getLogger(__name__)
from checkmate.lib.code import CodeEnvironment
from checkmate.helpers.issue import group_issues_by_fingerprint
from checkmate.lib.models import Snapshot,FileRevision,Issue,IssueOccurrence
class Command(BaseCommand):
"""
Synchronizes the following objects with an external backend:
* Snapshot (+FileRevisions) <- needs a unique ID
* Diff (+FileRevisions) <- needs a unique ID
* FileRevision () <- needs a unique ID
* Issue <- needs a unique ID (analyzer, code, fingerprint)
* IssueOccurrence (+Issue,FileRevision) (issue.id, file_revision.id, )
* eventually other models from plugins (e.g. GitSnapshot, GitBranch)
Strategy:
* First, for all objects send a list with (pk, updated_at) values to the server
* The server responds with a list of unknown/outdated objects
* The client sends these objects to the server, where they are imported
* The import order should always match the dependencies between the models (i.e. first FileRevision objects, then Snapshot, then Diff objects)
The server will check for primary key conflicts and might mangle/alter the PK values of the entities
Maybe it would be a better strategy to define unique identifiers for each object, which the server can use for checking.
* Snapshot (sha/...)
* Diff (snapshot_a.id, snapshot_b.id)
* FileRevision: (fr_pk)
* Issue (analyzer, code, fingerprint)
* IssueOccurrence (issue.id, file_revision.id, from_row, from_col, to_row, to_col, sequence)
* Snapshot:
"""
def run(self):
settings = self.project.settings
logger.info("Synchronizing analysis results...")
"""
1.)
"""
| mit | 329,443,048,359,415,500 | 29.553846 | 146 | 0.68942 | false |
southampton/unimatrix | deskctl/lib/errors.py | 1 | 3040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from deskctl import app
from flask import g, render_template, make_response, session, request
import traceback
################################################################################
## standard error (uses render_template and thus standard page layout)
def stderr(title,message,code=200,template="error.html"):
"""This function is called by other error functions to show the error to the
end user. It takes an error title and an error message.
"""
# Should we show a traceback?
if app.debug:
debug = traceback.format_exc()
else:
debug = ""
return render_template(template,title=title,message=message,debug=debug), code
################################################################################
## fatal error (returns HTML from python code - which is more likely to work)
def fatalerr(title=u"fatal error ☹",message="Whilst processing your request an unexpected error occurred which the application could not recover from",debug=None):
# Should we show a traceback?
if debug is None:
if app.debug:
debug = traceback.format_exc()
else:
debug = "Please ask your administrator to consult the error log for more information."
# Build the response. Not using a template here to prevent any Jinja
# issues from causing this to fail.
html = u"""
<!doctype html>
<html>
<head>
<title>Fatal Error</title>
<meta charset="utf-8" />
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<style type="text/css">
body {
background-color: #8B1820;
color: #FFFFFF;
margin: 0;
padding: 0;
font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
}
h1 {
font-size: 4em;
font-weight: normal;
margin: 0px;
}
div {
width: 80%%;
margin: 5em auto;
padding: 50px;
border-radius: 0.5em;
}
@media (max-width: 900px) {
div {
width: auto;
margin: 0 auto;
border-radius: 0;
padding: 1em;
}
}
</style>
</head>
<body>
<div>
<h1>%s</h1>
<p>%s</p>
<pre>%s</pre>
</div>
</body>
</html>
""" % (title,message,debug)
return make_response(html, 500)
################################################################################
## log a full error to the python logger
def logerr():
# Get the username
if 'username' in session:
username = session['username']
else:
username = 'Not logged in'
## Log the critical error (so that it goes to e-mail)
app.logger.error("""Request details:
HTTP Path: %s
HTTP Method: %s
Client IP Address: %s
User Agent: %s
User Platform: %s
User Browser: %s
User Browser Version: %s
Username: %s
Traceback:
%s
""" % (
request.path,
request.method,
request.remote_addr,
request.user_agent.string,
request.user_agent.platform,
request.user_agent.browser,
request.user_agent.version,
username,
traceback.format_exc(),
))
| gpl-3.0 | 6,697,952,601,230,375,000 | 23.699187 | 162 | 0.602041 | false |
lynchnf/maneki-neko-web | socialmedia/models.py | 1 | 1095 | from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.db import models
ICON_CHOICES = (
("fa-delicious", _("Delicious")),
("fa-digg", _("Digg")),
("fa-facebook", _("Facebook")),
("fa-flickr", _("Flickr")),
("fa-google-plus", _("Google+")),
("fa-instagram", _("Instagram")),
("fa-linkedin", _("LinkedIn")),
("fa-map-marker", _("Map")),
("fa-pinterest", _("Pinterest")),
("fa-rss", _("RSS feed")),
("fa-reddit", _("reddit")),
("fa-spotify", _("Spotify")),
("fa-stumbleupon", _("StumbleUpon")),
("fa-tumblr", _("Tumblr")),
("fa-twitter", _("Twitter")),
("fa-youtube-play", _("YouTube")))
SIZE_CHOICES = [(i,i) for i in range(6)]
class SocialLink(CMSPlugin):
icon = models.CharField("Social Network Icon", max_length=20, choices=ICON_CHOICES)
size = models.IntegerField("Icon Size", default=0, choices=SIZE_CHOICES)
url = models.URLField("URL")
def __unicode__(self):
return self.url | mit | -454,674,423,884,372,100 | 33.25 | 87 | 0.552511 | false |
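# Illustrative only: a minimal plugin class that would normally live in a
# separate cms_plugins.py. The template path is an assumed project layout.
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool

class SocialLinkPlugin(CMSPluginBase):
    model = SocialLink
    name = _("Social Media Link")
    render_template = "socialmedia/social_link.html"  # assumed template

    def render(self, context, instance, placeholder):
        # Expose the plugin instance (icon, size, url) to the template
        context.update({'instance': instance})
        return context

plugin_pool.register_plugin(SocialLinkPlugin)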
HUGG/NGWM2016-modelling-course | Lessons/06-Rheology-of-the-lithosphere/scripts/solutions/strength-envelope-uniform-crust.py | 1 | 7747 | '''
strength-envelope-uniform-crust.py
This script can be used for plotting strength envelopes for a lithosphere with
a uniform crust. The script includes a function sstemp() that can be used for
calculating the lithospheric temperature as a function of the input material
properties
dwhipp 01.16 (modified from code written by L. Kaislaniemi)
'''
# --- USER INPUT PARAMETERS ---
# Model geometry
z_surf = 0.0 # Elevation of upper surface [km]
z_bott = 100.0 # Elevation of bottom boundary [km]
nz = 100 # Number of grid points
# Boundary conditions
T_surf = 0.0 # Temperature of the upper surface [deg C]
q_surf = 65.0 # Surface heat flow [mW/m^2]
# Thermal conductivity (constant across model thickness)
k = 2.75 # Thermal conductivity [W/m K]
# Deformation rate
edot = 1.0e-15 # Reference strain rate [1/s]
# Constants
g = 9.81 # Gravitational acceleration [m/s^2]
R = 8.314 # Gas constant
# MATERIAL PROPERTY DEFINITIONS
# Crust (Wet quartzite - Gleason and Tullis, 1995)
mat1 = 'Wet quartzite'
L1 = 35.0 # Thickness of layer one [km]
A1 = 1.1 # Average heat production rate for crust [uW/m^3]
rho1 = 2800.0 # Rock density [kg/m^3]
Avisc1 = 1.1e-4 # Viscosity constant [MPa^-n s^-1]
Q1 = 223.0 # Activation energy [kJ/mol]
n1 = 4.0 # Power-law exponent
mu1 = 0.85 # Friction coefficient
C1 = 0.0 # Cohesion [MPa]
# Mantle (Wet olivine - Hirth and Kohlstedt, 1996)
mat2 = 'Wet olivine'
A2 = 0.02 # Heat production rate for mantle [uW/m^3]
rho2 = 3300.0 # Rock density [kg/m^3]
Avisc2 = 4.876e6 # Viscosity constant [MPa^-n s^-1]
Q2 = 515.0 # Activation energy [kJ/mol]
n2 = 3.5 # Power-law exponent
mu2 = 0.6 # Friction coefficient
C2 = 60.0 # Cohesion [MPa]
# END MATERIAL PROPERTY DEFINITIONS
# --- END USER INPUTS ---
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
# Define function to calculate temperatures (DO NOT MODIFY)
def sstemp(A,k,dz,nz,T_surf,q_surf):
# Generate an empty array for temperature values
T = np.zeros(nz)
# Set boundary conditions
# the upper surface temperature and the temperature at one grid point below
T[0] = T_surf
## Grid point one needs special handling as T[-1] is not available
# Calculate "ghost point" outside the model domain, where grid point -1
# would be, assuming surface heat flow q_surf
Tghost = T[0] - q_surf * dz / k # = "T[-1]"
# Use the same finite difference formula to calculate T as for
# the inner points, but replace "T[-1]" by ghost point value
T[1] = -A[1] * dz**2 / k - Tghost + 2*T[0]
# Calculate temperatures across specified thickness
for i in range(2, nz): # NB! Grid points 0 and 1 omitted as they cannot be calculated
T[i] = -A[i] * dz**2 / k - T[i-2] + 2*T[i-1]
return T
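# Optional sanity check (illustration, not part of the original script):
# for uniform heat production the analytic steady state is
# T(z) = T_surf + (q_surf/k)*z - A*z**2/(2*k), which the scheme above
# reproduces exactly for this quadratic profile (up to round-off).
def check_sstemp(nz_chk=101, L=1.0e5, k_chk=2.5, A0=1.0e-6, Ts=0.0, qs=0.05):
    dz_chk = L / (nz_chk - 1)
    z_chk = np.linspace(0.0, L, nz_chk)
    T_num = sstemp(np.full(nz_chk, A0), k_chk, dz_chk, nz_chk, Ts, qs)
    T_ana = Ts + qs * z_chk / k_chk - A0 * z_chk**2 / (2.0 * k_chk)
    return np.max(np.abs(T_num - T_ana))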
# Define conversion factors
km2m = 1.0e3 # [km] to [m]
mW2W = 1.0e-3 # [mW] to [W]
uW2W = 1.0e-6 # [uW] to [W]
MPa2Pa = 1.0e6 # [MPa] to [Pa]
kJ2J = 1.0e3 # [kJ] to [J]
# Convert material property units to SI
z_surf = z_surf * km2m
z_bott = z_bott * km2m
q_surf = q_surf * mW2W
A1 = A1 * uW2W
A2 = A2 * uW2W
L1 = L1 * km2m
Avisc1 = Avisc1 / MPa2Pa**n1
Avisc2 = Avisc2 / MPa2Pa**n2
Q1 = Q1 * kJ2J
Q2 = Q2 * kJ2J
C1 = C1 * MPa2Pa
C2 = C2 * MPa2Pa
# Generate the grid
# Regular grid is used, so that in FD calculations
# only dz is needed. Array z is used for assigning material properties and plotting.
dz = (z_bott - z_surf) / (nz - 1)
z = np.linspace(z_surf, z_bott, nz)
# Generate the material properties arrays
A = np.zeros(nz)
rho = np.zeros(nz)
Avisc = np.zeros(nz)
Q = np.zeros(nz)
n = np.zeros(nz)
mu = np.zeros(nz)
C = np.zeros(nz)
for i in range(nz):
# Fill material property arrays for depths in the crust
if z[i] <= L1:
A[i] = A1
rho[i] = rho1
Avisc[i] = Avisc1
Q[i] = Q1
n[i] = n1
mu[i] = mu1
C[i] = C1
# Fill material property arrays for depths in the mantle
else:
A[i] = A2
rho[i] = rho2
Avisc[i] = Avisc2
Q[i] = Q2
n[i] = n2
mu[i] = mu2
C[i] = C2
# Call function to get temperatures
T = sstemp(A,k,dz,nz,T_surf,q_surf)
T = T + 273.15 # Convert to Kelvins
# Initialize arrays
P = np.zeros(nz)
frict = np.zeros(nz)
visc = np.zeros(nz)
strength = np.zeros(nz)
# Calculate lithostatic pressure
for i in range(1, nz):
P[i] = P[i-1] + rho[i] * g * dz
# Loop over all points and calculate frictional and viscous strengths
for i in range(nz):
# Calculate frictional shear strength using Coulomb criterion
frict[i] = mu[i] * P[i] + C[i]
# Calculate viscous strength using a power-law (dislocation) creep flow law
visc[i] = (edot/Avisc[i])**((1./n[i]))*np.exp(Q[i]/(n[i]*R*T[i]))
# Use logical statements to make sure the stored strength value is the
# smaller of the two calculated above for each point
if frict[i] <= visc[i]:
strength[i] = frict[i]
else:
strength[i] = visc[i]
# Rescale values for plotting
T = T - 273.15
z = z / km2m
strength = strength / MPa2Pa
z_bott = z_bott / km2m
# Create figure window for plot
plt.figure()
# PLOT #1 - Left panel, temperature versus depth
plt.subplot(121)
# Plot temperature on left subplot
plt.plot(T, z, "ro-")
# Invert y axis
plt.gca().invert_yaxis()
# Label axes
plt.xlabel("Temperature [$^{\circ}$C]")
plt.ylabel("Depth [km]")
# PLOT #2 - Right panel, strength versus depth
plt.subplot(122)
# Plot strength versus depth
plt.plot(strength, z, "ko-") # minus sign is placed to make z axis point down
# Invert y axis
plt.gca().invert_yaxis()
# Label axes
plt.xlabel("Strength [MPa]")
# Add text labels for materials
plt.text(0.2*max(strength), 0.8*z_bott, "Layer 1: "+mat1)
plt.text(0.2*max(strength), 0.85*z_bott, "Layer 2: "+mat2)
plt.show()
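# Optional extension (illustration, not in the original script): the depth
# integral of the strength envelope, a common single-number summary.
total_strength = np.trapz(strength * MPa2Pa, z * km2m)  # [N/m] = Pa * m
print("Integrated lithospheric strength: %.3e N/m" % total_strength)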
| mit | -5,469,655,399,814,648,000 | 36.97549 | 141 | 0.47954 | false |
librato/librato-python-web | librato_python_web/librato_config.py | 1 | 1685 | # Copyright (c) 2015. Librato, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Librato, Inc. nor the names of project contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL LIBRATO, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from librato_python_web.tools.configure import execute as configure
def execute():
configure()
if __name__ == '__main__':
execute()
| bsd-3-clause | -4,723,268,181,559,105,000 | 48.558824 | 81 | 0.75727 | false |
vascotenner/holoviews | tests/testcomparisondimension.py | 1 | 6477 | """
Test cases for Dimension and Dimensioned object comparison.
"""
from holoviews.core import Dimension, Dimensioned
from holoviews.element.comparison import ComparisonTestCase
class DimensionsComparisonTestCase(ComparisonTestCase):
def setUp(self):
super(DimensionsComparisonTestCase, self).setUp()
self.dimension1 = Dimension('dim1', range=(0,1))
self.dimension2 = Dimension('dim2', range=(0,1))
self.dimension3 = Dimension('dim1', range=(0,2))
self.dimension4 = Dimension('dim1')
self.dimension5 = Dimension('dim1', cyclic=True)
self.dimension6 = Dimension('dim1', cyclic=True, range=(0,1))
self.dimension7 = Dimension('dim1', cyclic=True, range=(0,1), unit='ms')
self.dimension8 = Dimension('dim1', values=['a', 'b'])
self.dimension9 = Dimension('dim1', type=int)
self.dimension10 = Dimension('dim1', type=float)
def test_dimension_comparison_equal1(self):
self.assertEqual(self.dimension1, self.dimension1)
def test_dimension_comparison_equal2(self):
self.assertEqual(self.dimension1,
Dimension('dim1', range=(0,1)))
def test_dimension_comparison_equal3(self):
self.assertEqual(self.dimension7,
Dimension('dim1', cyclic=True, range=(0,1), unit='ms'))
def test_dimension_comparison_names_unequal(self):
try:
self.assertEqual(self.dimension1, self.dimension2)
except AssertionError as e:
self.assertEqual(str(e), 'Dimension names mismatched: dim1 != dim2')
def test_dimension_comparison_range_unequal1(self):
try:
self.assertEqual(self.dimension1, self.dimension3)
except AssertionError as e:
self.assertEqual(str(e), 'Dimension ranges mismatched: (0, 1) != (0, 2)')
def test_dimension_comparison_cyclic_unequal(self):
try:
self.assertEqual(self.dimension4, self.dimension5)
except AssertionError as e:
self.assertEqual(str(e), 'Dimension cyclic declarations mismatched.')
def test_dimension_comparison_range_unequal2(self):
try:
self.assertEqual(self.dimension5, self.dimension6)
except AssertionError as e:
self.assertEqual(str(e), 'Dimension ranges mismatched: (None, None) != (0, 1)')
def test_dimension_comparison_units_unequal(self):
try:
self.assertEqual(self.dimension6, self.dimension7)
except AssertionError as e:
self.assertEqual(str(e), 'Dimension unit declarations mismatched: None != ms')
def test_dimension_comparison_values_unequal(self):
try:
self.assertEqual(self.dimension4, self.dimension8)
except AssertionError as e:
self.assertEqual(str(e), "Dimension value declarations mismatched: [] != ['a', 'b']")
def test_dimension_comparison_types_unequal(self):
try:
self.assertEqual(self.dimension9, self.dimension10)
except AssertionError as e:
self.assertEqual(str(e)[:39], "Dimension type declarations mismatched:")
class DimensionedComparisonTestCase(ComparisonTestCase):
def setUp(self):
super(DimensionedComparisonTestCase, self).setUp()
# Value dimension lists
self.value_list1 = [Dimension('val1')]
self.value_list2 = [Dimension('val2')]
# Key dimension lists
self.key_list1 = [Dimension('key1')]
self.key_list2 = [Dimension('key2')]
# Dimensioned instances
self.dimensioned1 = Dimensioned('data1', vdims=self.value_list1,
kdims=self.key_list1)
self.dimensioned2 = Dimensioned('data2', vdims=self.value_list2,
kdims=self.key_list1)
self.dimensioned3 = Dimensioned('data3', vdims=self.value_list1,
kdims=self.key_list2)
self.dimensioned4 = Dimensioned('data4', vdims=[],
kdims=self.key_list1)
self.dimensioned5 = Dimensioned('data5', vdims=self.value_list1,
kdims=[])
# Value / Label comparison tests
self.dimensioned6 = Dimensioned('data6', group='foo',
vdims=self.value_list1,
kdims=self.key_list1)
self.dimensioned7 = Dimensioned('data7', group='foo', label='bar',
vdims=self.value_list1,
kdims=self.key_list1)
def test_dimensioned_comparison_equal(self):
"Note that the data is not compared at the Dimensioned level"
self.assertEqual(self.dimensioned1,
Dimensioned('other_data',
vdims=self.value_list1,
kdims=self.key_list1))
def test_dimensioned_comparison_unequal_value_dims(self):
try:
self.assertEqual(self.dimensioned1, self.dimensioned2)
except AssertionError as e:
self.assertEqual(str(e), "Dimension names mismatched: val1 != val2")
def test_dimensioned_comparison_unequal_key_dims(self):
try:
self.assertEqual(self.dimensioned1, self.dimensioned3)
except AssertionError as e:
self.assertEqual(str(e), 'Dimension names mismatched: key1 != key2')
def test_dimensioned_comparison_unequal_value_dim_lists(self):
try:
self.assertEqual(self.dimensioned1, self.dimensioned4)
except AssertionError as e:
self.assertEqual(str(e), "Value dimension list mismatched")
def test_dimensioned_comparison_unequal_key_dim_lists(self):
try:
self.assertEqual(self.dimensioned1, self.dimensioned5)
except AssertionError as e:
self.assertEqual(str(e), 'Key dimension list mismatched')
def test_dimensioned_comparison_unequal_group(self):
try:
self.assertEqual(self.dimensioned1, self.dimensioned6)
except AssertionError as e:
self.assertEqual(str(e), 'Group labels mismatched.')
def test_dimensioned_comparison_unequal_label(self):
try:
self.assertEqual(self.dimensioned6, self.dimensioned7)
except AssertionError as e:
self.assertEqual(str(e), 'Labels mismatched.')
| bsd-3-clause | -5,668,580,000,063,761,000 | 40.787097 | 97 | 0.614173 | false |
sony/nnabla | python/src/nnabla/backward_function/log_softmax.py | 1 | 1273 | # Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
from .utils import no_grad, positive_axis, get_output
def log_softmax_backward(inputs, axis=None):
"""
Args:
inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dy = inputs[0]
x0 = inputs[1]
y0 = get_output(x0, "LogSoftmax")
D = len(x0.shape)
axis = positive_axis(axis, D)
dx0 = dy - F.exp(y0) * F.sum(dy, axis=axis, keepdims=True)
return dx0
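# Illustrative only (pure NumPy, independent of nnabla): a central finite
# difference check of the formula above, dx = dy - exp(y) * sum(dy), for a
# 1-D input. Shapes, seed and epsilon are arbitrary choices.
def _np_log_softmax(x):
    import numpy as np
    return x - np.log(np.sum(np.exp(x)))

def _np_check_log_softmax_backward(eps=1e-6):
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.randn(5)
    dy = rng.randn(5)
    y = _np_log_softmax(x)
    analytic = dy - np.exp(y) * np.sum(dy)  # formula used above
    numeric = np.empty_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e[i] = eps
        numeric[i] = np.sum(dy * (_np_log_softmax(x + e) -
                                  _np_log_softmax(x - e))) / (2.0 * eps)
    return np.max(np.abs(analytic - numeric))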
| apache-2.0 | 1,820,603,704,380,515,000 | 34.361111 | 86 | 0.716418 | false |
thonkify/thonkify | src/lib/future/backports/email/iterators.py | 1 | 2346 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Various types of useful iterators and generators."""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, str):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level + 1, include_default)
| mit | -7,011,586,712,797,445,000 | 30.702703 | 75 | 0.650469 | false |
crossroadchurch/paul | tests/resources/projector/data.py | 1 | 2518 | # -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2015 OpenLP Developers #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`tests.resources.projector.data` module contains test data
"""
import os
from openlp.core.lib.projector.db import Projector
# Test data
TEST_DB = os.path.join('tmp', 'openlp-test-projectordb.sql')
TEST1_DATA = Projector(ip='111.111.111.111',
port='1111',
pin='1111',
name='___TEST_ONE___',
location='location one',
notes='notes one')
TEST2_DATA = Projector(ip='222.222.222.222',
port='2222',
pin='2222',
name='___TEST_TWO___',
location='location two',
notes='notes two')
TEST3_DATA = Projector(ip='333.333.333.333',
port='3333',
pin='3333',
name='___TEST_THREE___',
location='location three',
notes='notes three')
| gpl-2.0 | 3,210,781,646,622,776,000 | 48.372549 | 79 | 0.426926 | false |
eladnoor/equilibrator | gibbs/forms.py | 1 | 5821 | from django import forms
from util import constants
import haystack.forms
class ListFormField(forms.MultipleChoiceField):
"""
A form field for a list of values that are not validated against a fixed set of choices.
The Django MultipleChoiceField does *almost* what we want, except
it validates that each choice is in a supplied list of choices,
even when that list is empty. We simply override the validation.
"""
def valid_value(self, value):
return True
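# Illustrative only (not in the original module): with per-choice validation
# relaxed, any submitted list cleans successfully even though no `choices`
# were declared. Assumes Django settings are configured, as for any form use.
def _demo_list_form_field():
    field = ListFormField(required=False)
    assert field.clean(['1e-4', '2e-4']) == ['1e-4', '2e-4']
    return field.clean([])  # empty input is fine too, yields []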
class EnzymeForm(forms.Form):
ec = forms.CharField(max_length=50)
# Convenience accessors for clean data with defaults.
cleaned_ec = property(lambda self: self.cleaned_data['ec'])
class BaseSearchForm(haystack.forms.SearchForm):
def _GetWithDefault(self, key, default):
if (key not in self.cleaned_data or self.cleaned_data[key] is None):
return default
return self.cleaned_data[key]
class SuggestForm(BaseSearchForm):
query = forms.CharField(max_length=2048, required=False)
cleaned_query = property(lambda self: self._GetWithDefault('query', ''))
class SearchForm(BaseSearchForm):
query = forms.CharField(max_length=2048, required=False)
ph = forms.FloatField(required=False)
pmg = forms.FloatField(required=False)
ionic_strength = forms.FloatField(required=False)
electronReductionPotential = forms.FloatField(required=False)
max_priority = forms.IntegerField(required=False)
mode = forms.ChoiceField(required=False,
choices=[('BA', 'basic'), ('AD', 'advanced')])
# Convenience accessors for clean data with defaults.
cleaned_query = property(lambda self: self._GetWithDefault('query', ''))
cleaned_ph = property(lambda self: self._GetWithDefault('ph', None))
cleaned_pmg = property(lambda self: self._GetWithDefault('pmg', None))
cleaned_ionic_strength = property(
lambda self: self._GetWithDefault('ionic_strength', None))
cleaned_e_reduction_potential = property(
lambda self: self._GetWithDefault('electronReductionPotential', None))
cleaned_max_priority = property(
lambda self: self._GetWithDefault('max_priority', 0))
cleaned_mode = property(
lambda self: self._GetWithDefault('mode', ''))
class BaseReactionForm(SearchForm):
def GetReactantConcentrations(self):
prefactors = map(float,
self.cleaned_data['reactantsConcentrationPrefactor'])
for f, c in zip(prefactors,
self.cleaned_data['reactantsConcentration']):
try:
conc = f * float(c)
if conc <= 0:
yield 1e-9
else:
yield conc
except ValueError:
yield 1e-9
reactantsPhase = forms.MultipleChoiceField(required=False,
choices=constants.PHASE_CHOICES)
reactantsConcentration = ListFormField(required=False)
reactantsConcentrationPrefactor = ListFormField(required=False)
# Convenience accessors for clean data with defaults.
cleaned_reactantsPhase = property(
lambda self: self.cleaned_data['reactantsPhase'])
cleaned_reactantsConcentration = property(GetReactantConcentrations)
class ReactionForm(BaseReactionForm):
reactionId = forms.CharField(required=False)
reactantsId = ListFormField(required=False)
reactantsCoeff = ListFormField(required=False)
reactantsName = ListFormField(required=False)
submit = forms.ChoiceField(required=False,
choices=[('Update', 'update'),
('Save', 'save'),
('Reverse', 'reverse'),
('Reset', 'reset')])
# Convenience accessors for clean data with defaults.
cleaned_reactionId = property(
lambda self: self.cleaned_data['reactionId'])
cleaned_reactantsId = property(
lambda self: self.cleaned_data['reactantsId'])
cleaned_reactantsCoeff = property(
lambda self: [float(c) for c in self.cleaned_data['reactantsCoeff']])
cleaned_reactantsName = property(
lambda self: self.cleaned_data['reactantsName'])
cleaned_submit = property(
lambda self: self._GetWithDefault('submit', 'Update'))
class ReactionGraphForm(ReactionForm):
vary_ph = forms.BooleanField(required=False)
vary_is = forms.BooleanField(required=False)
vary_pmg = forms.BooleanField(required=False)
# Convenience accessors for clean data with defaults.
cleaned_vary_ph = property(
lambda self: self._GetWithDefault('vary_ph', False))
cleaned_vary_pmg = property(
lambda self: self._GetWithDefault('vary_pmg', False))
cleaned_vary_is = property(
lambda self: self._GetWithDefault('vary_is', False))
class CompoundForm(BaseReactionForm):
compoundId = forms.CharField(max_length=50)
submit = forms.ChoiceField(required=False,
choices=[('Update', 'update'),
('Reset', 'reset')])
# Convenience accessors for clean data with defaults.
cleaned_compoundId = property(lambda self: self.cleaned_data['compoundId'])
# we need to create the following properties in order for this form
# to impersonate a reaction_form (something we need to do for creating
# a Reaction object using .FromForm(form))
cleaned_reactionId = property(lambda self: None)
cleaned_reactantsId = property(lambda self: [self.cleaned_compoundId])
cleaned_reactantsCoeff = property(lambda self: [1])
cleaned_reactantsName = property(lambda self: [None])
cleaned_submit = property(
lambda self: self._GetWithDefault('submit', 'Update'))
| mit | 2,429,056,739,897,362,400 | 37.549669 | 79 | 0.655214 | false |
USGSDenverPychron/pychron | pychron/spectrometer/local_mftable_history_view.py | 1 | 3951 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Float, List
from traitsui.api import View, UItem, VGroup, HSplit, TabularEditor
from traitsui.editors import TextEditor
from traitsui.group import HGroup
from traitsui.tabular_adapter import TabularAdapter
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.git_archive.history import GitArchiveHistory, GitArchiveHistoryView, DiffView
def left_group():
return VGroup(HGroup(UItem('left_message', style='readonly'),
UItem('left_date', style='readonly')),
UItem('left',
style='custom',
editor=TextEditor(read_only=True)))
def right_group():
return VGroup(HGroup(UItem('right_message', style='readonly'),
UItem('right_date', style='readonly')),
UItem('right',
style='custom',
editor=TextEditor(read_only=True)))
class ItemAdapter(TabularAdapter):
pass
class FieldItem(HasTraits):
pass
class MFTableDiffView(DiffView):
diff_items = List
def __init__(self, *args, **kw):
super(MFTableDiffView, self).__init__(*args, **kw)
self._load_diff()
def _load_diff(self):
lkeys, lvalues = self._parse_txt(self.left)
rkeys, rvalues = self._parse_txt(self.right)
self.item_adapter = ItemAdapter()
if lkeys == rkeys:
cols = [(v, v) for v in lkeys]
self.item_adapter.columns = cols
for lv in lvalues:
iso = lv[0]
rv = next((ri for ri in rvalues if ri[0] == iso))
d = FieldItem(iso=iso)
for i, k in enumerate(lkeys[1:]):
dv = float(lv[i + 1]) - float(rv[i + 1])
d.add_trait(k, Float(dv))
self.diff_items.append(d)
def _parse_txt(self, txt):
lines = txt.split('\n')
keys = lines[0].split(',')
data = [line.split(',') for line in lines[1:] if line]
return keys, data
def traits_view(self):
v = View(VGroup(HSplit(left_group(), right_group()),
UItem('diff_items', editor=TabularEditor(editable=False,
adapter=self.item_adapter))),
title='Diff',
width=900,
buttons=['OK'],
kind='livemodal',
resizable=True)
return v
class LocalMFTableHistory(GitArchiveHistory):
diff_klass = MFTableDiffView
class LocalMFTableHistoryView(GitArchiveHistoryView):
pass
# if __name__ == '__main__':
# r = '/Users/ross/Sandbox/gitarchive'
# gh = LocalMFTableHistory(r, '/Users/ross/Sandbox/ga_test.txt')
#
# gh.load_history('ga_test.txt')
#
# gh.selected = [gh.items[5], gh.items[6]]
# gh._diff_button_fired()
# # ghv = LocalMFTableHistoryView(model=gh)
# # ghv.configure_traits(kind='livemodal')
# ============= EOF =============================================
| apache-2.0 | 8,448,448,464,310,305,000 | 33.060345 | 94 | 0.539863 | false |
yglazko/socorro | socorro/unittest/external/es/base.py | 3 | 27601 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
import random
import uuid
from distutils.version import LooseVersion
from elasticsearch.helpers import bulk
from functools import wraps
from configman import ConfigurationManager, environment
from nose import SkipTest
from socorro.external.es.index_creator import IndexCreator
from socorro.middleware.middleware_app import MiddlewareApp
from socorro.unittest.testbase import TestCase
DEFAULT_VALUES = {
'elasticsearch.elasticsearch_class': (
'socorro.external.es.connection_context.ConnectionContext'
),
'resource.elasticsearch.elasticsearch_default_index': (
'socorro_integration_test'
),
'resource.elasticsearch.elasticsearch_index': (
'socorro_integration_test_reports'
),
'resource.elasticsearch.elasticsearch_timeout': 10,
}
CRON_JOB_EXTA_VALUES = {
'resource.elasticsearch.backoff_delays': [1],
}
SUPERSEARCH_FIELDS = {
'signature': {
'name': 'signature',
'in_database_name': 'signature',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': True,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'multi_field',
'fields': {
'signature': {
'type': 'string'
},
'full': {
'type': 'string',
'index': 'not_analyzed'
}
}
},
},
'product': {
'name': 'product',
'in_database_name': 'product',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': True,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'multi_field',
'fields': {
'product': {
'type': 'string'
},
'full': {
'type': 'string',
'index': 'not_analyzed'
}
}
},
},
'version': {
'name': 'version',
'in_database_name': 'version',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string',
'analyzer': 'keyword'
},
},
'platform': {
'name': 'platform',
'in_database_name': 'os_name',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': True,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'multi_field',
'fields': {
'os_name': {
'type': 'string'
},
'full': {
'type': 'string',
'index': 'not_analyzed'
}
}
},
},
'release_channel': {
'name': 'release_channel',
'in_database_name': 'release_channel',
'data_validation_type': 'enum',
'query_type': 'enum',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string'
},
},
'date': {
'name': 'date',
'in_database_name': 'date_processed',
'data_validation_type': 'datetime',
'query_type': 'date',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'date',
'format': 'yyyy-MM-dd\'T\'HH:mm:ssZZ||yyyy-MM-dd\'T\'HH:mm:ss.SSSSSSZZ'
},
},
'address': {
'name': 'address',
'in_database_name': 'address',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string'
},
},
'build_id': {
'name': 'build_id',
'in_database_name': 'build',
'data_validation_type': 'int',
'query_type': 'number',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'long'
},
},
'reason': {
'name': 'reason',
'in_database_name': 'reason',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': [],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string'
},
},
'email': {
'name': 'email',
'in_database_name': 'email',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string',
'analyzer': 'keyword'
},
},
'url': {
'name': 'url',
'in_database_name': 'url',
'data_validation_type': 'str',
'query_type': 'str',
'namespace': 'processed_crash',
'form_field_choices': None,
'permissions_needed': ['crashstats.view_pii'],
'default_value': None,
'has_full_version': False,
'is_exposed': True,
'is_returned': True,
'is_mandatory': False,
'storage_mapping': {
'type': 'string',
'analyzer': 'keyword'
},
},
'uuid': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'uuid',
'is_exposed': False,
'is_mandatory': False,
'is_returned': True,
'name': 'uuid',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'index': 'not_analyzed',
'type': 'string'
}
},
'process_type': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': [
'any', 'browser', 'plugin', 'content', 'all'
],
'has_full_version': False,
'in_database_name': 'process_type',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'process_type',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'user_comments': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'user_comments',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'user_comments',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'fields': {
'full': {
'index': 'not_analyzed',
'type': 'string'
},
'user_comments': {
'type': 'string'
}
},
'type': 'multi_field'
}
},
'accessibility': {
'data_validation_type': 'bool',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'Accessibility',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'accessibility',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'bool',
'storage_mapping': {
'type': 'boolean'
}
},
'b2g_os_version': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'B2G_OS_Version',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'b2g_os_version',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'analyzer': 'keyword',
'type': 'string'
}
},
'bios_manufacturer': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'BIOS_Manufacturer',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'bios_manufacturer',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'analyzer': 'keyword',
'type': 'string'
}
},
'vendor': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'Vendor',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'vendor',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'useragent_locale': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'useragent_locale',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'useragent_locale',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'analyzer': 'keyword',
'type': 'string'
}
},
'is_garbage_collecting': {
'data_validation_type': 'bool',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'IsGarbageCollecting',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'is_garbage_collecting',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'bool',
'storage_mapping': {
'type': 'boolean'
}
},
'available_virtual_memory': {
'data_validation_type': 'int',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'AvailableVirtualMemory',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'available_virtual_memory',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'number',
'storage_mapping': {
'type': 'long'
}
},
'install_age': {
'data_validation_type': 'int',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'install_age',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'install_age',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'number',
'storage_mapping': {
'type': 'long'
}
},
'plugin_filename': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'PluginFilename',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'plugin_filename',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'fields': {
'PluginFilename': {
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'plugin_name': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'PluginName',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'plugin_name',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'fields': {
'PluginName': {
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'plugin_version': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'PluginVersion',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'plugin_version',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'fields': {
'PluginVersion': {
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'android_model': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'Android_Model',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'android_model',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'fields': {
'Android_Model': {
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'dump': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'dump',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'dump',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'index': 'not_analyzed',
'type': 'string'
}
},
'cpu_info': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': True,
'in_database_name': 'cpu_info',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'cpu_info',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'fields': {
'cpu_info': {
'analyzer': 'standard',
'index': 'analyzed',
'type': 'string'
},
'full': {
'index': 'not_analyzed',
'type': 'string'
}
},
'type': 'multi_field'
}
},
'dom_ipc_enabled': {
'data_validation_type': 'bool',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'DOMIPCEnabled',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'dom_ipc_enabled',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'bool',
'storage_mapping': {
'null_value': False,
'type': 'boolean'
}
},
'app_notes': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'app_notes',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'app_notes',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'hang_type': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': [
'any', 'crash', 'hang', 'all'
],
'has_full_version': False,
'in_database_name': 'hang_type',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'hang_type',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'enum',
'storage_mapping': {
'type': 'short'
}
},
'exploitability': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': [
'high', 'normal', 'low', 'none', 'unknown', 'error'
],
'has_full_version': False,
'in_database_name': 'exploitability',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'exploitability',
'namespace': 'processed_crash',
'permissions_needed': [
'crashstats.view_exploitability'
],
'query_type': 'enum',
'storage_mapping': {
'type': 'string'
}
},
'platform_version': {
'data_validation_type': 'str',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'os_version',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'platform_version',
'namespace': 'processed_crash',
'permissions_needed': [],
'query_type': 'string',
'storage_mapping': {
'type': 'string'
}
},
'write_combine_size': {
'data_validation_type': 'int',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'write_combine_size',
'is_exposed': True,
'is_mandatory': False,
'is_returned': True,
'name': 'write_combine_size',
'namespace': 'processed_crash.json_dump',
'permissions_needed': [],
'query_type': 'number',
'storage_mapping': {
'type': 'long'
}
},
'fake_field': {
'data_validation_type': 'enum',
'default_value': None,
'form_field_choices': None,
'has_full_version': False,
'in_database_name': 'fake_field',
'is_exposed': True,
'is_mandatory': False,
'is_returned': False,
'name': 'fake_field',
'namespace': 'raw_crash',
'permissions_needed': [],
'query_type': 'enum',
},
}
def minimum_es_version(minimum_version):
"""Skip the test if the Elasticsearch version is less than specified.
:arg minimum_version: string; the minimum Elasticsearch version required
"""
def decorated(test):
"""Decorator to only run the test if ES version is greater or
equal than specified.
"""
@wraps(test)
def test_with_version(self):
"Only run the test if ES version is not less than specified."
actual_version = self.connection.info()['version']['number']
if LooseVersion(actual_version) >= LooseVersion(minimum_version):
test(self)
else:
raise SkipTest
return test_with_version
return decorated
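# Example usage (hypothetical test method), gating a test on the running
# cluster's version:
#
#     @minimum_es_version('1.4')
#     def test_nested_aggregations(self):
#         ...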
class ElasticsearchTestCase(TestCase):
"""Base class for Elastic Search related unit tests. """
def __init__(self, *args, **kwargs):
super(ElasticsearchTestCase, self).__init__(*args, **kwargs)
self.config = self.get_mware_config()
es_context = self.config.elasticsearch.elasticsearch_class(
config=self.config.elasticsearch
)
creator_config = self.get_tuned_config(IndexCreator)
self.index_creator = IndexCreator(creator_config)
self.index_client = self.index_creator.get_index_client()
with es_context() as conn:
self.connection = conn
def setUp(self):
# Create the supersearch fields.
self.index_super_search_fields()
self.index_creator.create_socorro_index(
self.config.elasticsearch.elasticsearch_index
)
super(ElasticsearchTestCase, self).setUp()
def tearDown(self):
# Clear the test indices.
self.index_client.delete(
self.config.elasticsearch.elasticsearch_default_index
)
self.index_client.delete(
self.config.elasticsearch.elasticsearch_index
)
super(ElasticsearchTestCase, self).tearDown()
def get_tuned_config(self, sources, extra_values=None):
if not isinstance(sources, (list, tuple)):
sources = [sources]
mock_logging = mock.Mock()
config_definitions = []
for source in sources:
conf = source.get_required_config()
conf.add_option('logger', default=mock_logging)
config_definitions.append(conf)
values_source = DEFAULT_VALUES.copy()
values_source.update({'logger': mock_logging})
if extra_values:
values_source.update(extra_values)
config_manager = ConfigurationManager(
config_definitions,
app_name='testapp',
app_version='1.0',
app_description='Elasticsearch integration tests',
values_source_list=[environment, values_source],
argv_source=[],
)
return config_manager.get_config()
def get_mware_config(self, es_index=None):
extra_values = None
if es_index:
extra_values = {
'resource.elasticsearch.elasticsearch_index': es_index
}
return self.get_tuned_config(MiddlewareApp, extra_values=extra_values)
def index_super_search_fields(self, fields=None):
if fields is None:
fields = SUPERSEARCH_FIELDS
es_index = self.config.elasticsearch.elasticsearch_default_index
actions = []
for name, field in fields.iteritems():
action = {
'_index': es_index,
'_type': 'supersearch_fields',
'_id': name,
'_source': field,
}
actions.append(action)
bulk(
client=self.connection,
actions=actions,
)
self.index_client.refresh(index=[es_index])
def index_crash(self, processed_crash, raw_crash=None, crash_id=None):
if crash_id is None:
crash_id = str(uuid.UUID(int=random.getrandbits(128)))
if raw_crash is None:
raw_crash = {}
doc = {
'crash_id': crash_id,
'processed_crash': processed_crash,
'raw_crash': raw_crash,
}
res = self.connection.index(
index=self.config.elasticsearch.elasticsearch_index,
doc_type=self.config.elasticsearch.elasticsearch_doctype,
id=crash_id,
body=doc,
)
return res['_id']
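    # Usage sketch (inside a test; crash fields are illustrative): index one
    # crash, then refresh so it is searchable before querying:
    #   crash_id = self.index_crash({'product': 'Firefox'})
    #   self.refresh_index()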
def index_many_crashes(
self, number, processed_crash=None, raw_crash=None, loop_field=None
):
if processed_crash is None:
processed_crash = {}
if raw_crash is None:
raw_crash = {}
actions = []
for i in range(number):
crash_id = str(uuid.UUID(int=random.getrandbits(128)))
if loop_field is not None:
processed_copy = processed_crash.copy()
processed_copy[loop_field] = processed_crash[loop_field] % i
else:
processed_copy = processed_crash
doc = {
'crash_id': crash_id,
'processed_crash': processed_copy,
'raw_crash': raw_crash,
}
action = {
'_index': self.config.elasticsearch.elasticsearch_index,
'_type': self.config.elasticsearch.elasticsearch_doctype,
'_id': crash_id,
'_source': doc,
}
actions.append(action)
bulk(
client=self.connection,
actions=actions,
)
self.refresh_index()
def refresh_index(self):
self.index_client.refresh(
index=self.config.elasticsearch.elasticsearch_index
)
| mpl-2.0 | -6,022,690,469,080,127,000 | 28.968512 | 83 | 0.490489 | false |
dariost/utility | pi.py | 1 | 1131 | #!/usr/bin/env python3
#####################################################
# #
# License: Apache License 2.0 #
# Author: Dario Ostuni <[email protected]> #
# #
#####################################################
import sys
def fatt(n):
tmp = 1
for i in range(1, n + 1):
tmp *= i
return tmp
def term(n):
num = ((-1)**n)*fatt(4*n)*(21460*n+1123)
den = (fatt(n)**4)*(14112**(2*n))
return num, den
def mcd(a, b):
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
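# The loop below sums the Ramanujan-type series
#   S = sum_{n>=0} (-1)^n * (4n)! * (21460n + 1123) / ((n!)^4 * 14112^(2n))
# which converges to 3528/pi, gaining roughly 6 digits per term (hence
# prec = cfr // 5 + 10); the tail of the script prints digits of 3528/S.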
if len(sys.argv) == 2:
cfr = int(sys.argv[1])
else:
cfr = int(input("Number of digits: "))
prec = cfr // 5 + 10
num = 0
den = 1
for i in range(prec):
tmp_n, tmp_d = term(i)
num = num * tmp_d + den * tmp_n
den = den * tmp_d
gcd = mcd(num, den)
num //= gcd
den //= gcd
num, den = den * 3528, num
num -= 3 * den
print("3.", end='')
for i in range(cfr):
num *= 10
print(num // den, end='')
num %= den
print(flush=True)
| apache-2.0 | 1,180,066,181,916,586,800 | 20.75 | 53 | 0.402299 | false |
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/custom/8009_HotSpringsBuffs/__init__.py | 1 | 2538 | import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
from com.l2scoria.gameserver.datatables import SkillTable
from com.l2scoria.util.random import Rnd
qn = "8009_HotSpringsBuffs"
HSMOBS = [21316, 21317, 21321, 21322, 21314, 21319]
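# Hot Springs disease debuff skill ids used below (names follow the usual
# L2J datapack convention, noted as an aid -- verify against your skill data):
#   4551 = rheumatism, 4552 = cholera, 4553 = flu, 4554 = malaria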
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAttack (self,npc,player,damage,isPet):
npcId = npc.getNpcId()
if npcId in HSMOBS:
if (Rnd.get(2) == 1):
if player.getFirstEffect(int(4554)):
malaria = player.getFirstEffect(int(4554)).getLevel()
if (Rnd.get(100) < 15):
if malaria < 10:
newmalaria = int(malaria + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4554,newmalaria))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4554,1))
elif npcId == 21317 or npcId == 21322 :
if player.getFirstEffect(int(4553)):
flu = player.getFirstEffect(int(4553)).getLevel()
if (Rnd.get(100) < 15):
if flu < 10:
newflu = int(flu + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4553,newflu))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4553,1))
elif npcId == 21319 or npcId == 21316 :
if player.getFirstEffect(int(4552)):
holera = player.getFirstEffect(int(4552)).getLevel()
if (Rnd.get(100) < 30):
if holera < 10:
newholera = int(holera + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4552,newholera))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4552,1))
else:
if player.getFirstEffect(int(4551)):
rheumatism = player.getFirstEffect(int(4551)).getLevel()
if (Rnd.get(100) < 30):
if rheumatism < 10:
newrheumatism = int(rheumatism + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4551,newrheumatism))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4551,1))
return
QUEST = Quest(8009,qn,"custom")
for i in HSMOBS:
QUEST.addAttackId(i) | gpl-3.0 | -930,510,215,556,508,700 | 36.338235 | 78 | 0.611505 | false |
simleo/pydoop-features | pyfeatures/app/summarize.py | 1 | 2371 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Summarize the contents of an output (featureset) Avro container.
"""
import os
import warnings
try:
from pyavroc import AvroFileReader
except ImportError:
from pyfeatures.pyavroc_emu import AvroFileReader
warnings.warn("pyavroc not found, using standard avro lib\n")
def add_parser(subparsers):
parser = subparsers.add_parser("summarize", description=__doc__)
parser.add_argument("in_fn", metavar="FILE", help="Avro container file")
parser.add_argument("-o", "--out-fn", metavar="FILE", help="output file")
parser.set_defaults(func=run)
return parser
def run(logger, args, extra_argv=None):
if not args.out_fn:
tag = os.path.splitext(os.path.basename(args.in_fn))[0]
args.out_fn = "%s.summary" % tag
str_keys = ["name", "img_path"]
int_keys = ["series", "z", "c", "t", "w", "h", "x", "y"]
d = {"n_features": set()}
with open(args.in_fn) as f:
reader = AvroFileReader(f)
for r in reader:
d["n_features"].add(
sum(len(v) for k, v in r.iteritems() if type(v) is list)
)
for k in str_keys:
d.setdefault(k, set()).add(r[k])
for k in int_keys:
d.setdefault(k, set()).add(int(r[k]))
logger.info("writing to %s", args.out_fn)
with open(args.out_fn, "w") as fo:
for k in str_keys:
fo.write("%s: %s\n" % (k, ", ".join(sorted(d[k]))))
for k in int_keys:
v = sorted(d[k])
if len(v) > 2 and v == range(v[0], v[-1] + 1):
fo.write("%s: %d-%d\n" % (k, v[0], v[-1]))
else:
fo.write("%s: %s\n" % (k, ", ".join(map(str, v))))
| apache-2.0 | 5,210,433,854,678,834,000 | 33.362319 | 77 | 0.601012 | false |
WBradbeer/port-routing | lp_helpers.py | 1 | 2253 | import itertools
import numpy as np
def flatten_2(data):
vector = []
for i in data:
for j in i:
vector.append(j)
return vector
def flatten_3(data):
return flatten_2(flatten_2(data))
def reshape_2D(vector, rows, cols):
data = []
for i in range(0, rows):
data.append([])
for j in range(0, cols):
data[i].append(vector[j + i*cols])
return data
def reshape_3D(vector, F, D):
data = []
for i in range(0, F):
data.append([])
for j in range(0, F):
data[i].append([])
for k in range(0, D):
data[i][j].append(vector[k + j*D + i*F*D])
return data
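# Round-trip sketch: reshape_2D(flatten_2(m), rows, cols) rebuilds m, and
# reshape_3D(flatten_3(t), F, D) rebuilds an (F, F, D) nested list t.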
def combine_matrices(d1, d2):
combined = []
for i in range(0, len(d1)):
combined.append([])
for k in range(0, len(d1[i])):
combined[i].append([])
for j in range(0, len(d2[k])):
combined[i][k].append(d1[i][k] + d2[k][j])
return combined
def sum_ij_over_k(F, D):
block = np.tile(np.identity(D), F)
zeros = np.zeros_like(block)
id_f = np.identity(F)
    # pass a list (not a generator) to np.hstack; newer numpy rejects generators
    return flatten_2([np.hstack([block if col == 1 else zeros for col in row])
                      for row in id_f])
def row_sums(row_num, col_num):
ident = np.identity(row_num)
return [flatten_2([[i] * col_num for i in ident_row]) for ident_row in ident]
def col_sums(row_num, col_num):
ident = np.identity(col_num)
    return np.hstack([ident for i in range(0, row_num)])
def scanner_constraints(scanning, F, D):
scanning = [abs(x - 1) for x in scanning]
return flatten_2([[x]*D for x in scanning]*F)
def generate_x(F, D):
x = []
for i in range(0, F):
for k in range(0, F):
for j in range(0, D):
x.append("X({},{},{})".format(i+1, k+1, j+1))
return x
def show_eq(x, coefs, b):
eq = ""
for i in range(0, len(x)):
eq += str(coefs[i]) + "*" + x[i] + " "
return eq + "= " + str(b)
def gen_scanning_combs(F):
for comb in itertools.product([0,1], repeat=F):
yield comb
def gen_scanning_bound(combs, scanner_capacity, base=[]):
for comb in combs:
yield np.append(base, np.array(comb)*scanner_capacity)
| mit | -9,073,479,037,994,305,000 | 23.758242 | 81 | 0.537949 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/env.py | 1 | 2844 |
import dsz
import dsz.ui
import dsz.script
SELF = int(dsz.script.Env['script_command_id'])
def get(env, cmdid=0, addr=dsz.script.Env['target_address']):
if (not dsz.env.Check(env, cmdid, addr)):
return None
else:
return unicode(dsz.env.Get(env, cmdid, addr), 'utf_8')
def set(env, value, cmdid=0, addr=dsz.script.Env['target_address']):
    # NB: the module-level bool() helper defined below shadows the builtin,
    # so the old `bool is type(value)` test could never be true
    if value is True or value is False:
        translated_value = str(value).upper()
elif (unicode is type(value)):
translated_value = value.encode('utf8')
else:
translated_value = str(value)
dsz.env.Set(env, translated_value, cmdid, addr)
def delete(env, cmdid=0, addr=dsz.script.Env['target_address']):
dsz.env.Delete(env, cmdid, addr)
def upper(env, cmdid=0, addr=dsz.script.Env['target_address']):
value = get(env, cmdid, addr)
if value:
return value.upper()
else:
return value
def lower(env, cmdid=0, addr=dsz.script.Env['target_address']):
value = get(env, cmdid, addr)
return (value.lower() if value else value)
def bool(env, cmdid=0, addr=dsz.script.Env['target_address']):
value = upper(env, cmdid, addr)
return ((value == 'TRUE') if value else value)
def numeric(env, base=10, cmdid=0, addr=dsz.script.Env['target_address']):
value = get(env, cmdid, addr)
return (int(value, base) if value else value)
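# Usage sketch (requires a Dsz scripting context; illustrative values):
#   set('OPS_TEST', 30)
#   numeric('OPS_TEST')  # -> 30
#   delete('OPS_TEST')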
if (__name__ == '__main__'):
if (not dsz.script.IsLocal()):
import sys
dsz.ui.Echo('To run unit tests, you must be in a local context.', dsz.ERROR)
sys.exit((-1))
import unittest
UNICODE = u'\u0100\u0101\u0102\u0103\u0104\u0105\u0106\u0107\u0108\u0109\u010a'
ASCII = 'The quick brown fox jumped over the lazy dog.'
LOWER = ASCII.lower()
UPPER = ASCII.upper()
TEST = 'OPS_ENV_TEST'
class EnvTest(unittest.TestCase, ):
def testASCII(self):
set(TEST, ASCII)
self.assertEqual(get(TEST), ASCII)
def testAlwaysUnicode(self):
set(TEST, ASCII)
self.assertEqual(type(get(TEST)), unicode)
def testUNICODE(self):
set(TEST, UNICODE)
self.assertEqual(get(TEST), UNICODE)
def testNumericInt(self):
set(TEST, 5)
self.assertEqual(numeric(TEST), 5)
def testNumericHex(self):
set(TEST, '0xBEEF')
self.assertEqual(numeric(TEST, base=16), 48879)
def testBoolTrue(self):
set(TEST, True)
self.assertTrue(bool(TEST))
def testBoolFalse(self):
set(TEST, False)
self.assertFalse(bool(TEST))
def testLower(self):
set(TEST, UPPER)
self.assertEqual(lower(TEST), LOWER)
def testUpper(self):
set(TEST, LOWER)
self.assertEqual(upper(TEST), UPPER)
unittest.main() | unlicense | -8,656,190,649,097,942,000 | 29.923913 | 84 | 0.60443 | false |
lixingcong/shadowsocks_analysis | shadowsocks/server.py | 1 | 5564 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Server side.

Note: I read local.py before server.py; apart from the multi-user handling,
the code follows the same logic. Some parts are uncommented -- see local.py.
'''
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import utils, daemon, encrypt, eventloop, tcprelay, udprelay, \
asyncdns
def main():
utils.check_python()
# is_local=false
config = utils.get_config(False)
daemon.daemon_exec(config)
utils.print_shadowsocks()
    # Support multiple clients: port_password maps each port to a password
if config['port_password']:
if config['password']:
logging.warn('warning: port_password should not be used with '
'server_port and password. server_port and password '
'will be ignored')
else:
config['port_password'] = {}
server_port = config['server_port']
        # With a multi-user config, use a 'port -> password' mapping.
if type(server_port) == list:
for a_server_port in server_port:
config['port_password'][a_server_port] = config['password']
else:
config['port_password'][str(server_port)] = config['password']
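        # e.g. config['port_password'] == {'8387': 'pw1', '8388': 'pw2'}
        # (illustrative ports/passwords -- one relay pair per entry)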
# Create an instance of the cipher class
encrypt.try_cipher(config['password'], config['method'])
tcp_servers = []
udp_servers = []
dns_resolver = asyncdns.DNSResolver()
    # A single server can listen on multiple ports.
    # For each port, create a dedicated relay handler.
for port, password in config['port_password'].items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
logging.info("starting server at %s:%d" %
(a_config['server'], int(port)))
        # Append each relay to the TCP and UDP server lists.
tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False))
udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False))
def run_server():
        # Quit-signal handler: close all sockets and release resources.
def child_handler(signum, _):
logging.warn('received SIGQUIT, doing graceful shutting down..')
            # Close all the sockets in a single statement.
# map(function, sequence[, sequence, ...]) -> list
# Return a list of the results of applying the function to the items of the argument sequence(s).
list(map(lambda s: s.close(next_tick = True),
tcp_servers + udp_servers))
        # On receiving the quit signal, call child_handler to shut down.
signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
child_handler)
        # On receiving SIGINT, call int_handler to exit.
def int_handler(signum, _):
sys.exit(1)
signal.signal(signal.SIGINT, int_handler)
try:
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
            # Add every listening server to the event loop in one statement.
list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))
loop.run()
except (KeyboardInterrupt, IOError, OSError) as e:
logging.error(e)
if config['verbose']:
import traceback
traceback.print_exc()
os._exit(1)
# Shadowsocks supports spawning child processes like nginx.
# You can use --workers to specify how many workers to use.
# This argument is only supported on Unix and ssserver.
# Currently UDP relay does not work well on multiple workers.
    # As noted above, the worker count can also be set in the config file;
    # this only takes effect on Unix/Linux.
    # Known bug: with workers > 1, UDP relay may not work reliably.
if int(config['workers']) > 1:
if os.name == 'posix':
children = []
is_child = False
for i in range(0, int(config['workers'])):
r = os.fork()
if r == 0:
logging.info('worker started')
is_child = True
run_server()
break
else:
children.append(r)
if not is_child:
def handler(signum, _):
for pid in children:
try:
os.kill(pid, signum)
os.waitpid(pid, 0)
except OSError: # child may already exited
pass
sys.exit()
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
# master
for a_tcp_server in tcp_servers:
a_tcp_server.close()
for a_udp_server in udp_servers:
a_udp_server.close()
dns_resolver.close()
for child in children:
os.waitpid(child, 0)
else:
logging.warn('worker is only available on Unix/Linux')
run_server()
else:
run_server()
if __name__ == '__main__':
main()
| mit | 9,211,529,882,349,999,000 | 33.353741 | 111 | 0.543762 | false |
JasonJW/mcoc-cogs | rssrelay/rssrelay.py | 1 | 18047 | import pathlib
import asyncio # noqa: F401
import discord # noqa: F401
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import checks
from .utils.chat_formatting import pagify
path = 'data/rssrelay'
class RSSRelay:
"""Configureable Announcements."""
__version__ = "3.0.1"
__author__ = "mikeshardmind (Sinbad#0001)"
__gimped__ = "JJW$8071"
def __init__(self, bot):
self.bot = bot
try:
self.settings = dataIO.load_json(path + '/settings.json')
except Exception:
self.settings = {}
# this shouldn't be possible, but I've gotten multiple reports
# of it happening so...
if self.settings.get('optout', []) is None:
self.settings['optout'] = []
def save_settings(self):
dataIO.save_json(path + '/settings.json', self.settings)
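    # Settings layout (sketch): server ids map to channel config, plus one
    # reserved 'optout' key holding opted-out user/server ids:
    #   {'<server_id>': {'channel': '<channel_id>'}, 'optout': ['<id>', ...]}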
@checks.is_owner()
@commands.command(name="rssrelay", pass_context=True)
async def rssrelay(self, ctx, *, msg):
"""Announces a message to all channels configured."""
await self.relay_send(msg)
async def relay_send(self, msg):
        # materialize as a list: a lazy map() is exhausted by the loop below,
        # which would make every later membership test fail
        server_ids = [s.id for s in self.bot.servers]
cases = {'exceptions': [],
'permissions': [],
'not_found': [],
'not_server': [],
'successes': []}
for server_id in server_ids:
if server_id in self.settings:
server = self.bot.get_server(server_id)
channel = server.get_channel(
self.settings[server_id]['channel'])
if channel is None:
cases['not_found'].append(server)
elif channel.permissions_for(server.me).send_messages:
try:
await self.bot.send_message(channel, msg)
except Exception:
cases['exceptions'].append(channel)
else:
cases['successes'].append(channel)
else:
cases['permissions'].append(channel)
        for k in self.settings:
            if k != 'optout' and k not in server_ids:
                cases['not_server'].append(k)
output = "Succesfully sent announcements to {} of {} locations".format(
len(cases['successes']), len(self.settings))
if len(cases['successes']) > 0:
output += "\n\nSuccessful:"
for i in cases['successes']:
output += "\n{0.server.name} | {0.name}".format(i)
if len(cases['permissions']) > 0:
output += "\n\nI lack permissions to send to these locations:\n" \
"Guild | Channel"
for i in cases['permissions']:
output += "\n{0.server.name} | {0.name}".format(i)
if len(cases['exceptions']) > 0:
output += "\n\nI ran into unknown issues while trying " \
"to send to the following\nGuild | Channel"
for i in cases['exceptions']:
output += "\n{0.server.name} | {0.name}".format(i)
if len(cases['not_found']) > 0:
output += "\n\nThe following servers have entries for " \
"channels that no longer exist"
for i in cases['not_found']:
output += "\n{} ".format(i.name)
if len(cases['not_server']) > 0:
output += "\n\nI have a few server IDs that I can't " \
"seem to find in my active servers:"
for i in cases['not_server']:
output += "{} ".format(i)
for page in pagify(output):
await self.bot.say(page)
@commands.group(name="rssrelayset", pass_context=True)
async def rssrelayset(self, ctx):
"""Settings for rssrelay"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@checks.serverowner_or_permissions(manage_server=True)
@rssrelayset.command(name="addchan", pass_context=True)
async def addchan(self, ctx, *, channel: discord.Channel=None):
"""adds a channel to the rssrelay's channel list
defaults to the current channel, can optionally be given
a channel
Will not announce to Direct Message"""
if channel is None:
channel = ctx.message.channel
if channel.is_private:
return await self.bot.say(
"Ignoring Request: Invalid place to send announcements"
)
server = channel.server
member = server.get_member(ctx.message.author.id)
if member is None:
return await self.bot.say(
"Ignoring request: You don't have permission to make "
"announcements for this server (requires manage server)"
)
if not member.server_permissions.manage_server:
return await self.bot.say(
"Ignoring request: You don't have permission to make "
"announcements for this server (requires manage server)"
)
if channel.permissions_for(server.me).send_messages is False:
await self.bot.say(
"Warning: I cannot speak in that channel I "
"will add it to the list, but announcements"
" will not be sent if this is not fixed"
)
if server.id not in self.settings:
self.settings[server.id] = {'channel': channel.id}
else:
self.settings[server.id]['channel'] = channel.id
self.save_settings()
await self.bot.say(
"Announcement channel for the associated server has been set"
)
@checks.is_owner()
@rssrelayset.command(name="getinfo", pass_context=True)
async def getinfo(self, ctx):
"""
get a list of servers without a channel set,
with a channel set that is invalid,
and with a channel set without permissions
"""
self.info = {
'no_chan': [],
'invalid_chan': [],
'lacking_perms': []
}
for server in self.bot.servers:
if server.id in self.settings:
channel = server.get_channel(
self.settings[server.id]['channel']
)
if channel is None:
self.info['invalid_chan'].append(server)
elif not channel.permissions_for(
server.me).send_messages:
self.info['lacking_perms'].append(server)
else:
self.info['no_chan'].append(server)
output = "Servers without a configured channel:"
for server in self.info['no_chan']:
output += "\n{0.id} : {0.name}".format(server)
output += "\nServers with a channel configured that no longer exists:"
for server in self.info['invalid_chan']:
output += "\n{0.id} : {0.name}".format(server)
output += "\nServer where I cannot speak in the configured channel:"
for server in self.info['lacking_perms']:
output += "\n{0.id} : {0.name}".format(server)
for page in pagify(output):
await self.bot.say(page)
@checks.is_owner()
@rssrelayset.command(name='begincleanup', pass_context=True)
async def cleanup_entries(self, ctx):
"""
cleans up bad entries in settings
"""
self.info = {
'no_chan': [],
'invalid_chan': [],
'lacking_perms': []
}
for server in self.bot.servers:
if server.id in self.settings:
channel = server.get_channel(
self.settings[server.id]['channel']
)
if channel is None:
self.info['invalid_chan'].append(server)
elif not channel.permissions_for(
server.me).send_messages:
self.info['lacking_perms'].append(server)
else:
self.info['no_chan'].append(server)
        server_ids = [s.id for s in self.bot.servers]
        no_srv = [
            i for i in self.settings.keys()
            if i != 'optout' and i not in server_ids
        ]
self.settings = {
k: v for k, v in self.settings.items()
if k not in no_srv
}
self.save_settings()
output = 'I removed entries for servers I am not in.'
if any(len(v) > 0 for k, v in self.info.items()):
output += (
'\nI have also gathered '
'information about misconfigured channels. '
'If you would like to send '
'an automtically generated message to those '
'server owners about fixing this, use '
'`{0.prefix}rssrelayset messageforfix`.'
'\nIf you would instead like to remove '
'those entries from settings, '
'use `{0.prefix}rssrelayset cleansettings`'
).format(ctx)
else:
output += '\nI did not find any other issues.'
await self.bot.say(output)
@checks.is_owner()
@rssrelayset.command(name='cleansettings', pass_context=True)
async def cleanupsettings(self, ctx):
"""
removes all bad entries
"""
if not hasattr(self, 'info'):
return await self.bot.say(
"Use `{0.prefix}rssrelayset begincleanup` first".format(ctx)
)
bad_server_ids = [
s.id for s in list(
self.info['no_chan']
+ self.info['lacking_perms']
+ self.info['invalid_chan']
)
]
if len(bad_server_ids) == 0:
            return await self.bot.say(
                'Are you sure there are any bad entries?'
                '\nYou can use `{}rssrelayset getinfo` '
                'to be certain.'.format(ctx.prefix)
            )
self.settings = {
k: v for k, v in self.settings.items()
if k not in bad_server_ids
}
self.save_settings()
        await self.bot.say('Invalid entries have been removed')
@checks.is_owner()
@rssrelayset.command(name='messageforfix', pass_context=True)
async def messageforconfigure(self, ctx):
"""
message each server owner about configuring announcements
"""
if not hasattr(self, 'info'):
return await self.bot.say(
"Use `{}rssrelayset getinfo` first".format(ctx.prefix)
)
relevant_servers = [srv for srv in self.bot.servers
if srv in list(self.info['invalid_chan']
+ self.info['no_chan']
+ self.info['lacking_perms'])
and srv.id not in self.settings.get('optout', [])]
who = set(s.owner.id for s in relevant_servers)
for w in who:
if self.settings.get('optout', []) is None:
# This really shouldn't be possible
# Yet, reports of it have happened.
self.settings['optout'] = []
if w in self.settings.get('optout', []):
continue
send = ("Hey, This is a message issued by my owner to inform "
"you that you aren't recieving "
"announcements about the bot in one or more of your "
"servers. If this is intentional, feel free to ignore "
"this message, otherwise, you can use "
"`rssrelayset addchan` "
"in a channel you would like the announcements in for the "
"servers.\n"
"You can opt out of future notifications about this"
"by using `rssrelayset optout`. Alternatively, "
"you can opt out of notifications about this for a "
"specific server by using `rssrelayset srvoptout` "
"from that server."
"\nIssue details:"
"\nServer Name: Issue\n")
w_servers = [s for s in relevant_servers if s.owner.id == w]
w_ic = [s for s in w_servers if s in self.info['invalid_chan']]
w_nc = [s for s in w_servers if s in self.info['no_chan']]
w_lp = [s for s in w_servers if s in self.info['lacking_perms']]
for server in w_servers:
if server in w_ic:
issue = "Announcement channel no longer exists"
elif server in w_nc:
issue = "Announcement channel not set"
elif server in w_lp:
issue = "I can't send messages in the announcement channel"
send += "{}: {}".format(server.name, issue)
where = discord.utils.get(self.bot.get_all_members(), id=w)
try:
await self.bot.send_message(where, send)
except discord.Forbidden:
await self.bot.say((
"{0.mention} isn't accepting DMs"
"\nI'm opting them out of future "
"messages about this.").format(where)
)
                # list.append returns None, so assigning its result wiped the
                # optout list (the source of the None guarded against above);
                # mutate the list in place instead
                self.settings.setdefault('optout', []).append(w)
self.save_settings()
@checks.serverowner()
@rssrelayset.command(name='srvoptout', pass_context=True, no_pm=True)
async def srvoptout(self, ctx):
"""
opt out of notifications about bot announcements
not being configured properly for the current server
"""
_id = ctx.message.server.id
self.settings['optout'] = self.settings.get('optout', [])
        if _id in self.settings['optout']:
return await self.bot.say(
"You already opted out for this server. "
"You can opt back in with "
"{}rssrelayset srvoptin".format(ctx.prefix)
)
self.settings['optout'].append(_id)
await self.bot.say(
"Okay, you won't be informed about misconfigured "
"announcement channels on this server. "
"If you cange your mind, you can opt back in with "
"`{}rssrelayset srvoptin`".format(ctx.prefix)
)
self.save_settings()
@checks.serverowner()
@rssrelayset.command(name='srvoptin', pass_context=True, no_pm=True)
async def srvoptin(self, ctx):
"""
opt into notifications about bot announcements
not being configured properly for the current server
"""
_id = ctx.message.server.id
self.settings['optout'] = self.settings.get('optout', [])
if _id not in self.settings['optout']:
return await self.bot.say(
"You aren't opted out."
)
self.settings['optout'].remove(_id)
await self.bot.say(
"You will recieve notifications about announcement "
"channels on this server again"
)
self.save_settings()
@rssrelayset.command(name="optout", pass_context=True)
async def optout(self, ctx):
"""
opt out of recieving notifications about
servers that are not configured for announcements
"""
_id = ctx.message.author.id
self.settings['optout'] = self.settings.get('optout', [])
if _id in self.settings['optout']:
return await self.bot.say(
"You already opted out. You can opt in again with "
"`{}rssrelayset optin`".format(ctx.prefix)
)
self.settings['optout'].append(_id)
await self.bot.say(
"Okay, you won't be informed about misconfigured "
"announcement channels. If you cange your mind, "
"you can opt back in with `{}rssrelayset optin`".format(
ctx.prefix)
)
self.save_settings()
@rssrelayset.command(name="optin", pass_context=True)
async def optin(self, ctx):
"""
opt into recieving notifications about
servers that are not configured for announcements
"""
_id = ctx.message.author.id
self.settings['optout'] = self.settings.get('optout', [])
if _id not in self.settings['optout']:
return await self.bot.say(
"You aren't opted out."
)
self.settings['optout'].remove(_id)
await self.bot.say(
"You will recieve notifications about announcement "
"channels now"
)
self.save_settings()
@checks.serverowner_or_permissions(manage_server=True)
@rssrelayset.command(name="delchan", pass_context=True)
async def delchan(self, ctx, *, channel: discord.Channel=None):
"""removes a channel from the announcements list
defaults to current if not given a channel"""
if channel is None:
channel = ctx.message.channel
if channel.is_private:
return await self.bot.say(
"This channel is not an announcement channel")
server = channel.server
if server.id in self.settings:
if channel.id == self.settings[server.id]['channel']:
self.settings[server.id]['channel'] = None
await self.bot.say("Channel removed from announcment list")
self.save_settings()
else:
await self.bot.say("This is not an announcement channel")
output = self.settings[server.id]['channel']
await self.bot.say("Hint: The announcement channel for the "
"server is <#{}>".format(output))
else:
await self.bot.say("This channel is not an announcement channel")
def setup(bot):
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
n = RSSRelay(bot)
bot.add_cog(n)
| mit | 899,777,887,668,815,400 | 37.810753 | 79 | 0.535823 | false |
lexdene/hbml | tests/template_test.py | 1 | 1579 | import os
import unittest
import hbml
def _file_content(path):
with open(path, 'r') as f:
content = f.read()
return content
DIRPATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'templates'
)
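# Fixture convention (as used by the helpers below): each tests/templates
# entry pairs foo.hbml with foo.html (compressed output) and
# foo.uncompress.html (pretty output).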
class TemplateTestCase(unittest.TestCase):
def _test_file(self, filename):
self.assertEqual(
_file_content(
os.path.join(
DIRPATH,
filename + '.html'
)
),
hbml.compile(
_file_content(
os.path.join(
DIRPATH,
filename + '.hbml'
)
)
) + "\n"
)
def _test_uncompress_file(self, filename):
self.assertEqual(
_file_content(
os.path.join(
DIRPATH,
filename + '.uncompress.html'
)
),
hbml.compile(
_file_content(
os.path.join(
DIRPATH,
filename + '.hbml'
)
),
compress_output=False
)
)
def testTemplates(self):
for filename in os.listdir(DIRPATH):
filename, extname = os.path.splitext(filename)
if extname == '.hbml':
with self.subTest(filename=filename):
self._test_file(filename)
self._test_uncompress_file(filename)
| gpl-3.0 | 8,569,644,330,344,459,000 | 23.671875 | 58 | 0.419886 | false |
digitalvectorz/syn | Syn/Unlink.py | 1 | 1819 | """
Simple unlink routines
@license: GPL-3+
@author: Paul Tagliamonte <[email protected]>
@date: August 8th, 2011, 00:10 -0000
Unlink a package from the filesystem
"""
import os.path
import Syn.Exceptions
import Syn.Policy.Db as D
import Syn.Policy.BinaryPackage as B
import Syn.Policy.Chroot as C
import Syn.PackageRegistry
def unlink(packageid):
"""
	Unlink a package from the filesystem, and get ready
	to do some awesomeness.
	@arg packageid: Name of the package to unlink from the filesystem.
"""
ROOT_PATH = D.DB_ROOT
pkgdb = Syn.PackageRegistry.PackageRegistry(ROOT_PATH)
cruldb = Syn.PackageRegistry.CrulRegistry(ROOT_PATH)
try:
pkgid = cruldb.getPackage(packageid)
pkginf = pkgdb.getPackage(packageid).format()
Syn.Log.l(Syn.Log.PEDANTIC,"Package DB Dump: %s" % pkgid)
package_root = pkgid['path']
popdir = Syn.Common.getcwd()
Syn.Sh.cd(ROOT_PATH + package_root)
Syn.Sh.cd("./" + B.FS_ROOT)
tree = Syn.Common.getDirectoryTree()
supercool = {}
for t in tree:
supercool[t[1:]] = os.path.abspath(t)
crul = cruldb.getPackage(packageid)
crul_status = crul['status']
crul_path = crul['path']
if crul_status != "LINKED":
raise Syn.Exceptions.PackageNotinstalledException("Package not linked! -- " + packageid)
else:
Syn.Log.l(Syn.Log.PEDANTIC,"Package linked. unlinking.")
cruldb.setPackage(packageid, {
"status" : "HALF-LINKED",
"path" : crul_path
})
cruldb.write()
for s in supercool:
Syn.Log.l(Syn.Log.PEDANTIC,"Removing: %s" % s)
Syn.Sh.rm(C.CHROOT + s)
cruldb.setPackage(packageid, {
"status" : "INSTALLED",
"path" : crul_path
})
cruldb.write()
Syn.Sh.cd(popdir)
except Syn.Exceptions.PackageNotFoundException as e:
Syn.Log.l(Syn.Log.VERBOSE,"Shit. No package found. Passing exception up")
raise e
| gpl-3.0 | 8,327,458,023,312,325,000 | 23.581081 | 91 | 0.692688 | false |
amd77/parker | matriculas/views.py | 1 | 1489 | # Create your views here.
from django.utils import timezone
# from django.views.generic import View, TemplateView, UpdateView
from django.views.generic import ListView, RedirectView
from models import Registro
from django.core.urlresolvers import reverse
class RedirectDia(RedirectView):
permanent = False
def get_redirect_url(self):
now = timezone.now()
return reverse("dia_ymd", args=[now.year, now.month, now.day])
class VistaDia(ListView):
model = Registro
template_name = "matriculas/dia.html"
def get_queryset(self):
self.year = int(self.kwargs["year"], 10)
self.month = int(self.kwargs["month"], 10)
self.day = int(self.kwargs.get("day", "0"), 10)
return Registro.coches_dia(self.year, self.month, self.day)
def get_context_data(self, **kwargs):
context = super(VistaDia, self).get_context_data(**kwargs)
context.update(Registro.estadisticas_dia(self.year, self.month, self.day))
return context
class VistaMes(ListView):
model = Registro
template_name = "matriculas/mes.html"
def get_queryset(self):
self.year = int(self.kwargs["year"], 10)
self.month = int(self.kwargs["month"], 10)
return Registro.coches_dia(self.year, self.month)
def get_context_data(self, **kwargs):
context = super(VistaMes, self).get_context_data(**kwargs)
context.update(Registro.estadisticas_mes(self.year, self.month))
return context
| gpl-2.0 | -4,901,623,654,913,660,000 | 32.088889 | 82 | 0.67495 | false |
hylje/sankarit | sankarit/models/adventure.py | 1 | 5383 | # -*- encoding: utf-8 -*-
import itertools
import random
import datetime
from collections import defaultdict
from flask import g
from sankarit import itemclasses, adventureclasses
from sankarit.models.item import Item
class Adventure(object):
@classmethod
def create(cls, adventureclass, heroes):
c = g.db.cursor()
start_time = datetime.datetime.now()
end_time = start_time + adventureclass.timedelta
c.execute("""
INSERT INTO adventure (start_time, end_time, class, gold)
VALUES (%(start_time)s, %(end_time)s, %(class)s, %(gold)s)
RETURNING id
""", {
"start_time": start_time,
"end_time": end_time,
"class": adventureclass.id,
"gold": 0
})
aid, = c.fetchone()
# Create relation to all heroes
values = list(itertools.chain(*((hero.hid, aid) for hero in heroes)))
query = """
INSERT INTO adventure_hero (hero_id, adventure_id)
VALUES """ + ", ".join("(%s, %s)" for hero in heroes)
c.execute(query, values)
g.db.commit()
return cls(aid, start_time, end_time, adventureclass.id, 0, heroes=heroes, adventureclass=adventureclass)
def __init__(self, aid, start_time, end_time, adventureclass_id,
gold, heroes=None, adventureclass=None):
self.aid = aid
self.start_time = start_time
self.end_time = end_time
self.adventureclass_id = adventureclass_id
self.adventureclass = adventureclass or adventureclasses.get_adventureclass(adventureclass_id)
self.gold = gold
self.heroes = heroes or self.get_heroes()
def get_heroes(self):
from sankarit.models.hero import Hero
c = g.db.cursor()
c.execute("""
SELECT h.id as id, h.name as name, h.class as class, h.xp as xp, h.player_id as player_id
FROM hero h, adventure_hero ah
WHERE ah.adventure_id=%(aid)s AND h.id=ah.hero_id
""", {"aid": self.aid})
ret = []
for hero in c.fetchall():
ret.append(Hero(*hero))
return ret
def can_be_claimed(self):
if self.end_time < datetime.datetime.now() and self.gold == 0:
return True
else:
return False
def resolve_reward(self):
# XXX maybe split this into more functions
c = g.db.cursor()
offense = (sum(hero.offense() for hero in self.heroes)
* random.random() * 4
* (self.adventureclass.timedelta.total_seconds() / 2400))
defense = (sum(hero.defense() for hero in self.heroes)
* sum(hero.defense_factor() for hero in self.heroes)
* random.random() * 3
* (self.adventureclass.timedelta.total_seconds() / 2400))
success_rating = min(offense, defense*3/2) * 5
loot_ratio = random.random()
gold_ratio = 1 - loot_ratio
loot_rating = int(success_rating * loot_ratio)
gold_rating = int(success_rating * gold_ratio)
xp_rating = int(success_rating * 0.5)
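        # e.g. a success_rating S splits into loot ~= S*r and gold ~= S*(1-r)
        # for a uniform r in [0, 1), with xp ~= S/2 (all truncated to ints)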
c.execute("""
UPDATE adventure SET gold=%(gold)s WHERE id=%(aid)s
""", {"gold": gold_rating, "aid": self.aid})
level_total = sum(hero.get_level() for hero in self.heroes)
gold_per_player = defaultdict(int)
loot_per_player = defaultdict(int)
for hero in self.heroes:
            # float() guards against Python 2 integer division, which would
            # truncate most contribution ratios to zero
            contrib_ratio = float(hero.get_level()) / level_total
gained_loot = contrib_ratio * loot_rating
gained_gold = contrib_ratio * gold_rating
gained_xp = contrib_ratio * xp_rating
c.execute("""
UPDATE hero SET xp=xp+%(gained_xp)s WHERE id=%(hero_id)s
""", {"gained_xp": gained_xp, "hero_id": hero.hid})
gold_per_player[hero.player_id] += gained_gold
loot_per_player[hero.player_id] += gained_loot
for player_id, gold_total in gold_per_player.iteritems():
c.execute("""
UPDATE player SET gold=gold+%(gold_total)s WHERE id=%(player_id)s
""", {"gold_total": gold_total, "player_id": player_id})
itemobjs = Item.generate(loot_per_player, self.heroes)
# commit the entire thing
g.db.commit()
return gold_per_player, itemobjs
def started_ago(self):
now = datetime.datetime.now()
if self.start_time > now:
return "Tulossa"
td = now - self.start_time
bits = [
(td.days, u"päivää"),
(td.seconds / 3600, u"tuntia"),
((td.seconds / 60) % 60, u"minuuttia"),
(td.seconds % 60, u"sekuntia")
]
valid_bits = [(time, text) for time, text in bits if time > 0]
if valid_bits:
valid_bits = valid_bits[:2]
return u", ".join(u"%s %s" % b for b in valid_bits) + u" sitten"
else:
return u"Juuri nyt"
def progress(self):
now = datetime.datetime.now()
if self.end_time < now:
            return 100  # finished: report full progress on the 0-100 scale
if self.start_time > now:
return 0
factor = (self.end_time - now).total_seconds() / float((self.end_time - self.start_time).total_seconds())
complement = 1 - factor
percent = 100 * complement
return "%.04f" % percent
| bsd-3-clause | -7,869,746,268,539,831,000 | 30.461988 | 113 | 0.563755 | false |
snakeleon/YouCompleteMe-x86 | third_party/ycmd/ycmd/handlers.py | 1 | 10839 | # Copyright (C) 2013 Google Inc.
# 2017 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import bottle
import json
import logging
import platform
import sys
import time
import traceback
from bottle import request
from threading import Thread
import ycm_core
from ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store
from ycmd.responses import ( BuildExceptionResponse, BuildCompletionResponse,
UnknownExtraConf )
from ycmd.request_wrap import RequestWrap
from ycmd.bottle_utils import SetResponseHeader
from ycmd.completers.completer_utils import FilterAndSortCandidatesWrap
# num bytes for the request body buffer; request.json only works if the request
# size is less than this
bottle.Request.MEMFILE_MAX = 10 * 1024 * 1024
_server_state = None
_hmac_secret = bytes()
_logger = logging.getLogger( __name__ )
app = bottle.Bottle()
wsgi_server = None
@app.post( '/event_notification' )
def EventNotification():
_logger.info( 'Received event notification' )
request_data = RequestWrap( request.json )
event_name = request_data[ 'event_name' ]
_logger.debug( 'Event name: %s', event_name )
event_handler = 'On' + event_name
getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
filetypes = request_data[ 'filetypes' ]
response_data = None
if _server_state.FiletypeCompletionUsable( filetypes ):
response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
event_handler )( request_data )
if response_data:
return _JsonResponse( response_data )
return _JsonResponse( {} )
@app.post( '/run_completer_command' )
def RunCompleterCommand():
_logger.info( 'Received command request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.OnUserCommand(
request_data[ 'command_arguments' ],
request_data ) )
@app.post( '/completions' )
def GetCompletions():
_logger.info( 'Received completion request' )
request_data = RequestWrap( request.json )
( do_filetype_completion, forced_filetype_completion ) = (
_server_state.ShouldUseFiletypeCompleter( request_data ) )
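  # do_filetype_completion: a semantic (filetype) completer can handle this
  # request; forced_filetype_completion: the user explicitly asked for it, so
  # semantic errors must propagate instead of falling back to identifiers.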
_logger.debug( 'Using filetype completion: %s', do_filetype_completion )
errors = None
completions = None
if do_filetype_completion:
try:
completions = ( _server_state.GetFiletypeCompleter(
request_data[ 'filetypes' ] )
.ComputeCandidates( request_data ) )
except Exception as exception:
if forced_filetype_completion:
# user explicitly asked for semantic completion, so just pass the error
# back
raise
else:
# store the error to be returned with results from the identifier
# completer
stack = traceback.format_exc()
_logger.error( 'Exception from semantic completer (using general): ' +
"".join( stack ) )
errors = [ BuildExceptionResponse( exception, stack ) ]
if not completions and not forced_filetype_completion:
completions = ( _server_state.GetGeneralCompleter()
.ComputeCandidates( request_data ) )
return _JsonResponse(
BuildCompletionResponse( completions if completions else [],
request_data.CompletionStartColumn(),
errors = errors ) )
@app.post( '/filter_and_sort_candidates' )
def FilterAndSortCandidates():
_logger.info( 'Received filter & sort request' )
# Not using RequestWrap because no need and the requests coming in aren't like
# the usual requests we handle.
request_data = request.json
return _JsonResponse( FilterAndSortCandidatesWrap(
request_data[ 'candidates'],
request_data[ 'sort_property' ],
request_data[ 'query' ] ) )
@app.get( '/healthy' )
def GetHealthy():
_logger.info( 'Received health request' )
if request.query.include_subservers:
cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
return _JsonResponse( cs_completer.ServerIsHealthy() )
return _JsonResponse( True )
@app.get( '/ready' )
def GetReady():
_logger.info( 'Received ready request' )
if request.query.subserver:
filetype = request.query.subserver
return _JsonResponse( _IsSubserverReady( filetype ) )
if request.query.include_subservers:
return _JsonResponse( _IsSubserverReady( 'cs' ) )
return _JsonResponse( True )
def _IsSubserverReady( filetype ):
completer = _server_state.GetFiletypeCompleter( [filetype] )
return completer.ServerIsReady()
@app.post( '/semantic_completion_available' )
def FiletypeCompletionAvailable():
_logger.info( 'Received filetype completion available request' )
return _JsonResponse( _server_state.FiletypeCompletionAvailable(
RequestWrap( request.json )[ 'filetypes' ] ) )
@app.post( '/defined_subcommands' )
def DefinedSubcommands():
_logger.info( 'Received defined subcommands request' )
completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
return _JsonResponse( completer.DefinedSubcommands() )
@app.post( '/detailed_diagnostic' )
def GetDetailedDiagnostic():
_logger.info( 'Received detailed diagnostic request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
@app.post( '/load_extra_conf_file' )
def LoadExtraConfFile():
_logger.info( 'Received extra conf load request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
return _JsonResponse( True )
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
_logger.info( 'Received extra conf ignore request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
return _JsonResponse( True )
@app.post( '/debug_info' )
def DebugInfo():
_logger.info( 'Received debug info request' )
request_data = RequestWrap( request.json )
has_clang_support = ycm_core.HasClangSupport()
clang_version = ycm_core.ClangVersion() if has_clang_support else None
filepath = request_data[ 'filepath' ]
try:
extra_conf_path = extra_conf_store.ModuleFileForSourceFile( filepath )
is_loaded = bool( extra_conf_path )
except UnknownExtraConf as error:
extra_conf_path = error.extra_conf_file
is_loaded = False
response = {
'python': {
'executable': sys.executable,
'version': platform.python_version()
},
'clang': {
'has_support': has_clang_support,
'version': clang_version
},
'extra_conf': {
'path': extra_conf_path,
'is_loaded': is_loaded
},
'completer': None
}
try:
response[ 'completer' ] = _GetCompleterForRequestData(
request_data ).DebugInfo( request_data )
except Exception as error:
_logger.exception( error )
return _JsonResponse( response )
@app.post( '/shutdown' )
def Shutdown():
_logger.info( 'Received shutdown request' )
ServerShutdown()
return _JsonResponse( True )
# The type of the param is Bottle.HTTPError
def ErrorHandler( httperror ):
body = _JsonResponse( BuildExceptionResponse( httperror.exception,
httperror.traceback ) )
hmac_plugin.SetHmacHeader( body, _hmac_secret )
return body
# For every error Bottle encounters it will use this as the default handler
app.default_error_handler = ErrorHandler
def _JsonResponse( data ):
SetResponseHeader( 'Content-Type', 'application/json' )
return json.dumps( data, default = _UniversalSerialize )
def _UniversalSerialize( obj ):
try:
serialized = obj.__dict__.copy()
serialized[ 'TYPE' ] = type( obj ).__name__
return serialized
except AttributeError:
return str( obj )
def _GetCompleterForRequestData( request_data ):
completer_target = request_data.get( 'completer_target', None )
if completer_target == 'identifier':
return _server_state.GetGeneralCompleter().GetIdentifierCompleter()
elif completer_target == 'filetype_default' or not completer_target:
return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
else:
return _server_state.GetFiletypeCompleter( [ completer_target ] )
def ServerShutdown():
def Terminator():
if wsgi_server:
wsgi_server.Shutdown()
# Use a separate thread to let the server send the response before shutting
# down.
terminator = Thread( target = Terminator )
terminator.daemon = True
terminator.start()
def ServerCleanup():
if _server_state:
_server_state.Shutdown()
extra_conf_store.Shutdown()
def SetHmacSecret( hmac_secret ):
global _hmac_secret
_hmac_secret = hmac_secret
def UpdateUserOptions( options ):
global _server_state
if not options:
return
# This should never be passed in, but let's try to remove it just in case.
options.pop( 'hmac_secret', None )
user_options_store.SetAll( options )
_server_state = server_state.ServerState( options )
def SetServerStateToDefaults():
global _server_state, _logger
_logger = logging.getLogger( __name__ )
user_options_store.LoadDefaults()
_server_state = server_state.ServerState( user_options_store.GetAll() )
extra_conf_store.Reset()
def KeepSubserversAlive( check_interval_seconds ):
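  # Poll every loaded filetype completer periodically; the health check acts
  # as a keepalive so subservers are not shut down for inactivity.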
def Keepalive( check_interval_seconds ):
while True:
time.sleep( check_interval_seconds )
_logger.debug( 'Keeping subservers alive' )
loaded_completers = _server_state.GetLoadedFiletypeCompleters()
for completer in loaded_completers:
completer.ServerIsHealthy()
keepalive = Thread( target = Keepalive,
args = ( check_interval_seconds, ) )
keepalive.daemon = True
keepalive.start()
| gpl-3.0 | -3,310,765,388,054,272,000 | 29.880342 | 80 | 0.696374 | false |
jacentio/beam | beam/__init__.py | 1 | 7104 | import argparse
import docker
import logging
from pythonjsonlogger import jsonlogger
from importlib import import_module
from time import time, sleep
from beam.models.service import Service
EXCLUDED_ATTRIBUTES = [
'TAGS'
]
class Beam(object):
def __init__(self, args=[]):
self.log = self.init_logger()
self.args = self.parse_args(args)
self.init_drivers()
self.init_client()
def init_logger(self):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logHandler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
return logger
def init_client(self):
self.log.debug(
'Initiating Docker client, using {}'.format(
self.args.socket))
self.dc = docker.DockerClient(
base_url='unix://{}'.format(self.args.socket))
return self.dc
def parse_args(self, args):
parser = argparse.ArgumentParser(
description='Flexible Docker Service Discovery')
parser.add_argument('--drivers', nargs='+')
parser.add_argument('--hostname', action="store", dest="hostname")
parser.add_argument(
'--exclusive',
dest='exclusive',
action='store_true')
parser.add_argument('--internal', dest='internal', action='store_true')
parser.add_argument('--socket', action="store", dest="socket")
        # type=int keeps a CLI-supplied --ttl numeric for the arithmetic in run()
        parser.add_argument('--ttl', action="store", dest="ttl", type=int)
parser.set_defaults(drivers=[])
parser.set_defaults(hostname=self.get_ip_address())
parser.set_defaults(exclusive=False)
parser.set_defaults(internal=False)
parser.set_defaults(socket="/tmp/docker.sock")
parser.set_defaults(ttl=30)
return parser.parse_args(args)
def get_ip_address(self):
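        # Stub: returns a fixed address; a real implementation would discover
        # the host's outbound IP.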
return '192.168.1.1'
def import_driver(self, d):
self.log.debug('Initiating {} backend driver'.format(d))
p = 'beam.drivers.{}_driver'.format(d.lower())
mod = import_module(p)
met = getattr(mod, d.capitalize())
return met()
def init_drivers(self):
self.drivers = []
for d in self.args.drivers:
self.drivers.append(self.import_driver(d))
def get_services_to_register(self, container):
services = []
only_services = []
if self.args.exclusive:
if 'BEAM_PORTS' not in container['Config']['Labels']:
return services
else:
only_services = [
x if '/' in x else '{}/tcp'.format(x) for x in
container['Config']['Labels']['BEAM_PORTS'].split(',')]
if self.args.internal:
try:
ports = container['Config']['ExposedPorts'].keys()
except AttributeError:
return services
for service in ports:
if only_services and service not in only_services:
continue
(container_port, proto) = service.split('/')
s = Service()
try:
s.name = container['Config']['Labels'][
'com.docker.swarm.service.id']
except KeyError:
s.name = '{}-{}'.format(
container['Name'].lstrip('/'), container_port)
s.ip = self.args.hostname
s.port = int(container_port)
s.proto = proto
s.id = container['Config']['Hostname']
services.append(s)
else:
try:
ports = container['HostConfig']['PortBindings'].iteritems()
except AttributeError:
return services
for service, cfg in ports:
try:
cfg = cfg[0]
except TypeError:
continue
if only_services and service not in only_services:
continue
(container_port, proto) = service.split('/')
s = Service()
try:
s.name = container['Config']['Labels'][
'com.docker.swarm.service.id']
except KeyError:
s.name = '{}-{}'.format(
container['Name'].lstrip('/'), container_port)
s.ip = self.args.hostname
s.port = int(cfg['HostPort'])
s.proto = proto
s.id = container['Config']['Hostname']
services.append(s)
return services
def get_service_attributes(self, service, container):
attributes = {}
s = service.name.split('-')
container_port = s[-1]
for k, v in container['Config']['Labels'].iteritems():
if k.startswith('BEAM_') and not k[5].isdigit():
attr_key = k.replace('BEAM_', '')
if attr_key in EXCLUDED_ATTRIBUTES:
continue
attributes[attr_key] = v
for k, v in container['Config']['Labels'].iteritems():
if k.startswith(
'BEAM_{}_{}'.format(
container_port,
service.proto.upper())):
attr_key = k.replace(
'BEAM_{}_{}_'.format(
container_port,
service.proto.upper()), '')
if attr_key in EXCLUDED_ATTRIBUTES:
continue
attributes[attr_key] = v
return attributes
def get_service_tags(self, service, container):
tags = set()
s = service.name.split('-')
container_port = s[-1]
try:
tags |= set(container['Config']['Labels']['BEAM_TAGS'].split(','))
except KeyError:
pass
try:
tags |= set(
container['Config']['Labels'][
'BEAM_{}_{}_TAGS'.format(
container_port,
service.proto.upper())].split('/'))
except KeyError:
pass
return list(tags)
def register_container(self, container):
services = self.get_services_to_register(container)
for service in services:
service.attrs = self.get_service_attributes(service, container)
service.tags = self.get_service_tags(service, container)
[driver.add(service, self.args.ttl) for driver in self.drivers]
def run(self):
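        # Single discovery pass: register all running containers with every
        # driver, then sleep slightly less than the TTL so registrations are
        # refreshed before they expire.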
start = time()
containers = self.dc.containers.list(filters={'status': 'running'})
[self.register_container(x.attrs) for x in containers]
end = time()
duration = end - start
self.log.debug('Registration run took {}s'.format(duration))
sleep_time = int(self.args.ttl - duration) - 5
self.log.debug('Sleeping for {}s'.format(sleep_time))
sleep(sleep_time)
| mit | -7,491,905,419,200,621,000 | 30.573333 | 79 | 0.519426 | false |
tweemeterjop/thug | thug/DOM/W3C/Attr.py | 1 | 2167 | #!/usr/bin/env python
import bs4 as BeautifulSoup
from .Node import Node
class Attr(Node):
_value = ""
def __init__(self, doc, parent, attr):
self.doc = doc
self.parent = parent
self.attr = attr
self.tag = BeautifulSoup.Tag(parser = self.doc, name = 'attr')
Node.__init__(self, doc)
self._specified = False
self._value = self.getValue()
def __repr__(self):
return "<Attr object %s%s at 0x%08X>" % ("%s." % self.parent.tagName if self.parent else "", self.attr, id(self))
def __eq__(self, other):
return hasattr(other, "parent") and self.parent == other.parent and \
hasattr(other, "attr") and self.attr == other.attr
@property
def nodeType(self):
return Node.ATTRIBUTE_NODE
@property
def nodeName(self):
return self.attr
def getNodeValue(self):
return self.getValue()
def setNodeValue(self, value):
return self.setValue(value)
nodeValue = property(getNodeValue, setNodeValue)
@property
def childNodes(self):
from .NodeList import NodeList
return NodeList(self.parent.doc, [])
@property
def parentNode(self):
return self.parent
# Introduced in DOM Level 2
@property
def ownerElement(self):
if self.parent:
if self.parent.nodeType == Node.ELEMENT_NODE:
return self.parent
return None
@property
def ownerDocument(self):
return self.parent.doc
@property
def name(self):
return self.attr
@property
def specified(self):
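        # Per the DOM spec, an attribute with no owner element is always
        # reported as specified.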
if self.ownerElement is None:
return True
return self._specified
def getValue(self):
if self.parent:
if self.parent.tag.has_attr(self.attr):
self._specified = True
return self.parent.tag[self.attr]
return self._value
def setValue(self, value):
self._value = value
if self.parent:
self._specified = True
self.parent.tag[self.attr] = value
value = property(getValue, setValue)
| gpl-2.0 | -8,961,520,917,064,182,000 | 22.301075 | 121 | 0.580988 | false |
foxdog-studios/pyddp | ddp/messages/client/sub_message.py | 1 | 1695 | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import copy
from .client_message import ClientMessage
__all__ = ['SubMessage']
class SubMessage(ClientMessage):
def __init__(self, id, name, params=None):
super(SubMessage, self).__init__()
self._id = id
self._name = name
self._params = copy(params)
def __eq__(self, other):
if isinstance(other, SubMessage):
return (self._id == other._id and self._name == other._name
and self._params == other._params)
        # Fall back to the parent class's equality check.
        return super(SubMessage, self).__eq__(other)
def __str__(self):
return 'SubMessage({!r}, {!r}, params={!r})'.format(
self._id,
self._name,
self._params)
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def params(self):
return copy(self._params)
def has_params(self):
return self._params is not None
| apache-2.0 | 7,933,461,572,024,796,000 | 26.786885 | 74 | 0.630088 | false |
KDNT/p2pool-worldcoin-old | p2pool/web.py | 1 | 25333 | from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
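    # Read filename, falling back to the '.new' temporary left behind if a
    # previous _atomic_write was interrupted between write and rename.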
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
def get_web_root(wb, datadir_path, bitcoind_warning_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
def get_current_scaled_txouts(scale, trunc=0):
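        # Scale current payouts so they sum to `scale`; payouts below `trunc`
        # are pooled and awarded to a single weighted-random winner.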
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
my_stale_proportions_in_last_hour=dict(
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, bitcoind_warning_var.value, node.bitcoind_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
protocol_version=p2p.Protocol.VERSION,
fee_private=wb.worker_fee,
)
class WebInterface(deferred_resource.DeferredResource):
def __init__(self, func, mime_type='application/json', args=()):
deferred_resource.DeferredResource.__init__(self)
self.func, self.mime_type, self.args = func, mime_type, args
def getChild(self, child, request):
return WebInterface(self.func, self.mime_type, self.args + (child,))
@defer.inlineCallbacks
def render_GET(self, request):
request.setHeader('Content-Type', self.mime_type)
request.setHeader('Access-Control-Allow-Origin', '*')
res = yield self.func(*self.args)
defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
def decent_height():
return min(node.tracker.get_height(node.best_share_var.value), 720)
web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
web_root.putChild('users', WebInterface(get_users))
web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
web_root.putChild('global_stats', WebInterface(get_global_stats))
web_root.putChild('local_stats', WebInterface(get_local_stats))
web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
dict([(a, (yield b)) for a, b in
[(
'%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
))()
) for peer in list(node.p2p_node.peers.itervalues())]
])
))))
web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
ts=s.timestamp,
hash='%064x' % s.header_hash,
number=pack.IntType(24).unpack(s.share_data['coinbase'][1:4]) if len(s.share_data['coinbase']) >= 4 else None,
share='%064x' % s.hash,
) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))
new_root = resource.Resource()
web_root.putChild('web', new_root)
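    # Rolling 24-hour statistics log, persisted to 'stats' in the data
    # directory and exposed at /web/log.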
stat_log = []
if os.path.exists(os.path.join(datadir_path, 'stats')):
try:
with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
stat_log = json.loads(f.read())
except:
log.err(None, 'Error loading stats:')
def update_stat_log():
while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
stat_log.pop(0)
lookbehind = 3600//node.net.SHARE_PERIOD
if node.tracker.get_height(node.best_share_var.value) < lookbehind:
return None
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
stat_log.append(dict(
time=time.time(),
pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
pool_stale_prop=global_stale_prop,
local_hash_rates=miner_hash_rates,
local_dead_hash_rates=miner_dead_hash_rates,
shares=shares,
stale_shares=stale_orphan_shares + stale_doa_shares,
stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
))
with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
f.write(json.dumps(stat_log))
x = deferral.RobustLoopingCall(update_stat_log)
x.start(5*60)
stop_event.watch(x.stop)
new_root.putChild('log', WebInterface(lambda: stat_log))
def get_share(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return None
share = node.tracker.items[int(share_hash_str, 16)]
return dict(
parent='%064x' % share.previous_hash,
children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
type_name=type(share).__name__,
local=dict(
verified=share.hash in node.tracker.verified.items,
time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
peer_first_received_from=share.peer_addr,
),
share_data=dict(
timestamp=share.timestamp,
target=share.target,
max_target=share.max_target,
payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
donation=share.share_data['donation']/65535,
stale_info=share.share_data['stale_info'],
nonce=share.share_data['nonce'],
desired_version=share.share_data['desired_version'],
absheight=share.absheight,
abswork=share.abswork,
),
block=dict(
hash='%064x' % share.header_hash,
header=dict(
version=share.header['version'],
previous_block='%064x' % share.header['previous_block'],
merkle_root='%064x' % share.header['merkle_root'],
timestamp=share.header['timestamp'],
target=share.header['bits'].target,
nonce=share.header['nonce'],
),
gentx=dict(
hash='%064x' % share.gentx_hash,
coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
value=share.share_data['subsidy']*1e-8,
),
txn_count=len(list(share.iter_transaction_hash_refs())),
),
)
new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
def get_share_data(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return ''
share = node.tracker.items[int(share_hash_str, 16)]
return p2pool_data.share_type.pack(share.as_share1a())
new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
new_root.putChild('currency_info', WebInterface(lambda: dict(
symbol=node.net.PARENT.SYMBOL,
block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
)))
new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
hd_path = os.path.join(datadir_path, 'graph_db')
hd_data = _atomic_read(hd_path)
hd_obj = {}
if hd_data is not None:
try:
hd_obj = json.loads(hd_data)
except Exception:
log.err(None, 'Error reading graph database:')
dataview_descriptions = {
'last_hour': graph.DataViewDescription(150, 60*60),
'last_day': graph.DataViewDescription(300, 60*60*24),
'last_week': graph.DataViewDescription(300, 60*60*24*7),
'last_month': graph.DataViewDescription(300, 60*60*24*30),
'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
}
def build_desired_rates(ds_name, ds_desc, dv_name, dv_desc, obj):
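        # Rebuild the hash-rate-weighted desired-version view by multiplying
        # each bin's version proportions by the pool's total rate in that bin.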
if not obj:
last_bin_end = 0
bins = dv_desc.bin_count*[{}]
else:
pool_rates = obj['pool_rates'][dv_name]
desired_versions = obj['desired_versions'][dv_name]
def get_total_pool_rate(t):
n = int((pool_rates['last_bin_end'] - t)/dv_desc.bin_width)
if n < 0 or n >= dv_desc.bin_count:
return None
total = sum(x[0] for x in pool_rates['bins'][n].values())
count = math.mean(x[1] for x in pool_rates['bins'][n].values())
if count == 0:
return None
return total/count
last_bin_end = desired_versions['last_bin_end']
bins = [dict((name, (total*get_total_pool_rate(last_bin_end - (i+1/2)*dv_desc.bin_width), count)) for name, (total, count) in desired_versions['bins'][i].iteritems()) for i in xrange(dv_desc.bin_count)]
return graph.DataView(dv_desc, ds_desc, last_bin_end, bins)
hd = graph.HistoryDatabase.from_obj({
'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'current_payout': graph.DataStreamDescription(dataview_descriptions),
'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
'incoming_peers': graph.DataStreamDescription(dataview_descriptions),
'outgoing_peers': graph.DataStreamDescription(dataview_descriptions),
'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'desired_versions': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True, default_func=build_desired_rates),
'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
'memory_usage': graph.DataStreamDescription(dataview_descriptions),
}, hd_obj)
x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
x.start(100)
stop_event.watch(x.stop)
@wb.pseudoshare_received.watch
def _(work, dead, user):
t = time.time()
hd.datastreams['local_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
if user is not None:
hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
if dead:
hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
@wb.share_received.watch
def _(work, dead):
t = time.time()
hd.datastreams['local_share_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_share_hash_rate'].add_datum(t, work)
@node.p2p_node.traffic_happened.watch
def _(name, bytes):
hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
def add_point():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
t = time.time()
pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
pool_total = sum(pool_rates.itervalues())
hd.datastreams['pool_rates'].add_datum(t, pool_rates)
current_txouts = node.get_current_txouts()
hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
hd.datastreams['incoming_peers'].add_datum(t, sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming))
hd.datastreams['outgoing_peers'].add_datum(t, sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming))
vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
vs_total = sum(vs.itervalues())
hd.datastreams['desired_versions'].add_datum(t, dict((str(k), v/vs_total) for k, v in vs.iteritems()))
hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
try:
hd.datastreams['memory_usage'].add_datum(t, memory.resident())
except:
if p2pool.DEBUG:
traceback.print_exc()
x = deferral.RobustLoopingCall(add_point)
x.start(5)
stop_event.watch(x.stop)
@node.bitcoind_work.changed.watch
def _(new_work):
hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
web_root.putChild('static', static.File(os.path.join(os.path.dirname(sys.argv[0]), 'web-static')))
return web_root
| gpl-3.0 | 3,808,312,413,652,431,000 | 54.922737 | 260 | 0.6306 | false |
heromod/migrid | mig/vm-proxy/deprecated/proxy/mipbroker.py | 1 | 2409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# MipBroker - An Mip broker
#
#
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import logging
import SocketServer
import threading
from struct import unpack, pack
import mip
class MipBroker(SocketServer.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.cur_thread = threading.currentThread()
self.running = True
logging.debug("%s starting." % self)
SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
def setup(self):
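    # NOTE: MipServer is referenced below but neither defined nor imported in
    # this module; it is assumed to supply the shared proxy list and lock.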
logging.debug("%s started." % self)
try:
logging.debug('%s MIP Broker is here!' % self)
#MipServer.lock.acquire()
for proxy in MipServer.proxies:
logging.debug('%s proxy %s' %(self, proxy))
#MipServer.lock.release()
logging.debug('%s Thats it im done..' % self)
except:
self.running = False
logging.exception("%s Unexpected error:" % self)
# Find a server
# Send request to server
# Connect pinhole
pass
def handle(self):
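    # Busy-wait keeps the handler thread alive until closeConnection() clears
    # self.running; blocking on the socket would be cheaper on CPU.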
while self.running:
pass
def closeConnection(self):
self.running = False
self.request.close()
logging.debug('%s Closed connection.', self)
"""
def datareceived(self, data):
logging.debug('%s MIP Broker is here!' % self)
MipServer.lock.acquire()
self.proxies.append(proxyHost)
for proxy in MipServer.proxies:
logging.debug('%s proxy %s' %(self, proxy))
MipServer.lock.release()
logging.debug('%s Thats it im done..' % self)
""" | gpl-2.0 | -8,736,584,838,630,681,000 | 26.386364 | 83 | 0.658364 | false |
kevin-zhaoshuai/zun | zun/api/controllers/v1/schemas/images.py | 1 | 1106 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zun.common.validation import parameter_types
_image_properties = {
'image_id': parameter_types.image_id,
'repo': parameter_types.repo,
'tag': parameter_types.tag,
'size': parameter_types.size
}
image_create = {
'type': 'object',
'properties': _image_properties,
'required': ['repo'],
'additionalProperties': False
}
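# Hypothetical usage sketch: these dicts are plain JSON Schema documents, so a
# request body could be checked with e.g. jsonschema.validate(body, image_create).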
query_param_search = {
'type': 'object',
'properties': {
'image_driver': parameter_types.image_driver,
'exact_match': parameter_types.boolean
},
'additionalProperties': False
}
| apache-2.0 | -7,546,945,377,160,482,000 | 29.722222 | 75 | 0.702532 | false |
d1m0/pyelf | __init__.py | 1 | 13904 | from pylibelf import *
from pylibelf.types import *
from pylibelf.iterators import *
from pylibelf.constants import *
from pylibelf.util import *
from pylibelf.util.syms import *
from pylibelf.macros import *
from bisect import bisect_left
import pylibelf.util
import pylibelf
import types
import os
def _inrange(x, a,b):
return x>=a and x < b
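# True iff the closed intervals [a, b] and [c, d] overlap.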
def _overlap(a, b, c, d):
return a <= d and c <= b
class Bunch:
def __setitem__(self, k, v): self.__dict__[k] = v
def __getitem__(self, k): return self.__dict__[k]
class BaseElfNode(object):
@staticmethod
def extract(obj):
return BaseElfNode._extract(obj, {})
@staticmethod
def _extract(obj, m):
""" Given a BaseElfNode object extract a static snapshot of the current
object and its children that does not refer to the parent or any pylibelf
objects
"""
if isinstance(obj, BaseElfNode):
if obj in m:
return m[obj]
res = Bunch()
m[obj] = res
for attr in dir(obj):
if (isinstance(obj, ElfSym) and attr == 'contents' and not obj.defined):
v = None
elif (isinstance(obj, ElfScn) and (attr == 'info_scn' or attr == 'link_scn' or attr == 'index')):
try:
v = getattr(obj, attr)
except ElfError: # This section doesn't have a info_scn or a link_scn
v = None
else:
v = getattr(obj, attr)
if hasattr(v, "__call__"):
# This is a function - ignore
continue
try:
res[attr] = BaseElfNode._extract(v, m)
except AttributeError: pass
return res
elif type(obj) == list:
return map(lambda x: BaseElfNode._extract(x, m), obj)
elif type(obj) == tuple:
return tuple(map(lambda x: BaseElfNode._extract(x, m), obj))
elif type(obj) == dict:
      return dict([(BaseElfNode._extract(k, m), BaseElfNode._extract(v, m)) for (k, v) in obj.items()])
elif type(obj) in [int, str, long, bool, types.NoneType]:
return obj
else:
print type(obj), obj
return None
def __init__(self, elf, pt, obj, typ = None, addFields = []):
assert(pt == None or isinstance(pt, BaseElfNode))
self._elf = elf
self._pt = pt
self._obj = obj
self._ptr = cast(self._obj, c_void_p).value
self._typ = typ
# All object's memoization cache points to the root elf file's memoization cache
if (isinstance(self, Elf)):
self._cache = {}
else:
while (not isinstance(pt, Elf)): pt = pt._pt
self._cache = pt._cache
self._fields = []
if self._typ != None:
self._fields += map(lambda x: x[0], self._typ._fields_)
self._fields += addFields
def _select(self, name): return select(self._elf, name)
def __getattr__(self, name):
cache = self._cache
key = (self._ptr, name)
if (key in cache):
return cache[key]
res = self._getattr_impl(name)
if (isinstance(res, types.GeneratorType)):
cache[key] = list(res)
else:
cache[key] = res
return res
def _getattr_impl(self, name):
try:
if (self._obj != None):
inner = self._obj.contents
else:
return 0
except AttributeError:
raise Exception("Can't access %s in %s - not a pointer" % \
(name, str(self._obj)))
return getattr(inner, name)
def _getelf(self):
p = self
while not isinstance(p, Elf):
p = p._pt
return p
def _class(self):
return pylibelf.util._class(self._elf)
def __dir__(self):
return self._fields
class ElfEhdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Ehdr if is64(elf) else Elf32_Ehdr, [])
class ElfShdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Shdr if is64(elf) else Elf32_Shdr, ['name'])
def _getattr_impl(self, name):
if (name == "name"):
return elf_strptr(self._elf, self._pt._pt.ehdr.e_shstrndx, self._obj.contents.sh_name)
else:
return BaseElfNode._getattr_impl(self, name)
class ElfSym(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Sym if is64(elf) else Elf32_Sym, ['name', 'section', 'defined', \
'contents', 'type', 'binding', 'targetScn', 'index'])
def _getattr_impl(self, name):
if (name == "name"):
return elf_strptr(self._elf, self._pt.shdr.sh_link, self._obj.contents.st_name)
elif (name == "section"):
return self._pt
elif (name == "defined"):
return self.st_shndx != SHN_UNDEF
elif (name == "type"):
if is64(self._elf):
return ELF64_ST_TYPE(self.st_info)
else:
return ELF32_ST_TYPE(self.st_info)
elif (name == "binding"):
if is64(self._elf):
return ELF64_ST_BIND(self.st_info)
else:
return ELF32_ST_BIND(self.st_info)
elif (name == "targetScn"):
return self._pt._pt.section(self.st_shndx)
elif (name == "contents"):
targetSec = self._pt._pt.section(self.st_shndx)
relas = []
for relaScn in targetSec.relaScns:
# [self.st_value ...
start = bisect_left(relaScn.relas, self.st_value)
# ... self.st_value + self.st_size)
end = bisect_left(relaScn.relas, self.st_value + self.st_size)
relas.extend(relaScn.relas[start:end])
# Testing only
#for r in relas:
# assert(r.r_offset >= self.st_value and r.r_offset < self.st_value + self.st_size)
#TODO: rels
rels = []
mem = targetSec.memInRange(self.st_value, self.st_size)
return (mem, rels, relas)
elif (name == "index"):
size = sizeof(self._typ)
ptr = cast(self._obj, c_voidp).value
ind = None
for d in self.section.data():
if d.d_buf <= ptr and d.d_buf + d.d_size > ptr:
assert (ptr - d.d_buf) % size == 0, "Misaligned symbol pointer %d in section %s" % \
(ptr, self.section.shdr.name)
ind = (ptr - d.d_buf) / size
assert ind != None, "Symbol not found in section!"
return ind
else:
return BaseElfNode._getattr_impl(self, name)
class ElfRela(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, \
Elf64_Rela if is64(elf) else Elf32_Rela, ['sym'])
def _getattr_impl(self, name):
if (name == "sym"):
elfO = self._getelf()
scn = elfO.section(self._pt.shdr.sh_link)
symInd = ELF64_R_SYM(self.r_info) if is64(self._elf) else \
ELF32_R_SYM(self.r_info)
return ElfSym(self._elf, scn, scn.sym(symInd)._obj)
else:
return BaseElfNode._getattr_impl(self, name)
def __cmp__(self, other):
if type(other) == long or type(other) == int:
if self.r_offset < other:
return -1
elif self.r_offset == other:
return 0
else:
return 1
raise Exception("NYI")
class ElfRel(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, \
Elf64_Rel if is64(elf) else Elf32_Rel, ['sym'])
def _getattr_impl(self, name):
if (name == "sym"):
elfO = self._getelf()
scn = elfO.section(self._pt.shdr.sh_link)
symInd = ELF64_R_SYM(self.r_info) if is64(self._elf) else \
ELF32_R_SYM(self.r_info)
return ElfSym(self._elf, scn, scn.sym(symInd)._obj)
else:
return BaseElfNode._getattr_impl(self, name)
class ElfData(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Data, [])
class ElfArhdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Arhdr, [])
class ElfScn(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Scn,\
['index', 'shdr', 'link_scn', 'info_scn', 'syms', 'relas', 'relaScns', 'sym', 'data', 'memInRange',
'relasInRange', 'strAtAddr'])
def _getattr_impl(self, name):
if (name == "index"):
return elf_ndxscn(self._obj)
elif (name == "shdr"):
return ElfShdr(self._elf, self, select(self._elf, 'getshdr')(self._obj))
elif (name == "link_scn" and self.shdr.sh_link != SHN_UNDEF):
return ElfScn(self._elf, self._pt, elf_getscn(self._elf, \
self.shdr.sh_link))
elif (name == "info_scn" and (self.shdr.sh_type == SHT_REL or \
self.shdr.sh_type == SHT_RELA)):
return ElfScn(self._elf, self._pt, elf_getscn(self._elf, \
self.shdr.sh_info))
elif (name == "syms" and self.shdr.sh_type in [SHT_SYMTAB, SHT_DYNSYM]):
symT = Elf32_Sym if (is32(self._elf)) else Elf64_Sym
return reduce(lambda a,c: a+c, \
map(lambda d: map(lambda sym: ElfSym(self._elf, self, pointer(sym)), \
list(arr_iter(d, symT))), list(data(self._obj))))
elif (name == "relas" and self.shdr.sh_type == SHT_RELA):
relaT = Elf32_Rela if (is32(self._elf)) else Elf64_Rela
return reduce(lambda a,c: a+c, \
map(lambda d: map(lambda rela: ElfRela(self._elf, self, pointer(rela)),\
list(arr_iter(d, relaT))), list(data(self._obj))))
elif (name == "relaScns"):
return [s for s in self._pt.sections if s.shdr.sh_info == self.index\
and s.shdr.sh_type == SHT_RELA]
elif (name == "name"):
return self.shdr.name
else:
return BaseElfNode._getattr_impl(self, name)
def sym(self, ind):
shtype = self.shdr.sh_type
if shtype not in [SHT_SYMTAB, SHT_DYNSYM]:
raise Exception("Section %s does not contain symbols" % (self.shdr.name,))
return self.syms[ind]
def data(self):
d = None
while True:
d = elf_getdata(self._obj, d)
if not bool(d): break
yield ElfData(self._elf, self, d)
def memInRange(self, start, size):
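    # Concatenate this section's bytes covering the virtual-address range
    # [start, start + size), walking the underlying Elf_Data blocks.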
r = ''
off = 0
base = self.shdr.sh_addr
end = start + size
for d in self.data():
if start >= end: break;
off = base + d.d_off
if start >= off and start < off + d.d_size:
c = cast(d.d_buf, POINTER(c_char))
l = min(off + d.d_size, end) - start
r += c[start- off : start - off + l]
start += l
return r
  def relasInRange(self, start, size):
    relas = []
    for relaScn in self.relaScns:
      # [start ...
      lo = bisect_left(relaScn.relas, start)
      # ... start + size)
      hi = bisect_left(relaScn.relas, start + size)
      relas.extend(relaScn.relas[lo:hi])
    return relas
def strAtAddr(self, ptr):
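    # Read a NUL-terminated string from this section at virtual address ptr.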
r = ''
off = 0
base = self.shdr.sh_addr
start = ptr - base
for d in self.data():
off = d.d_off
c = cast(d.d_buf, POINTER(c_char))
while (start >= off and start < off + d.d_size):
if c[start] == '\x00':
break
r += c[start]
start += 1
return r
class Elf(BaseElfNode):
def __init__(self, elf, pt=None, claz = None):
if type(elf) == str:
self.fd = os.open(elf, os.O_RDONLY)
elf = elf_begin(self.fd, ELF_C_READ, None)
elif isinstance(elf, ElfP):
self.fd = None
else:
raise Exception("Invalid input to Elf.__init__(): %s" % (str(elf), ))
if claz != None:
self._class = claz
else:
self._class = pylibelf.util._class(elf)
BaseElfNode.__init__(self, elf, pt, elf, pylibelf.types.Elf, \
['ehdr', 'shstrndx', 'arhdr', 'sections', 'section', 'syms', 'findSym'])
self._symsMap = dict([
(sym.name, sym) for sym in self.syms()
])
self._secMap = dict([
(elf_ndxscn(s._obj), s) for s in self.sections
])
nullScn = ElfScn(self._elf, self, None)
self._secMap[0] = nullScn
def finalize(self):
elf_end(self._elf)
if self.fd != None:
os.close(self.fd)
def _getattr_impl(self, name):
if (name == "ehdr"):
return ElfEhdr(self._elf, self, self._select("getehdr")(self._elf))
elif (name == "shstrndx"):
return self.ehdr.e_shstrndx
elif (name == "arhdr"):
arhdr = elf_getarhdr(self._elf)
if (bool(arhdr)):
return ElfArhdr(self._elf, self, arhdr)
else:
raise AttributeError("Elf file doesn't have an arhdr")
elif (name == "sections"):
return [ ElfScn(self._elf, self, pointer(s)) for s in
sections(self._elf) ]
elif (name == "relasMap"):
return dict([(s.index, s.relas) \
for s in self.sections if s.shdr.sh_type == SHT_RELA])
else:
return BaseElfNode._getattr_impl(self, name)
def section(self, ind):
return self._secMap[ind]
def syms(self):
for scn in self.sections:
if scn.shdr.sh_type != SHT_SYMTAB and scn.shdr.sh_type != SHT_DYNSYM:
continue
for sym in syms(self._elf, scn._obj.contents):
yield ElfSym(self._elf, scn, pointer(sym[1]))
def findSym(self, name):
try:
return self._symsMap[name]
except:
return None
def deref(self, addr, size):
r = None
for s in self.sections:
# TODO(dbounov): Hack, due to .tbss overlapping other sections. Figure out correct way to deal with this.
if s.shdr.name == ".tbss":
continue
if _overlap(addr, addr+size - 1, s.shdr.sh_addr, s.shdr.sh_addr + s.shdr.sh_size - 1):
assert r == None # Currently support address ranges in a single section only
r = (s.memInRange(addr, size), [], s.relasInRange(addr, size) )
return r
class Ar:
def __init__(self, fname, claz):
self._fname = fname
self._class = claz
def elfs(self):
self.fd = os.open(self._fname, os.O_RDONLY)
ar = elf_begin(self.fd, ELF_C_READ, None)
while True:
e = elf_begin(self.fd, ELF_C_READ, ar)
if (not bool(e)): break
yield Elf(e, None, self._class)
elf_end(ar)
os.close(self.fd)
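# Hypothetical usage: iterate over the ELF members of a static archive, e.g.
#   for e in Ar('libfoo.a', ELFCLASS64).elfs(): print e.ehdr.e_type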
__all__ = [ 'BaseElfNode', 'ElfEhdr', 'ElfShdr', 'ElfSym', 'ElfRela', \
'ElfData', 'ElfArhdr', 'ElfScn', 'Elf', 'Ar' ]
| mit | -9,088,902,695,502,797,000 | 29.095238 | 111 | 0.583357 | false |
bburan/psiexperiment | psi/data/plots.py | 1 | 35393 | import logging
log = logging.getLogger(__name__)
import itertools
import importlib
from functools import partial
from collections import defaultdict
import numpy as np
import pandas as pd
import pyqtgraph as pg
from atom.api import (Unicode, Float, Tuple, Int, Typed, Property, Atom, Bool,
Enum, List, Dict, Callable, Value)
from enaml.application import deferred_call, timed_call
from enaml.colors import parse_color
from enaml.core.api import Looper, Declarative, d_, d_func
from enaml.qt.QtGui import QColor
from psi.util import SignalBuffer, ConfigurationException
from psi.core.enaml.api import load_manifests, PSIContribution
from psi.controller.calibration import util
from psi.context.context_item import ContextMeta
################################################################################
# Utility functions
################################################################################
def get_x_fft(fs, duration):
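    # Return log10-scaled frequency bins matching np.fft.rfft of a signal of
    # this duration at sampling rate fs.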
n_time = int(fs * duration)
freq = np.fft.rfftfreq(n_time, fs**-1)
return np.log10(freq)
def get_color_cycle(name):
module_name, cmap_name = name.rsplit('.', 1)
module = importlib.import_module(module_name)
cmap = getattr(module, cmap_name)
return itertools.cycle(cmap.colors)
def make_color(color):
if isinstance(color, tuple):
return QColor(*color)
elif isinstance(color, str):
return QColor(color)
else:
raise ValueError('Unknown color %r', color)
################################################################################
# Style mixins
################################################################################
class ColorCycleMixin(Declarative):
#: Define the pen color cycle. Can be a list of colors or a string
#: indicating the color palette to use in palettable.
pen_color_cycle = d_(Typed(object))
_plot_colors = Typed(dict)
def _make_plot_cycle(self):
if isinstance(self.pen_color_cycle, str):
cycle = get_color_cycle(self.pen_color_cycle)
else:
cycle = itertools.cycle(self.pen_color_cycle)
return defaultdict(lambda: next(cycle))
@d_func
def get_pen_color(self, key):
if self._plot_colors is None:
self._plot_colors = self._make_plot_cycle()
color = self._plot_colors[key]
if not isinstance(color, str):
return QColor(*color)
else:
return QColor(color)
def _observe_pen_color_cycle(self, event):
self._plot_colors = self._make_plot_cycle()
self.reset_plots()
def reset_plots(self):
raise NotImplementedError
################################################################################
# Supporting classes
################################################################################
class BaseDataRange(Atom):
container = Typed(object)
# Size of display window
span = Float(1)
# Delay before clearing window once data has "scrolled off" the window.
delay = Float(0)
# Current visible data range
current_range = Tuple(Float(), Float())
def add_source(self, source):
cb = partial(self.source_added, source=source)
source.add_callback(cb)
def _default_current_range(self):
return 0, self.span
def _observe_delay(self, event):
self._update_range()
def _observe_span(self, event):
self._update_range()
def _update_range(self):
raise NotImplementedError
class EpochDataRange(BaseDataRange):
max_duration = Float()
def source_added(self, data, source):
n = [len(d['signal']) for d in data]
max_duration = max(n) / source.fs
self.max_duration = max(max_duration, self.max_duration)
def _observe_max_duration(self, event):
self._update_range()
def _update_range(self):
self.current_range = 0, self.max_duration
class ChannelDataRange(BaseDataRange):
# Automatically updated. Indicates last "seen" time based on all data
# sources reporting to this range.
current_time = Float(0)
current_samples = Typed(defaultdict, (int,))
current_times = Typed(defaultdict, (float,))
def _observe_current_time(self, event):
self._update_range()
def _update_range(self):
low_value = (self.current_time//self.span)*self.span - self.delay
high_value = low_value+self.span
self.current_range = low_value, high_value
def add_event_source(self, source):
cb = partial(self.event_source_added, source=source)
source.add_callback(cb)
def source_added(self, data, source):
self.current_samples[source] += data.shape[-1]
self.current_times[source] = self.current_samples[source]/source.fs
self.current_time = max(self.current_times.values())
def event_source_added(self, data, source):
self.current_times[source] = data[-1][1]
self.current_time = max(self.current_times.values())
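        # current_time is the maximum across all registered sources, so the
        # scrolling window tracks whichever source has reported the most data.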
def create_container(children, x_axis=None):
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(children):
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
if x_axis is not None:
container.addItem(x_axis, i+1, 1)
# Link the child viewboxes together
for child in children[1:]:
child.viewbox.setXLink(children[0].viewbox)
#children[0].viewbox.setXRange(0, 100, padding=0)
return container
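# Each child passed to create_container is expected to provide .y_axis and
# .viewbox attributes, as the ViewBox contribution defined below does.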
################################################################################
# Pattern containers
################################################################################
class MultiPlotContainer(Looper, PSIContribution):
group = d_(Unicode())
containers = d_(Dict())
_workbench = Value()
selected_item = Value()
def refresh_items(self):
super().refresh_items()
if not self.iterable:
return
self.containers = {str(i): c[0].container for \
i, c in zip(self.iterable, self.items)}
load_manifests(self.items, self._workbench)
for item in self.items:
load_manifests(item, self._workbench)
load_manifests(item[0].children, self._workbench)
deferred_call(item[0].format_container)
################################################################################
# Containers (defines a shared set of containers across axes)
################################################################################
class BasePlotContainer(PSIContribution):
label = d_(Unicode())
container = Typed(pg.GraphicsWidget)
x_axis = Typed(pg.AxisItem)
base_viewbox = Property()
legend = Typed(pg.LegendItem)
def _default_container(self):
return create_container(self.children, self.x_axis)
def _default_legend(self):
legend = pg.LegendItem()
legend.setParentItem(self.container)
return legend
def _get_base_viewbox(self):
return self.children[0].viewbox
def _default_x_axis(self):
x_axis = pg.AxisItem('bottom')
x_axis.setGrid(64)
x_axis.linkToView(self.children[0].viewbox)
return x_axis
def update(self, event=None):
pass
def find(self, name):
for child in self.children:
if child.name == name:
return child
def format_container(self):
pass
def _reset_plots(self):
pass
class PlotContainer(BasePlotContainer):
x_min = d_(Float(0))
x_max = d_(Float(0))
def format_container(self):
# If we want to specify values relative to a psi context variable, we
# cannot do it when initializing the plots.
if (self.x_min != 0) or (self.x_max != 0):
self.base_viewbox.setXRange(self.x_min, self.x_max, padding=0)
def update(self, event=None):
deferred_call(self.format_container)
class BaseTimeContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same time-based X-axis
'''
data_range = Typed(BaseDataRange)
span = d_(Float(1))
delay = d_(Float(0.25))
def _default_container(self):
container = super()._default_container()
# Ensure that the x axis shows the planned range
self.base_viewbox.setXRange(0, self.span, padding=0)
self.data_range.observe('current_range', self.update)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
        x_axis.setLabel('Time', units='s')
return x_axis
def update(self, event=None):
low, high = self.data_range.current_range
deferred_call(self.base_viewbox.setXRange, low, high, padding=0)
super().update()
class TimeContainer(BaseTimeContainer):
def _default_data_range(self):
return ChannelDataRange(container=self, span=self.span,
delay=self.delay)
def update(self, event=None):
for child in self.children:
child.update()
super().update()
class EpochTimeContainer(BaseTimeContainer):
def _default_data_range(self):
return EpochDataRange(container=self, span=self.span, delay=self.delay)
def format_log_ticks(values, scale, spacing):
    values = 10**np.array(values).astype(float)
return ['{:.1f}'.format(v) for v in values]
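# e.g. tick values [0, 1, 2] (log10 units) are rendered as '1.0', '10.0', '100.0'.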
class FFTContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same frequency-based X-axis
'''
freq_lb = d_(Float(5))
freq_ub = d_(Float(50000))
def _default_container(self):
container = super()._default_container()
self.base_viewbox.setXRange(np.log10(self.freq_lb),
np.log10(self.freq_ub),
padding=0)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Frequency (Hz)')
x_axis.logTickStrings = format_log_ticks
x_axis.setLogMode(True)
return x_axis
################################################################################
# ViewBox
################################################################################
class ViewBox(PSIContribution):
viewbox = Typed(pg.ViewBox)
y_axis = Typed(pg.AxisItem)
y_mode = d_(Enum('symmetric', 'upper'))
y_min = d_(Float(0))
y_max = d_(Float(0))
allow_zoom_y = d_(Bool(True))
allow_zoom_x = d_(Bool(False))
data_range = Property()
def _default_name(self):
return self.label
def _get_data_range(self):
return self.parent.data_range
def _default_y_axis(self):
y_axis = pg.AxisItem('left')
y_axis.setLabel(self.label)
y_axis.linkToView(self.viewbox)
y_axis.setGrid(64)
return y_axis
def _default_viewbox(self):
viewbox = pg.ViewBox(enableMenu=False)
viewbox.setMouseEnabled(x=False, y=True)
viewbox.setBackgroundColor('w')
if (self.y_min != 0) or (self.y_max != 0):
viewbox.disableAutoRange()
viewbox.setYRange(self.y_min, self.y_max)
for child in self.children:
for plot in child.get_plots():
viewbox.addItem(plot)
return viewbox
def update(self, event=None):
for child in self.children:
child.update()
def add_plot(self, plot, label=None):
self.viewbox.addItem(plot)
if label:
self.parent.legend.addItem(plot, label)
def plot(self, x, y, color='k', log_x=False, log_y=False, label=None,
kind='line'):
'''
Convenience function used by plugins
This is typically used in post-processing routines to add static plots
to existing view boxes.
'''
if log_x:
x = np.log10(x)
if log_y:
y = np.log10(y)
x = np.asarray(x)
y = np.asarray(y)
m = np.isfinite(x) & np.isfinite(y)
x = x[m]
y = y[m]
        if kind == 'line':
            item = pg.PlotCurveItem(pen=pg.mkPen(color))
        elif kind == 'scatter':
            item = pg.ScatterPlotItem(pen=pg.mkPen(color))
        else:
            raise ValueError('Unsupported plot kind %r' % kind)
item.setData(x, y)
self.add_plot(item)
if label is not None:
self.parent.legend.addItem(item, label)
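    # Usage sketch (illustrative values):
    #   viewbox.plot(freq, spl, color='r', log_x=True, label='measured SPL')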
################################################################################
# Plots
################################################################################
class BasePlot(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
source_name = d_(Unicode())
source = Typed(object)
label = d_(Unicode())
def update(self, event=None):
pass
def _reset_plots(self):
pass
################################################################################
# Single plots
################################################################################
class SinglePlot(BasePlot):
pen_color = d_(Typed(object))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
label = d_(Unicode())
pen = Typed(object)
plot = Typed(object)
def get_plots(self):
return [self.plot]
def _default_pen_color(self):
return 'black'
def _default_pen(self):
color = make_color(self.pen_color)
return pg.mkPen(color, width=self.pen_width)
def _default_name(self):
return self.source_name + '_plot'
class ChannelPlot(SinglePlot):
downsample = Int(0)
decimate_mode = d_(Enum('extremes', 'mean'))
_cached_time = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_channel_plot'
def _default_plot(self):
return pg.PlotCurveItem(pen=self.pen, antialias=self.antialias)
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_source(self.source)
self.parent.data_range.observe('span', self._update_time)
self.source.add_callback(self._append_data)
self.parent.viewbox.sigResized.connect(self._update_decimation)
self._update_time(None)
self._update_decimation(self.parent.viewbox)
def _update_time(self, event):
# Precompute the time array since this can be the "slow" point
# sometimes in computations
n = round(self.parent.data_range.span*self.source.fs)
self._cached_time = np.arange(n)/self.source.fs
self._update_decimation()
self._update_buffer()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs,
self.parent.data_range.span*2)
def _update_decimation(self, viewbox=None):
try:
width, _ = self.parent.viewbox.viewPixelSize()
dt = self.source.fs**-1
self.downsample = round(width/dt/2)
except Exception as e:
pass
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def update(self, event=None):
low, high = self.parent.data_range.current_range
data = self._buffer.get_range_filled(low, high, np.nan)
t = self._cached_time[:len(data)] + low
if self.downsample > 1:
t = t[::self.downsample]
if self.decimate_mode == 'extremes':
d_min, d_max = decimate_extremes(data, self.downsample)
t = t[:len(d_min)]
x = np.c_[t, t].ravel()
y = np.c_[d_min, d_max].ravel()
if x.shape == y.shape:
deferred_call(self.plot.setData, x, y, connect='pairs')
elif self.decimate_mode == 'mean':
d = decimate_mean(data, self.downsample)
t = t[:len(d)]
if t.shape == d.shape:
deferred_call(self.plot.setData, t, d)
else:
t = t[:len(data)]
deferred_call(self.plot.setData, t, data)
def _reshape_for_decimate(data, downsample):
# Determine the "fragment" size that we are unable to decimate. A
# downsampling factor of 5 means that we perform the operation in chunks of
# 5 samples. If we have only 13 samples of data, then we cannot decimate
# the last 3 samples and will simply discard them.
offset = data.shape[-1] % downsample
if offset > 0:
data = data[..., :-offset]
shape = (len(data), -1, downsample) if data.ndim == 2 else (-1, downsample)
return data.reshape(shape)
def decimate_mean(data, downsample):
    # If data is empty, return immediately
    if data.size == 0:
        return np.array([])
data = _reshape_for_decimate(data, downsample).copy()
return data.mean(axis=-1)
def decimate_extremes(data, downsample):
    # If data is empty, return immediately
if data.size == 0:
return np.array([]), np.array([])
# Force a copy to be made, which speeds up min()/max(). Apparently min/max
# make a copy of a reshaped array before performing the operation, so we
# force it now so the copy only occurs once.
data = _reshape_for_decimate(data, downsample).copy()
return data.min(axis=-1), data.max(axis=-1)
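# Worked example: decimating 10 samples by a factor of 4 drops the trailing
# 2-sample fragment and reduces the remaining 8 samples to 2 (min, max) pairs:
#   decimate_extremes(np.arange(10), 4) -> (array([0, 4]), array([3, 7]))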
class FFTChannelPlot(ChannelPlot):
time_span = d_(Float(1))
window = d_(Enum('hamming', 'flattop'))
_x = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_fft_plot'
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._append_data)
self.source.observe('fs', self._cache_x)
self._update_buffer()
self._cache_x()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs, self.time_span)
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def _cache_x(self, event=None):
if self.source.fs:
self._x = get_x_fft(self.source.fs, self.time_span)
def update(self, event=None):
if self._buffer.get_time_ub() >= self.time_span:
data = self._buffer.get_latest(-self.time_span, 0)
#psd = util.patodb(util.psd(data, self.source.fs, self.window))
psd = util.psd(data, self.source.fs, self.window)
spl = self.source.calibration.get_spl(self._x, psd)
deferred_call(self.plot.setData, self._x, spl)
class BaseTimeseriesPlot(SinglePlot):
rect_center = d_(Float(0.5))
rect_height = d_(Float(1))
fill_color = d_(Typed(object))
brush = Typed(object)
_rising = Typed(list, ())
_falling = Typed(list, ())
def _default_brush(self):
return pg.mkBrush(self.fill_color)
def _default_plot(self):
plot = pg.QtGui.QGraphicsPathItem()
plot.setPen(self.pen)
plot.setBrush(self.brush)
return plot
def update(self, event=None):
lb, ub = self.parent.data_range.current_range
current_time = self.parent.data_range.current_time
starts = self._rising
ends = self._falling
if len(starts) == 0 and len(ends) == 1:
starts = [0]
elif len(starts) == 1 and len(ends) == 0:
ends = [current_time]
elif len(starts) > 0 and len(ends) > 0:
if starts[0] > ends[0]:
starts = np.r_[0, starts]
if starts[-1] > ends[-1]:
ends = np.r_[ends, current_time]
try:
epochs = np.c_[starts, ends]
except ValueError as e:
log.exception(e)
log.warning('Unable to update %r, starts shape %r, ends shape %r',
self, starts, ends)
return
m = ((epochs >= lb) & (epochs < ub)) | np.isnan(epochs)
epochs = epochs[m.any(axis=-1)]
path = pg.QtGui.QPainterPath()
y_start = self.rect_center - self.rect_height*0.5
for x_start, x_end in epochs:
x_width = x_end-x_start
r = pg.QtCore.QRectF(x_start, y_start, x_width, self.rect_height)
path.addRect(r)
deferred_call(self.plot.setPath, path)
class EventPlot(BaseTimeseriesPlot):
event = d_(Unicode())
def _observe_event(self, event):
        if self.event:
self.parent.data_range.observe('current_time', self.update)
def _default_name(self):
return self.event + '_timeseries'
def _append_data(self, bound, timestamp):
if bound == 'start':
self._rising.append(timestamp)
elif bound == 'end':
self._falling.append(timestamp)
self.update()
class TimeseriesPlot(BaseTimeseriesPlot):
source_name = d_(Unicode())
source = Typed(object)
def _default_name(self):
return self.source_name + '_timeseries'
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_event_source(self.source)
self.parent.data_range.observe('current_time', self.update)
self.source.add_callback(self._append_data)
def _append_data(self, data):
for (etype, value) in data:
if etype == 'rising':
self._rising.append(value)
elif etype == 'falling':
self._falling.append(value)
################################################################################
# Group plots
################################################################################
class GroupMixin(ColorCycleMixin):
source = Typed(object)
group_meta = d_(Unicode())
groups = d_(Typed(ContextMeta))
group_names = d_(List())
#: Function that takes the epoch metadata and decides whether to accept it
#: for plotting. Useful to reduce the number of plots shown on a graph.
group_filter = d_(Callable())
#: Function that takes the epoch metadata and returns a key indicating
#: which group it should included in for plotting.
group_color_key = d_(Callable())
pen_width = d_(Int(0))
antialias = d_(Bool(False))
plots = Dict()
_data_cache = Typed(object)
_data_count = Typed(object)
_data_updated = Typed(object)
_data_n_samples = Typed(object)
_pen_color_cycle = Typed(object)
_plot_colors = Typed(object)
_x = Typed(np.ndarray)
n_update = d_(Int(1))
def _default_group_names(self):
return [p.name for p in self.groups.values]
def _default_group_filter(self):
return lambda key: True
def _default_group_color_key(self):
return lambda key: tuple(key[g] for g in self.group_names)
def get_pen_color(self, key):
kw_key = {n: k for n, k in zip(self.group_names, key)}
group_key = self.group_color_key(kw_key)
return super().get_pen_color(group_key)
def reset_plots(self):
# Clear any existing plots and reset color cycle
        for plot in self.plots.values():
self.parent.viewbox.removeItem(plot)
self.plots = {}
self._data_cache = defaultdict(list)
self._data_count = defaultdict(int)
self._data_updated = defaultdict(int)
self._data_n_samples = defaultdict(int)
def _observe_groups(self, event):
self.groups.observe('values', self._update_groups)
self._update_groups()
def _update_groups(self, event=None):
self.reset_plots()
self.group_names = [p.name for p in self.groups.values]
if self.source is not None:
self.update()
def get_plots(self):
return []
def _make_new_plot(self, key):
log.info('Adding plot for key %r', key)
try:
pen_color = self.get_pen_color(key)
pen = pg.mkPen(pen_color, width=self.pen_width)
plot = pg.PlotCurveItem(pen=pen, antialias=self.antialias)
deferred_call(self.parent.viewbox.addItem, plot)
self.plots[key] = plot
except KeyError as key_error:
key = key_error.args[0]
m = f'Cannot update plot since a field, {key}, ' \
'required by the plot is missing.'
raise ConfigurationException(m) from key_error
def get_plot(self, key):
if key not in self.plots:
self._make_new_plot(key)
return self.plots[key]
class EpochGroupMixin(GroupMixin):
duration = Float()
def _y(self, epoch):
return np.mean(epoch, axis=0) if len(epoch) \
else np.full_like(self._x, np.nan)
def _update_duration(self, event=None):
self.duration = self.source.duration
def _epochs_acquired(self, epochs):
for d in epochs:
md = d['info']['metadata']
if self.group_filter(md):
signal = d['signal']
key = tuple(md[n] for n in self.group_names)
self._data_cache[key].append(signal)
self._data_count[key] += 1
# Track number of samples
n = max(self._data_n_samples[key], len(signal))
self._data_n_samples[key] = n
# Does at least one epoch need to be updated?
for key, count in self._data_count.items():
if count >= self._data_updated[key] + self.n_update:
n = max(self._data_n_samples.values())
self.duration = n / self.source.fs
self.update()
break
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._epochs_acquired)
self.source.observe('duration', self._update_duration)
self.source.observe('fs', self._cache_x)
self.observe('duration', self._cache_x)
self._reset_plots()
self._cache_x()
def update(self, event=None):
# Update epochs that need updating
todo = []
for key, count in list(self._data_count.items()):
if count >= self._data_updated[key] + self.n_update:
data = self._data_cache[key]
plot = self.get_plot(key)
y = self._y(data)
todo.append((plot.setData, self._x, y))
self._data_updated[key] = len(data)
def update():
for setter, x, y in todo:
setter(x, y)
deferred_call(update)
class GroupedEpochAveragePlot(EpochGroupMixin, BasePlot):
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.duration:
n_time = round(self.source.fs * self.duration)
self._x = np.arange(n_time)/self.source.fs
def _default_name(self):
return self.source_name + '_grouped_epoch_average_plot'
def _observe_source(self, event):
super()._observe_source(event)
if self.source is not None:
self.parent.data_range.add_source(self.source)
class GroupedEpochFFTPlot(EpochGroupMixin, BasePlot):
def _default_name(self):
return self.source_name + '_grouped_epoch_fft_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return self.source.calibration.get_spl(self._x, util.psd(y, self.source.fs))
class GroupedEpochPhasePlot(EpochGroupMixin, BasePlot):
unwrap = d_(Bool(True))
def _default_name(self):
return self.source_name + '_grouped_epoch_phase_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return util.phase(y, self.source.fs, unwrap=self.unwrap)
class StackedEpochAveragePlot(EpochGroupMixin, BasePlot):
_offset_update_needed = Bool(False)
def _make_new_plot(self, key):
super()._make_new_plot(key)
self._offset_update_needed = True
def _update_offsets(self, vb=None):
vb = self.parent.viewbox
height = vb.height()
n = len(self.plots)
for i, (_, plot) in enumerate(sorted(self.plots.items())):
offset = (i+1) * height / (n+1)
point = vb.mapToView(pg.Point(0, offset))
plot.setPos(0, point.y())
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.source.duration:
n_time = round(self.source.fs * self.source.duration)
self._x = np.arange(n_time)/self.source.fs
    def update(self, event=None):
        super().update(event)
if self._offset_update_needed:
deferred_call(self._update_offsets)
self._offset_update_needed = False
def _reset_plots(self):
#super()._reset_plots()
self.parent.viewbox \
.sigRangeChanged.connect(self._update_offsets)
self.parent.viewbox \
.sigRangeChangedManually.connect(self._update_offsets)
################################################################################
# Simple plotters
################################################################################
class ResultPlot(SinglePlot):
x_column = d_(Unicode())
y_column = d_(Unicode())
average = d_(Bool())
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
data_filter = d_(Callable())
_data_cache = Typed(list)
def _default_data_filter(self):
# By default, accept all data points
return lambda x: True
def _default_name(self):
return '.'.join((self.parent.name, self.source_name, 'result_plot',
self.x_column, self.y_column))
def _observe_source(self, event):
if self.source is not None:
self._data_cache = []
self.source.add_callback(self._data_acquired)
def _data_acquired(self, data):
update = False
for d in data:
if self.data_filter(d):
x = d[self.x_column]
y = d[self.y_column]
self._data_cache.append((x, y))
update = True
if update:
self.update()
def update(self, event=None):
if not self._data_cache:
return
x, y = zip(*self._data_cache)
x = np.array(x)
y = np.array(y)
if self.average:
d = pd.DataFrame({'x': x, 'y': y}).groupby('x')['y'].mean()
x = d.index.values
y = d.values
deferred_call(self.plot.setData, x, y)
def _default_plot(self):
symbol_code = self.SYMBOL_MAP[self.symbol]
        color = make_color(self.pen_color)
pen = pg.mkPen(color, width=self.pen_width)
brush = pg.mkBrush(color)
plot = pg.PlotDataItem(pen=pen,
antialias=self.antialias,
symbol=symbol_code,
symbolSize=self.symbol_size,
symbolPen=pen,
symbolBrush=brush,
pxMode=self.symbol_size_unit=='screen')
deferred_call(self.parent.add_plot, plot, self.label)
return plot
class DataFramePlot(ColorCycleMixin, PSIContribution):
data = d_(Typed(pd.DataFrame))
x_column = d_(Unicode())
y_column = d_(Unicode())
grouping = d_(List(Unicode()))
_plot_cache = Dict()
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
def _default_name(self):
return '.'.join((self.parent.name, 'result_plot'))
def _observe_x_column(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_y_column(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_grouping(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_data(self, event):
if self.data is None:
return
if self.x_column not in self.data:
return
if self.y_column not in self.data:
return
todo = []
if self.grouping:
try:
for group, values in self.data.groupby(self.grouping):
if group not in self._plot_cache:
self._plot_cache[group] = self._default_plot(group)
x = values[self.x_column].values
y = values[self.y_column].values
i = np.argsort(x)
todo.append((self._plot_cache[group], x[i], y[i]))
except KeyError as e:
# This is likely triggered when grouping updates an analysis
# before it's ready.
log.warning(e)
return
else:
if None not in self._plot_cache:
self._plot_cache[None] = self._default_plot(None)
x = self.data[self.x_column].values
y = self.data[self.y_column].values
i = np.argsort(x)
todo.append((self._plot_cache[None], x[i], y[i]))
def update():
nonlocal todo
for plot, x, y in todo:
plot.setData(x, y)
deferred_call(update)
def _default_plot(self, group):
symbol_code = self.SYMBOL_MAP[self.symbol]
color = self.get_pen_color(group)
brush = pg.mkBrush(color)
pen = pg.mkPen(color, width=self.pen_width)
plot = pg.PlotDataItem(pen=pen,
antialias=self.antialias,
symbol=symbol_code,
symbolSize=self.symbol_size,
symbolPen=pen,
symbolBrush=brush,
pxMode=self.symbol_size_unit=='screen')
deferred_call(self.parent.add_plot, plot, self.label)
return plot
def reset_plots(self):
for plot in self._plot_cache.values():
deferred_call(self.parent.viewbox.removeItem, plot)
self._plot_cache = {}
def get_plots(self):
return list(self._plot_cache.values())
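# DataFramePlot usage sketch (illustrative data; assumes the plot's parent
# viewbox is already set up by the enaml workbench):
#
#   plot.data = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [0.1, 0.4, 0.9]})
#   plot.x_column = 'x'
#   plot.y_column = 'y'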
| mit | 3,576,317,524,640,551,400 | 30.685765 | 84 | 0.559376 | false |
sdemircan/editobj2 | field_qtopia.py | 1 | 9247 | # -*- coding: utf-8 -*-
# field_qtopia.py
# Copyright (C) 2007-2008 Jean-Baptiste LAMY -- [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import editobj2
from editobj2.field import *
from editobj2.field import _WithButtonField, _RangeField, _ShortEnumField, _LongEnumField
import qt
class LineEdit(qt.QLineEdit):
def __init__(self, master, on_validate):
qt.QLineEdit.__init__(self, master)
self.on_validate = on_validate
self.connect(self, qt.SIGNAL("returnPressed()"), self.on_validate)
def focusOutEvent(self, e):
qt.QLineEdit.focusOutEvent(self, e)
self.on_validate()
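# LineEdit commits its text both on Return and on focus loss, so a value typed
# without pressing Enter is still validated when the widget loses focus.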
class QtopiaField(MultiGUIField):
y_flags = 0
class QtopiaEntryField(QtopiaField, EntryField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = LineEdit(master.q, self.validate)
super(QtopiaEntryField, self).__init__(gui, master, o, attr, undo_stack)
self.timer = None
self.update()
def validate(self):
s = unicode(self.q.text())
if s != self.old_str:
self.old_str = s
self.set_value(s)
def update(self):
self.updating = 1
try:
self.old_str = self.get_value()
self.q.setText(self.old_str)
finally: self.updating = 0
class QtopiaIntField (QtopiaEntryField, IntField): pass # XXX no "spin-button" since they don't allow entering e.g. "1 + 2" as an integer !
class QtopiaFloatField (QtopiaEntryField, FloatField): pass
class QtopiaStringField(QtopiaEntryField, StringField): pass
class QtopiaPasswordField(QtopiaStringField, PasswordField):
def __init__(self, gui, master, o, attr, undo_stack):
QtopiaStringField.__init__(self, gui, master, o, attr, undo_stack)
self.q.setEchoMode(qt.QLineEdit.Password)
class QtopiaBoolField(QtopiaField, BoolField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QCheckBox(" ", master.q)
super(QtopiaBoolField, self).__init__(gui, master, o, attr, undo_stack)
self.update()
self.q.connect(self.q, qt.SIGNAL("stateChanged(int)"), self.validate)
def validate(self, state):
v = self.descr.get(self.o, self.attr)
if state == 1: self.q.setTristate(0)
elif state == 0:
if isinstance(v, int): self.set_value(0)
else: self.set_value(False)
else:
if isinstance(v, int): self.set_value(1)
else: self.set_value(True)
def update(self):
self.updating = 1
try:
v = self.descr.get(self.o, self.attr)
if v is introsp.NonConsistent:
self.q.setTristate(1)
self.q.setNoChange()
else:
self.q.setChecked(v)
finally:
self.updating = 0
class QtopiaProgressBarField(QtopiaField, ProgressBarField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QProgressBar(master.q)
    super(QtopiaProgressBarField, self).__init__(gui, master, o, attr, undo_stack)
self.update()
def update(self):
v = self.get_value()
if v is introsp.NonConsistent: self.q.setTotalSteps(0)
else: self.q.setTotalSteps(100); self.q.setProgress(int(v * 100))
class QtopiaEditButtonField(QtopiaField, EditButtonField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QPushButton(editobj2.TRANSLATOR(u"Edit..."), master.q)
super(QtopiaEditButtonField, self).__init__(gui, master, o, attr, undo_stack)
self.q.setAutoDefault(0)
self.q.connect(self.q, qt.SIGNAL("clicked()"), self.on_click)
self.update()
def update(self):
self.q.setEnabled(not self.get_value() is None)
class Qtopia_WithButtonField(QtopiaField, _WithButtonField):
def __init__(self, gui, master, o, attr, undo_stack, Field, button_text, on_button):
self.q = qt.QHBox(master.q)
super(Qtopia_WithButtonField, self).__init__(gui, master, o, attr, undo_stack, Field, button_text, on_button)
button = qt.QPushButton(editobj2.TRANSLATOR(button_text), self.q)
button.setAutoDefault(0)
button.connect(button, qt.SIGNAL("clicked()"), on_button)
class QtopiaWithButtonStringField(QtopiaField, WithButtonStringField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QHBox(master.q)
super(QtopiaWithButtonStringField, self).__init__(gui, master, o, attr, undo_stack)
button = qt.QPushButton(editobj2.TRANSLATOR(self.button_text), self.q)
button.setAutoDefault(0)
button.connect(button, qt.SIGNAL("clicked()"), self.on_button)
class QtopiaFilenameField(QtopiaWithButtonStringField, FilenameField):
def on_button(self):
import editobj2.qtopia_file_chooser
editobj2.qtopia_file_chooser.ask_filename(self.string_field.set_value, self.string_field.get_value())
class QtopiaDirnameField(QtopiaWithButtonStringField, DirnameField):
def on_button(self):
import editobj2.qtopia_file_chooser
editobj2.qtopia_file_chooser.ask_dirname(self.string_field.set_value, self.string_field.get_value())
class QtopiaURLField(QtopiaWithButtonStringField, URLField):
def on_button(self):
import webbrowser
webbrowser.open_new(self.get_value())
class QtopiaTextField(QtopiaField, TextField):
y_flags = 1
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QMultiLineEdit(master.q)
super(QtopiaTextField, self).__init__(gui, master, o, attr, undo_stack)
self.q.connect(self.q, qt.SIGNAL("textChanged()"), self.validate)
self.update()
def validate(self):
s = unicode(self.q.text())
self.set_value(s)
def update(self):
self.updating = 1
try:
self.old_str = self.get_value()
if self.q.text() != self.old_str:
self.q.setText(self.old_str)
finally: self.updating = 0
class QtopiaObjectAttributeField(QtopiaField, ObjectAttributeField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QHBox(master.q)
super(QtopiaObjectAttributeField, self).__init__(gui, master, o, attr, undo_stack)
    self.q.setFrameShape(qt.QFrame.Box)
self.q.setFrameShadow(qt.QFrame.Sunken)
self.q.setMargin(5)
class Qtopia_RangeField(QtopiaField, _RangeField):
def __init__(self, gui, master, o, attr, undo_stack, min, max, incr = 1):
self.q = qt.QHBox(master.q)
self.q.setSpacing(5)
self.label = qt.QLabel (self.q)
self.slider = qt.QSlider(min, max, 1, 0, qt.QSlider.Horizontal, self.q)
super(Qtopia_RangeField, self).__init__(gui, master, o, attr, undo_stack, min, max, incr)
self.slider.connect(self.slider, qt.SIGNAL("valueChanged(int)"), self.validate)
def validate(self, v):
self.set_value(v)
self.label.setText(str(v))
def update(self):
self.updating = 1
try:
v = self.get_value()
self.slider.setValue(v)
self.label.setText(str(v))
finally: self.updating = 0
class Qtopia_ShortEnumField(QtopiaField, _ShortEnumField):
def __init__(self, gui, master, o, attr, undo_stack, choices, value_2_enum = None, enum_2_value = None):
self.q = qt.QComboBox(master.q)
super(Qtopia_ShortEnumField, self).__init__(gui, master, o, attr, undo_stack, choices, value_2_enum, enum_2_value)
for choice in self.choice_keys: self.q.insertItem(choice)
self.update()
self.q.connect(self.q, qt.SIGNAL("activated(int)"), self.validate)
def validate(self, enum):
i = self.q.currentItem()
self.set_value(self.choices[self.choice_keys[i]])
def update(self):
self.updating = 1
try:
i = self.choice_2_index.get(self.get_value())
if not i is None: self.q.setCurrentItem(i)
else: self.q.setCurrentItem(-1)
finally: self.updating = 0
class Qtopia_LongEnumField(QtopiaField, _LongEnumField):
y_flags = 1
def __init__(self, gui, master, o, attr, undo_stack, choices, value_2_enum = None, enum_2_value = None):
self.q = qt.QListBox(master.q)
super(Qtopia_LongEnumField, self).__init__(gui, master, o, attr, undo_stack, choices, value_2_enum, enum_2_value)
for choice in self.choice_keys: self.q.insertItem(choice)
self.update()
self.q.connect(self.q, qt.SIGNAL("selectionChanged()"), self.validate)
def validate(self):
i = self.q.currentItem()
if i != self.i:
self.i = i
enum = self.choices[self.choice_keys[i]]
self.set_value(enum)
def update(self):
self.updating = 1
try:
self.q.clearSelection()
self.i = self.choice_2_index.get(self.get_value())
if not self.i is None:
self.q.setSelected(self.i, 1)
self.q.ensureCurrentVisible()
finally: self.updating = 0
| gpl-2.0 | 1,841,811,769,113,129,200 | 34.980545 | 141 | 0.667676 | false |
PhasesResearchLab/ESPEI | tests/test_error_functions.py | 1 | 17483 | # pylint: disable=redefined-outer-name
"""
Test different error functions as isolated units.
"""
from unittest import mock
import numpy as np
import pytest
import scipy.stats
from tinydb import where
from pycalphad import Database, Model, variables as v
from espei.paramselect import generate_parameters
from espei.error_functions import *
from espei.error_functions.equilibrium_thermochemical_error import calc_prop_differences
from .fixtures import datasets_db
from .testing_data import *
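# Note: the "error" values asserted below are log-probabilities (scipy.stats
# logpdf values), so numbers closer to zero indicate better agreement.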
def test_activity_error(datasets_db):
"""Test that activity error returns a correct result"""
datasets_db.insert(CU_MG_EXP_ACTIVITY)
dbf = Database(CU_MG_TDB)
error = calculate_activity_error(dbf, ['CU','MG','VA'], list(dbf.phases.keys()), datasets_db, {}, {}, {})
assert np.isclose(error, -257.41020886970756, rtol=1e-6)
def test_subsystem_activity_probability(datasets_db):
"""Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""
datasets_db.insert(CR_NI_ACTIVITY)
dbf_bin = Database(CR_NI_TDB)
dbf_tern = Database(CR_FE_NI_TDB)
phases = list(dbf_tern.phases.keys())
# Truth
bin_prob = calculate_activity_error(dbf_bin, ['CR','NI','VA'], phases, datasets_db, {}, {}, {})
    # Getting binary subsystem data explicitly (from binary input)
prob = calculate_activity_error(dbf_tern, ['CR','NI','VA'], phases, datasets_db, {}, {}, {})
assert np.isclose(prob, bin_prob)
# Getting binary subsystem from ternary input
prob = calculate_activity_error(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db, {}, {}, {})
assert np.isclose(prob, bin_prob)
def test_non_equilibrium_thermochemical_error_with_multiple_X_points(datasets_db):
"""Multiple composition datapoints in a dataset for a mixing phase should be successful."""
datasets_db.insert(CU_MG_CPM_MIX_X_HCP_A3)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
comps = ['CU', 'MG', 'VA']
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, -4061.119001241541, rtol=1e-6)
def test_non_equilibrium_thermochemical_error_with_multiple_T_points(datasets_db):
"""Multiple temperature datapoints in a dataset for a stoichiometric comnpound should be successful."""
datasets_db.insert(CU_MG_HM_MIX_T_CUMG2)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
comps = ['CU', 'MG', 'VA']
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
    assert np.isclose(error, -14.287293263253728, rtol=1e-6)
def test_non_equilibrium_thermochemical_error_with_multiple_T_X_points(datasets_db):
"""Multiple temperature and composition datapoints in a dataset for a mixing phase should be successful."""
datasets_db.insert(CU_MG_SM_MIX_T_X_FCC_A1)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
comps = ['CU', 'MG', 'VA']
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db)
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(float(error), -3282497.2380024833, rtol=1e-6)
def test_non_equilibrium_thermochemical_error_for_mixing_entropy_error_is_excess_only(datasets_db):
"""Tests that error in mixing entropy data is excess only (the ideal part is removed)."""
# If this fails, make sure the ideal mixing contribution is removed.
phase_models = {
"components": ["AL", "B"],
"phases": {
"LIQUID" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
},
"FCC_A1" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
}
}
}
dataset_excess_mixing = {
"components": ["AL", "B"],
"phases": ["FCC_A1"],
"solver": {
"sublattice_site_ratios": [1],
"sublattice_occupancies": [[[0.5, 0.5]]],
"sublattice_configurations": [[["AL", "B"]]],
"mode": "manual"
},
"conditions": {
"P": 101325,
"T": 298.15
},
"output": "SM_MIX",
"values": [[[10]]],
"excluded_model_contributions": ["idmix"]
}
datasets_db.insert(dataset_excess_mixing)
dbf = generate_parameters(phase_models, datasets_db, 'SGTE91', 'linear')
assert dbf.elements == {'AL', 'B'}
assert set(dbf.phases.keys()) == {'LIQUID', 'FCC_A1'}
assert len(dbf._parameters.search(where('parameter_type') == 'L')) == 1
phases = list(dbf.phases.keys())
comps = list(dbf.elements)
# the error should be exactly 0 because we are only fitting to one point
# the dataset is excess only
zero_error_prob = scipy.stats.norm(loc=0, scale=0.2).logpdf(0.0) # SM weight = 0.2
# Explicitly pass parameters={} to not try fitting anything
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db, symbols_to_fit=[])
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, zero_error_prob, atol=1e-6)
def test_non_equilibrium_thermochemical_error_for_enthalpy_mixing(datasets_db):
"""Tests that error in mixing enthalpy data is calculated correctly"""
phase_models = {
"components": ["AL", "B"],
"phases": {
"LIQUID" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
},
"FCC_A1" : {
"sublattice_model": [["AL", "B"]],
"sublattice_site_ratios": [1]
}
}
}
dataset_excess_mixing = {
"components": ["AL", "B"],
"phases": ["FCC_A1"],
"solver": {
"sublattice_site_ratios": [1],
"sublattice_occupancies": [[[0.5, 0.5]]],
"sublattice_configurations": [[["AL", "B"]]],
"mode": "manual"
},
"conditions": {
"P": 101325,
"T": 298.15
},
"output": "HM_MIX",
"values": [[[10000]]],
"excluded_model_contributions": ["idmix"]
}
datasets_db.insert(dataset_excess_mixing)
dbf = generate_parameters(phase_models, datasets_db, 'SGTE91', 'linear')
assert dbf.elements == {'AL', 'B'}
assert set(dbf.phases.keys()) == {'LIQUID', 'FCC_A1'}
assert len(dbf._parameters.search(where('parameter_type') == 'L')) == 1
phases = list(dbf.phases.keys())
comps = list(dbf.elements)
# the error should be exactly 0 because we are only fitting to one point
# the dataset is excess only
zero_error_prob = scipy.stats.norm(loc=0, scale=500.0).logpdf(0.0) # HM weight = 500
# Explicitly pass parameters={} to not try fitting anything
thermochemical_data = get_thermochemical_data(dbf, comps, phases, datasets_db, symbols_to_fit=[])
error = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(error, zero_error_prob, atol=1e-6)
def test_subsystem_non_equilibrium_thermochemcial_probability(datasets_db):
"""Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""
datasets_db.insert(CR_NI_LIQUID_DATA)
dbf_bin = Database(CR_NI_TDB)
dbf_tern = Database(CR_FE_NI_TDB)
phases = list(dbf_tern.phases.keys())
# Truth
thermochemical_data = get_thermochemical_data(dbf_bin, ['CR', 'NI', 'VA'], phases, datasets_db)
bin_prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
    # Getting binary subsystem data explicitly (from binary input)
thermochemical_data = get_thermochemical_data(dbf_tern, ['CR', 'NI', 'VA'], phases, datasets_db)
prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(prob, bin_prob)
# Getting binary subsystem from ternary input
thermochemical_data = get_thermochemical_data(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db)
prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
assert np.isclose(prob, bin_prob)
def test_zpf_error_zero(datasets_db):
"""Test that sum of square ZPF errors returns 0 for an exactly correct result"""
datasets_db.insert(CU_MG_DATASET_ZPF_ZERO_ERROR)
dbf = Database(CU_MG_TDB)
comps = ['CU','MG','VA']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_prob = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
error = calculate_zpf_error(zpf_data, np.array([]))
assert np.isclose(error, zero_error_prob, rtol=1e-6)
def test_subsystem_zpf_probability(datasets_db):
"""Test binary Cr-Ni data produces the same probability regardless of whether the main system is a binary or ternary."""
datasets_db.insert(CR_NI_ZPF_DATA)
dbf_bin = Database(CR_NI_TDB)
dbf_tern = Database(CR_FE_NI_TDB)
phases = list(dbf_tern.phases.keys())
# Truth
zpf_data = get_zpf_data(dbf_bin, ['CR', 'NI', 'VA'], phases, datasets_db, {})
bin_prob = calculate_zpf_error(zpf_data, np.array([]))
    # Getting binary subsystem data explicitly (from binary input)
zpf_data = get_zpf_data(dbf_tern, ['CR', 'NI', 'VA'], phases, datasets_db, {})
prob = calculate_zpf_error(zpf_data, np.array([]))
assert np.isclose(prob, bin_prob)
# Getting binary subsystem from ternary input
zpf_data = get_zpf_data(dbf_tern, ['CR', 'FE', 'NI', 'VA'], phases, datasets_db, {})
prob = calculate_zpf_error(zpf_data, np.array([]))
assert np.isclose(prob, bin_prob)
def test_zpf_error_species(datasets_db):
"""Tests that ZPF error works if a species is used."""
# Note that the liquid is stabilized by the species for the equilibrium
# used in the data. If the SPECIES is removed from the database (and LIQUID
# constituents), then the resulting likelihood will NOT match this (and be
# closer to 93, according to a test.)
datasets_db.insert(LI_SN_ZPF_DATA)
dbf = Database(LI_SN_TDB)
comps = ['LI', 'SN']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_probability = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
exact_likelihood = calculate_zpf_error(zpf_data, approximate_equilibrium=False)
assert np.isclose(exact_likelihood, zero_error_probability)
approx_likelihood = calculate_zpf_error(zpf_data, approximate_equilibrium=True)
# accept higher tolerance for approximate
assert np.isclose(approx_likelihood, zero_error_probability, rtol=1e-4)
def test_zpf_error_equilibrium_failure(datasets_db):
"""Test that a target hyperplane producing NaN chemical potentials gives a driving force of zero."""
datasets_db.insert(CU_MG_DATASET_ZPF_NAN_EQUILIBRIUM)
dbf = Database(CU_MG_TDB)
comps = ['CU','MG','VA']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_probability = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
with mock.patch('espei.error_functions.zpf_error.estimate_hyperplane', return_value=np.array([np.nan, np.nan])):
exact_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(exact_likelihood, zero_error_probability, rtol=1e-6)
approx_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(approx_likelihood, zero_error_probability, rtol=1e-6)
def test_zpf_error_works_for_stoichiometric_cmpd_tielines(datasets_db):
"""A stochimetric compound with approximate composition can be in the datasets and work"""
datasets_db.insert(CU_MG_DATASET_ZPF_STOICH_COMPOUND)
dbf = Database(CU_MG_TDB)
comps = ['CU','MG']
phases = list(dbf.phases.keys())
# ZPF weight = 1 kJ and there are two points in the tieline
zero_error_probability = 2 * scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
zpf_data = get_zpf_data(dbf, comps, phases, datasets_db, {})
exact_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(exact_likelihood, zero_error_probability, rtol=1e-6)
approx_likelihood = calculate_zpf_error(zpf_data)
assert np.isclose(approx_likelihood, zero_error_probability, rtol=1e-6)
def test_non_equilibrium_thermochemcial_species(datasets_db):
"""Test species work for non-equilibrium thermochemical data."""
datasets_db.insert(LI_SN_LIQUID_DATA)
dbf = Database(LI_SN_TDB)
phases = ['LIQUID']
thermochemical_data = get_thermochemical_data(dbf, ['LI', 'SN'], phases, datasets_db)
prob = calculate_non_equilibrium_thermochemical_probability(thermochemical_data)
# Near zero error and non-zero error
assert np.isclose(prob, (-7.13354663 + -22.43585011))
def test_equilibrium_thermochemcial_error_species(datasets_db):
"""Test species work for equilibrium thermochemical data."""
datasets_db.insert(LI_SN_LIQUID_EQ_DATA)
dbf = Database(LI_SN_TDB)
phases = list(dbf.phases.keys())
eqdata = get_equilibrium_thermochemical_data(dbf, ['LI', 'SN'], phases, datasets_db)
# Thermo-Calc
truth_values = np.array([0.0, -28133.588, -40049.995, 0.0])
# Approximate
errors_approximate, weights = calc_prop_differences(eqdata[0], np.array([]), True)
# Looser tolerances because the equilibrium is approximate, note that this is pdens dependent
assert np.all(np.isclose(errors_approximate, truth_values, atol=1e-5, rtol=1e-3))
# Exact
errors_exact, weights = calc_prop_differences(eqdata[0], np.array([]), False)
assert np.all(np.isclose(errors_exact, truth_values, atol=1e-5))
def test_equilibrium_thermochemical_error_unsupported_property(datasets_db):
"""Test that an equilibrium property that is not explictly supported will work."""
# This test specifically tests Curie temperature
datasets_db.insert(CR_NI_LIQUID_EQ_TC_DATA)
EXPECTED_VALUES = np.array([374.6625, 0.0, 0.0]) # the TC should be 374.6625 in both cases, but "values" are [0 and 382.0214], so the differences should be flipped.
dbf = Database(CR_NI_TDB)
phases = list(dbf.phases.keys())
eqdata = get_equilibrium_thermochemical_data(dbf, ['CR', 'NI'], phases, datasets_db)
errors_exact, weights = calc_prop_differences(eqdata[0], np.array([]))
assert np.all(np.isclose(errors_exact, EXPECTED_VALUES, atol=1e-3))
def test_equilibrium_thermochemical_error_computes_correct_probability(datasets_db):
"""Integration test for equilibrium thermochemical error."""
datasets_db.insert(CU_MG_EQ_HMR_LIQUID)
dbf = Database(CU_MG_TDB)
phases = list(dbf.phases.keys())
# Test that errors update in response to changing parameters
# no parameters
eqdata = get_equilibrium_thermochemical_data(dbf, ['CU', 'MG'], phases, datasets_db)
errors, weights = calc_prop_differences(eqdata[0], np.array([]))
expected_vals = [-31626.6*0.5*0.5]
assert np.all(np.isclose(errors, expected_vals))
# VV0017 (LIQUID, L0)
eqdata = get_equilibrium_thermochemical_data(dbf, ['CU', 'MG'], phases, datasets_db, parameters={'VV0017': -31626.6})
# unchanged, should be the same as before
errors, weights = calc_prop_differences(eqdata[0], np.array([-31626.6]))
assert np.all(np.isclose(errors, [-31626.6*0.5*0.5]))
# change to -40000
errors, weights = calc_prop_differences(eqdata[0], np.array([-40000], np.float_))
assert np.all(np.isclose(errors, [-40000*0.5*0.5]))
def test_driving_force_miscibility_gap(datasets_db):
datasets_db.insert(A_B_DATASET_ALPHA)
dbf = Database(A_B_REGULAR_SOLUTION_TDB)
parameters = {"L_ALPHA": None}
zpf_data = get_zpf_data(dbf, ["A", "B"], ["ALPHA"], datasets_db, parameters)
# probability for zero error error with ZPF weight = 1000.0
zero_error_prob = scipy.stats.norm(loc=0, scale=1000.0).logpdf(0.0)
# Ideal solution case
params = np.array([0.0])
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=False)
assert np.isclose(prob, zero_error_prob)
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=True)
assert np.isclose(prob, zero_error_prob)
# Negative interaction case
params = np.array([-10000.0])
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=False)
assert np.isclose(prob, zero_error_prob)
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=True)
assert np.isclose(prob, zero_error_prob)
# Miscibility gap case
params = np.array([10000.0])
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=False)
# Remember these are log probabilities, so more negative means smaller probability and larger error
assert prob < zero_error_prob
prob = calculate_zpf_error(zpf_data, parameters=params, approximate_equilibrium=True)
assert prob < zero_error_prob
| mit | 2,473,230,179,185,101,300 | 40.825359 | 169 | 0.669622 | false |
codeforfrankfurt/PolBotCheck | polbotcheck/word_cluster.py | 1 | 3812 | import json
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import db
import os
DATASET_PATH = os.environ['HOME'] + '/nltk_data/corpora/twitter_samples/tweets.20150430-223406.json'
def calc_frequencies(words, words_n=50, lang='german'):
words = [word for word in words if len(word) > 1]
words = [word for word in words if not word.isnumeric()]
words = [word.lower() for word in words]
# words = [word for word in words if word not in all_stopwords]
# Stemming words seems to make matters worse, disabled
# stemmer = nltk.stem.snowball.SnowballStemmer(lang)
# words = [stemmer.stem(word) for word in words]
fdist = nltk.FreqDist(words)
return fdist.most_common(words_n)
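# e.g. calc_frequencies("this is is a test".split()) should give
# [('is', 2), ('this', 1), ('test', 1)] (single-character words are dropped).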
def get_word_clouds(tweets, users, words_n=50, lang='english'):
default_stopwords = set(nltk.corpus.stopwords.words(lang))
stopwords_file = '../data/stopwords.txt'
custom_stopwords = set(open(stopwords_file, 'r').read().splitlines())
all_stopwords = default_stopwords | custom_stopwords
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=list(all_stopwords))
X = vectorizer.fit_transform(tweets)
terms = vectorizer.get_feature_names()
word_cloud_per_person = {}
for doc in range(len(tweets)):
feature_index = X[doc, :].nonzero()[1]
tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
doc_terms = []
for word, score in [(terms[i], score) for (i, score) in tfidf_scores]:
doc_terms.append((word, score))
important_terms = [(word, score) for word, score in sorted(doc_terms, key=lambda x: x[1], reverse=True)][:words_n]
word_cloud_per_person[users[doc]] = important_terms
return word_cloud_per_person
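# Usage sketch (illustrative data):
#   clouds = get_word_clouds(["first tweet text", "second tweet"],
#                            ["alice", "bob"], words_n=10)
#   clouds["alice"]  # -> [(term, tf-idf score), ...] sorted by score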
def save_wordcloud_image(frequencies, filename):
wordcloud = WordCloud(width=1024, height=786, min_font_size=1).fit_words(frequencies)
fig = plt.figure()
fig.set_figwidth(12)
fig.set_figheight(16)
plt.imshow(wordcloud)
plt.axis("off")
plt.savefig(filename, facecolor='k', bbox_inches='tight')
    print('image created')
def load_example_data():
tweets = []
with open(DATASET_PATH) as f:
for line in f:
tweets.append(json.loads(line)['text'])
return tweets
def get_corpus_of_most_active_users(n_users=5):
tweets = []
texts = []
with open(DATASET_PATH) as f:
for line in f:
tweets.append(json.loads(line)['user']['screen_name'])
texts.append((json.loads(line)['user']['screen_name'], json.loads(line)['text']))
users = nltk.FreqDist(tweets).most_common(n_users)
    user_texts = {}
    for user, tweet in texts:
        if user in user_texts:
            user_texts[user] = " ".join([user_texts[user], tweet])
        else:
            user_texts[user] = tweet
    corpus = [user_texts[name] for name, _ in users]
user_names = [name for name, _ in users]
return corpus, user_names
if __name__ == "__main__":
corpus, users = get_corpus_of_most_active_users()
word_cloud_per_person = get_word_clouds(corpus, users, words_n=100, lang='english')
for user in users:
topic_frequencies = word_cloud_per_person[user]
        print(user)
        print(topic_frequencies)
db.save_word_frequencies('test_user_seb', dict(topic_frequencies))
exit()
# save_wordcloud_image(dict(topic_frequencies), 'plots/word_clouds/' + user + '.png')
# This is an example how to save a word_cloud in the database
# user_in_db = 'malechanissen'
# db.save_word_frequencies(user_in_db, {'w3':10, 'w4':20})
# db.save_word_frequencies(user_in_db, dict(topic_frequencies))
# db.save_word_frequencies('test_user_seb', {'w3':10, 'w4':20})
| mit | -4,804,049,777,207,499,000 | 37.505051 | 122 | 0.651626 | false |
trentspi/PX8 | examples/plasma/plasma.py | 1 | 17516 | px8 / python cartridge
version 1
__python__
# Original code from rez
# https://www.lexaloffle.com/bbs/?tid=29529
SIZE = 128
A = None
cr = None
cg = None
cb = None
cw = None
def _init():
global SIZE, A, cr, cg, cb, cw
mode(SIZE, SIZE, 1)
cls()
A = SIZE - 1
cr = [0] * SIZE
cg = [0] * SIZE
cb = [0] * SIZE
cw = [0] * SIZE
    for i in range(0, SIZE):
        cr[i] = sget(i, 0)
        cg[i] = sget(i, 1)
        cb[i] = sget(i, 2)
        cw[i] = sget(i, 3)
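# The four ramps above (cr, cg, cb, cw) are 128-entry color gradients read from
# rows 0-3 of the sprite sheet in the __gfx__ section below.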
def _update():
pass
def _draw():
global A, cr, cg, cb, cw
a2 = A * 2
for x in range(3, SIZE, 3):
        x2 = x / 2048
        for y in range(3, SIZE, 3):
            y2 = y / 1024
            v1, v2 = 256 + 192 * sin(y2 + a2), sin(A - x2 + y2)
            r, g, b = 56 * cos(a2 + x / v1 + v2), 48 * sin((x + y) / v1 * v2), 40 * cos((x * v2 - y) / v1)
            pset(x, y, cr[flr(56 + r)])
            pset(x + 1, y, cg[flr(48 - g)])
            pset(x, y + 1, cb[flr(40 + b)])
            pset(x + 1, y + 1, cw[flr(24 - r + g)])
    A += 0.0025
    if A > 1:
        A = 0
__gfx__
00000020202222424244448484888898989999a9a9aaaa7a7a7a9842000000002022424484889899a9aa7a7a7aa9a99898848442422020000002489a7a984200
0000001010111151515555d5d5ddddcdcdcccc6c6c6666767676cd510000000010115155d5ddcdcc6c667676766c6ccdcdd5d5515110100000015dc676cd5100
000000202022222252555535353333b3b3bbbb6b6b6666767676b35200000000202252553533b3bb6b667676766b6bb3b335355252202000000253b676b35200
0000000000000050505555d5d5ddddededeeeefefeffff7f7f7fed500000000000005055d5ddedeefeff7f7f7ffefeededd5d5505000000000005def7fed5000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
| mit | -8,687,394,390,623,362,000 | 94.715847 | 128 | 0.96232 | false |
memsharded/conan | conans/client/graph/graph_builder.py | 1 | 18724 | import time
from collections import OrderedDict
from conans.client.graph.graph import DepsGraph, Node, RECIPE_EDITABLE
from conans.errors import (ConanException, ConanExceptionInUserConanfileMethod,
conanfile_exception_formatter)
from conans.model.conan_file import get_env_context_manager
from conans.model.ref import ConanFileReference
from conans.model.requires import Requirements, Requirement
from conans.util.log import logger
class DepsGraphBuilder(object):
""" Responsible for computing the dependencies graph DepsGraph
"""
def __init__(self, proxy, output, loader, resolver, recorder):
self._proxy = proxy
self._output = output
self._loader = loader
self._resolver = resolver
self._recorder = recorder
def load_graph(self, root_node, check_updates, update, remotes, processed_profile):
check_updates = check_updates or update
dep_graph = DepsGraph()
# compute the conanfile entry point for this dependency graph
name = root_node.name
root_node.public_closure = OrderedDict([(name, root_node)])
root_node.public_deps = {name: root_node}
root_node.ancestors = set()
dep_graph.add_node(root_node)
# enter recursive computation
t1 = time.time()
self._load_deps(dep_graph, root_node, Requirements(), None, None,
check_updates, update, remotes,
processed_profile)
logger.debug("GRAPH: Time to load deps %s" % (time.time() - t1))
return dep_graph
def extend_build_requires(self, graph, node, build_requires_refs, check_updates, update,
remotes, processed_profile):
# The options that will be defined in the node will be the real options values that have
# been already propagated downstream from the dependency graph. This will override any
# other possible option in the build_requires dependency graph. This means that in theory
# an option conflict while expanding the build_requires is impossible
node.conanfile.build_requires_options.clear_unscoped_options()
new_options = node.conanfile.build_requires_options._reqs_options
new_reqs = Requirements()
conanfile = node.conanfile
scope = conanfile.display_name
requires = [Requirement(ref) for ref in build_requires_refs]
self._resolve_ranges(graph, requires, scope, update, remotes)
for require in requires:
name = require.ref.name
require.build_require = True
self._handle_require(name, node, require, graph, check_updates, update,
remotes, processed_profile, new_reqs, new_options)
new_nodes = set(n for n in graph.nodes if n.package_id is None)
# This is to make sure that build_requires have precedence over the normal requires
ordered_closure = list(node.public_closure.items())
ordered_closure.sort(key=lambda x: x[1] not in new_nodes)
node.public_closure = OrderedDict(ordered_closure)
subgraph = DepsGraph()
subgraph.aliased = graph.aliased
subgraph.evaluated = graph.evaluated
subgraph.nodes = new_nodes
for n in subgraph.nodes:
n.build_require = True
return subgraph
def _resolve_ranges(self, graph, requires, consumer, update, remotes):
for require in requires:
self._resolver.resolve(require, consumer, update, remotes)
# if the range is resolved, check if it is an alias
alias = graph.aliased.get(require.ref)
if alias:
require.ref = alias
def _resolve_deps(self, graph, node, update, remote_name):
# Resolve possible version ranges of the current node requirements
# new_reqs is a shallow copy of what is propagated upstream, so changes done by the
# RangeResolver are also done in new_reqs, and then propagated!
conanfile = node.conanfile
scope = conanfile.display_name
self._resolve_ranges(graph, conanfile.requires.values(), scope, update, remote_name)
if not hasattr(conanfile, "_conan_evaluated_requires"):
conanfile._conan_evaluated_requires = conanfile.requires.copy()
elif conanfile.requires != conanfile._conan_evaluated_requires:
raise ConanException("%s: Incompatible requirements obtained in different "
"evaluations of 'requirements'\n"
" Previous requirements: %s\n"
" New requirements: %s"
% (scope, list(conanfile._conan_evaluated_requires.values()),
list(conanfile.requires.values())))
def _load_deps(self, dep_graph, node, down_reqs, down_ref, down_options,
check_updates, update, remotes, processed_profile):
""" expands the dependencies of the node, recursively
param node: Node object to be expanded in this step
down_reqs: the Requirements as coming from downstream, which can overwrite current
values
param down_ref: ConanFileReference of who is depending on current node for this expansion
"""
# basic node configuration: calling configure() and requirements()
new_reqs, new_options = self._config_node(dep_graph, node, down_reqs, down_ref, down_options)
# if there are version-ranges, resolve them before expanding each of the requirements
self._resolve_deps(dep_graph, node, update, remotes)
# Expand each one of the current requirements
for name, require in node.conanfile.requires.items():
if require.override:
continue
self._handle_require(name, node, require, dep_graph, check_updates, update,
remotes, processed_profile, new_reqs, new_options)
def _handle_require(self, name, node, require, dep_graph, check_updates, update,
remotes, processed_profile, new_reqs, new_options):
# Handle a requirement of a node. There are 2 possibilities
# node -(require)-> new_node (creates a new node in the graph)
# node -(require)-> previous (creates a diamond with a previously existing node)
# If the required is found in the node ancestors a loop is being closed
# TODO: allow bootstrapping, use references instead of names
if name in node.ancestors or name == node.name:
raise ConanException("Loop detected: '%s' requires '%s' which is an ancestor too"
% (node.ref, require.ref))
# If the requirement is found in the node public dependencies, it is a diamond
previous = node.public_deps.get(name)
previous_closure = node.public_closure.get(name)
# build_requires and private will create a new node if it is not in the current closure
if not previous or ((require.build_require or require.private) and not previous_closure):
# new node, must be added and expanded (node -> new_node)
new_node = self._create_new_node(node, dep_graph, require, name, check_updates, update,
remotes, processed_profile)
# The closure of a new node starts with just itself
new_node.public_closure = OrderedDict([(new_node.ref.name, new_node)])
# The new created node is connected to the parent one
node.connect_closure(new_node)
if require.private or require.build_require:
# If the requirement is private (or build_require), a new public_deps is defined
# the new_node doesn't propagate downstream the "node" consumer, so its public_deps
# will be a copy of the node.public_closure, i.e. it can only cause conflicts in the
# new_node.public_closure.
new_node.public_deps = node.public_closure.copy()
new_node.public_deps[name] = new_node
else:
# Normal requires propagate and can conflict with the parent "node.public_deps" too
new_node.public_deps = node.public_deps.copy()
new_node.public_deps[name] = new_node
# All the dependents of "node" are also connected now to "new_node"
for dep_node in node.inverse_closure:
dep_node.connect_closure(new_node)
# RECURSION, keep expanding (depth-first) the new node
self._load_deps(dep_graph, new_node, new_reqs, node.ref, new_options, check_updates,
update, remotes, processed_profile)
else: # a public node already exist with this name
# This is closing a diamond, the node already exists and is reachable
alias_ref = dep_graph.aliased.get(require.ref)
# Necessary to make sure that it is pointing to the correct aliased
if alias_ref:
require.ref = alias_ref
# As we are closing a diamond, there can be conflicts. This will raise if conflicts
self._conflicting_references(previous.ref, require.ref, node.ref)
# Add current ancestors to the previous node and upstream deps
union = node.ancestors.union([node.name])
for n in previous.public_closure.values():
n.ancestors.update(union)
# Even if it was in private scope, if it is reached via a public require
# the previous node and its upstream becomes public
if previous.private and not require.private:
previous.make_public()
node.connect_closure(previous)
dep_graph.add_edge(node, previous, require.private, require.build_require)
# All the upstream dependencies (public_closure) of the previously existing node
# now will be also connected to the node and to all its dependants
for name, n in previous.public_closure.items():
if n.build_require or n.private:
continue
node.connect_closure(n)
for dep_node in node.inverse_closure:
dep_node.connect_closure(n)
# Recursion is only necessary if the inputs conflict with the current "previous"
# configuration of upstream versions and options
if self._recurse(previous.public_closure, new_reqs, new_options):
self._load_deps(dep_graph, previous, new_reqs, node.ref, new_options, check_updates,
update, remotes, processed_profile)
@staticmethod
def _conflicting_references(previous_ref, new_ref, consumer_ref=None):
if previous_ref.copy_clear_rev() != new_ref.copy_clear_rev():
if consumer_ref:
raise ConanException("Conflict in %s\n"
" Requirement %s conflicts with already defined %s\n"
" To change it, override it in your base requirements"
% (consumer_ref, new_ref, previous_ref))
return True
# Computed node, if is Editable, has revision=None
# If new_ref.revision is None we cannot assume any conflict, the user hasn't specified
# a revision, so it's ok any previous_ref
if previous_ref.revision and new_ref.revision and previous_ref.revision != new_ref.revision:
if consumer_ref:
raise ConanException("Conflict in %s\n"
" Different revisions of %s has been requested"
% (consumer_ref, new_ref))
return True
return False
def _recurse(self, closure, new_reqs, new_options):
""" For a given closure, if some requirements or options coming from downstream
is incompatible with the current closure, then it is necessary to recurse
then, incompatibilities will be raised as usually"""
for req in new_reqs.values():
n = closure.get(req.ref.name)
if n and self._conflicting_references(n.ref, req.ref):
return True
for pkg_name, options_values in new_options.items():
n = closure.get(pkg_name)
if n:
options = n.conanfile.options
for option, value in options_values.items():
if getattr(options, option) != value:
return True
return False
def _config_node(self, graph, node, down_reqs, down_ref, down_options):
""" update settings and option in the current ConanFile, computing actual
requirement values, cause they can be overridden by downstream requires
param settings: dict of settings values => {"os": "windows"}
"""
try:
conanfile, ref = node.conanfile, node.ref
# Avoid extra time manipulating the sys.path for python
with get_env_context_manager(conanfile, without_python=True):
if hasattr(conanfile, "config"):
if not ref:
conanfile.output.warn("config() has been deprecated."
" Use config_options and configure")
with conanfile_exception_formatter(str(conanfile), "config"):
conanfile.config()
with conanfile_exception_formatter(str(conanfile), "config_options"):
conanfile.config_options()
conanfile.options.propagate_upstream(down_options, down_ref, ref)
if hasattr(conanfile, "config"):
with conanfile_exception_formatter(str(conanfile), "config"):
conanfile.config()
with conanfile_exception_formatter(str(conanfile), "configure"):
conanfile.configure()
conanfile.settings.validate() # All has to be ok!
conanfile.options.validate()
# Update requirements (overwrites), computing new upstream
if hasattr(conanfile, "requirements"):
# If re-evaluating the recipe, in a diamond graph, with different options,
# it could happen that one execution path of requirements() defines a package
# and another one a different package raising Duplicate dependency error
# Or the two consecutive calls, adding 2 different dependencies for the two paths
# So it is necessary to save the "requires" state and restore it before a second
# execution of requirements(). It is a shallow copy, if first iteration is
# RequireResolve'd or overridden, the inner requirements are modified
if not hasattr(conanfile, "_conan_original_requires"):
conanfile._conan_original_requires = conanfile.requires.copy()
else:
conanfile.requires = conanfile._conan_original_requires.copy()
with conanfile_exception_formatter(str(conanfile), "requirements"):
conanfile.requirements()
new_options = conanfile.options.deps_package_values
if graph.aliased:
for req in conanfile.requires.values():
req.ref = graph.aliased.get(req.ref, req.ref)
new_down_reqs = conanfile.requires.update(down_reqs, self._output, ref, down_ref)
except ConanExceptionInUserConanfileMethod:
raise
except ConanException as e:
raise ConanException("%s: %s" % (ref or "Conanfile", str(e)))
except Exception as e:
raise ConanException(e)
return new_down_reqs, new_options
def _create_new_node(self, current_node, dep_graph, requirement, name_req,
check_updates, update, remotes, processed_profile, alias_ref=None):
""" creates and adds a new node to the dependency graph
"""
try:
result = self._proxy.get_recipe(requirement.ref, check_updates, update,
remotes, self._recorder)
except ConanException as e:
if current_node.ref:
self._output.error("Failed requirement '%s' from '%s'"
% (requirement.ref,
current_node.conanfile.display_name))
raise e
conanfile_path, recipe_status, remote, new_ref = result
dep_conanfile = self._loader.load_conanfile(conanfile_path, processed_profile,
ref=requirement.ref)
if recipe_status == RECIPE_EDITABLE:
dep_conanfile.in_local_cache = False
dep_conanfile.develop = True
if getattr(dep_conanfile, "alias", None):
alias_ref = alias_ref or new_ref.copy_clear_rev()
requirement.ref = ConanFileReference.loads(dep_conanfile.alias)
dep_graph.aliased[alias_ref] = requirement.ref
return self._create_new_node(current_node, dep_graph, requirement,
name_req, check_updates, update,
remotes, processed_profile,
alias_ref=alias_ref)
logger.debug("GRAPH: new_node: %s" % str(new_ref))
new_node = Node(new_ref, dep_conanfile)
new_node.revision_pinned = requirement.ref.revision is not None
new_node.recipe = recipe_status
new_node.remote = remote
# Ancestors are a copy of the parent, plus the parent itself
new_node.ancestors = current_node.ancestors.copy()
new_node.ancestors.add(current_node.name)
# build-requires and private affect transitively. If "node" is already
# a build_require or a private one, its requirements will inherit that property
# Or if the require specify that property, then it will get it too
new_node.build_require = current_node.build_require or requirement.build_require
new_node.private = current_node.private or requirement.private
dep_graph.add_node(new_node)
dep_graph.add_edge(current_node, new_node, requirement.private, requirement.build_require)
return new_node
| mit | 1,260,864,511,600,322,000 | 52.497143 | 101 | 0.605319 | false |
ianrenton/TelegraphFantasyFootballTeamPicker | telegraphpicker.py | 1 | 19193 | #!/usr/bin/python
# -*- coding: cp1252 -*-
# Telegraph Fantasy Football Team Picker
# version 1.2.1 (11 March 2011)
# by Ian Renton and Mark Harris
# For details, see http://www.onlydreaming.net/software/telegraph-fantasy-football-team-picker
# This code is released under the GPLv3 licence (http://www.gnu.org/licenses/gpl.html).
# Takes player data from the TFF website, and picks the optimum team based
# on players' past performance and current injuries.
import re
import datetime
print "Content-Type: text/html\n\n"
# Port of MATLAB's nchoosek (unique combination) function.
def nchoosek(items, n):
if n==0: yield []
else:
for (i, item) in enumerate(items):
for cc in nchoosek(items[i+1:],n-1):
yield [item]+cc
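# Illustrative usage (not part of the original script): with a three-element
# list and n=2, each unique pair is yielded exactly once, in order:
#   list(nchoosek(['a', 'b', 'c'], 2)) == [['a', 'b'], ['a', 'c'], ['b', 'c']]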
# Works out the position a given player number maps to.
def calculatePosition(number):
if ((number < 2000) & (number >= 1000)):
return "Goalkeeper"
elif ((number < 3000) & (number >= 2000)):
return "Defender"
elif ((number < 4000) & (number >= 3000)):
return "Midfielder"
elif ((number < 5000) & (number >= 4000)):
return "Striker"
def cutDownPlayerPointsHTML(html):
    """Return only the goalkeeper, defender, midfielder and striker tables
    from the team-selection page HTML."""
goalkeepersStart = re.compile("<div class='pla-list' id='list-GK'><table>").search(html)
goalkeepersEnd = re.compile("</table>").search(html[goalkeepersStart.start():len(html)])
goalkeepersText = html[goalkeepersStart.start():goalkeepersStart.start()+goalkeepersEnd.end()]
defendersStart = re.compile("<div class='pla-list' id='list-DEF'><table>").search(html)
defendersEnd = re.compile("</table>").search(html[defendersStart.start():len(html)])
defendersText = html[defendersStart.start():defendersStart.start()+defendersEnd.end()]
midfieldersStart = re.compile("<div class='pla-list' id='list-MID'><table>").search(html)
midfieldersEnd = re.compile("</table>").search(html[midfieldersStart.start():len(html)])
midfieldersText = html[midfieldersStart.start():midfieldersStart.start()+midfieldersEnd.end()]
strikersStart = re.compile("<div class='pla-list' id='list-STR'><table>").search(html)
strikersEnd = re.compile("</table>").search(html[strikersStart.start():len(html)])
strikersText = html[strikersStart.start():strikersStart.start()+strikersEnd.end()]
return goalkeepersText + defendersText + midfieldersText + strikersText
def extractFields(text):
    """Walk each <tr> row of the given table HTML, pulling out the player ID
    attribute and every <td> cell, and return them all as one flat list."""
textIndex = 0
arrayIndex = 0
interestingThings = []
while textIndex < len(text):
try:
# Extract data between <tr> and </tr>. This will get an individual player's line.
startPos = re.compile("<tr\s?[^>]*>").search(text[textIndex:len(text)])
endPos = re.compile("</tr>").search(text[textIndex+startPos.end():textIndex+startPos.start()+1000])
thisItem = text[textIndex+startPos.start():textIndex+startPos.end()+endPos.end()]
# Extract the ID field
idStartPos = re.compile("id=\'p").search(thisItem)
idEndPos = re.compile("\'").search(thisItem[idStartPos.end():len(thisItem)])
interestingThings.append(thisItem[idStartPos.end():idStartPos.end()+idEndPos.end()-1])
innerIndex = 0
while innerIndex < len(thisItem):
try:
# Extract data between <td> and </td>. This will get the individual cells.
innerStartPos = re.compile("<td>").search(thisItem[innerIndex:len(thisItem)])
innerEndPos = re.compile("</td>").search(thisItem[innerIndex+innerStartPos.end():len(thisItem)])
innerItem = thisItem[innerIndex+innerStartPos.end():innerIndex+innerStartPos.end()+innerEndPos.start()]
innerIndex = innerIndex + innerStartPos.end() + innerEndPos.end()
interestingThings.append(innerItem)
arrayIndex += 1
except:
break
textIndex = textIndex+startPos.end()+endPos.end()
except:
break
return interestingThings
class Player:
def __init__(self, row):
self.number = int(row[0])
self.name = row[1]
self.team = row[2]
self.points = int(row[3])
self.price = round(float(row[4]), 1)
self.value = self.points / self.price
self.position = calculatePosition(self.number)
def __str__(self):
return '<tr><td><p>%4s</p></td><td><p>%-20s</p></td><td><p>%-20s</p></td><td><p>%4s</p></td><td><p>%4s</p></td></tr>' % (self.number, self.name, self.team, self.price, self.points)
class TeamPicker:
def __init__(self):
self.process()
def set_initial_text(self):
# Print header
introText = "<h2>Optimum Telegraph Fantasy Football Team</h2><p style=\"font-weight:bold\">Generated on " + datetime.datetime.now().strftime("%A %d %B %Y at %H:%M:%S.") + "</p>"
introText = introText + "<p>Created using Telegraph Fantasy Football Team Picker, version 1.2.1 (11 March 2011), by Ian Renton and Mark Harris.<br>"
introText = introText + "For details and source code, see <a href=\"http://www.onlydreaming.net/software/telegraph-fantasy-football-team-picker\">http://www.onlydreaming.net/software/telegraph-fantasy-football-team-picker</a></p>"
self.displayUpdate(introText)
def displayUpdate(self, line):
self.f.write(line)
def process(self):
import urllib2
import re
from collections import defaultdict
try:
urllib2.urlopen('http://www.google.com')
except urllib2.URLError, e:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Internet connection failed.</p>')
internetConnectionAvailable = False
else:
internetConnectionAvailable = True
if internetConnectionAvailable == True:
# Download the HTML file, and create a 'tmpData' list to contain the information.
try:
response = urllib2.urlopen('http://fantasyfootball.telegraph.co.uk/select-team/')
html = response.read()
except IOError, e:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Could not find the player list, maybe the URL has changed?</p>')
return
else:
pass
else:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Using a local mirror of the player list.</p>')
# Load the HTML file, and create a 'tmpData' list to contain the information.
try:
tmpFile = open("export.html","r")
html = tmpFile.read()
tmpFile.close()
except IOError, e:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Cannot continue.</p>')
return
else:
pass
# Process the HTML into Players
fields = extractFields(cutDownPlayerPointsHTML(html))
tmpData = []
for i in range(len(fields)/7):
# If Points field is blank, replace it with a zero.
if (fields[i*7+5] == ""):
fields[i*7+5] = 0
# Add player (ID, name, club, points, price)
tmpData.append(Player([fields[i*7],fields[i*7+1],fields[i*7+2],fields[i*7+5],fields[i*7+3]]))
# Extra features if we have a net connection
if internetConnectionAvailable == True:
# Fetch injury list from PhysioRoom
response = urllib2.urlopen('http://www.physioroom.com/news/english_premier_league/epl_injury_table.php')
injuryList = response.read()
# Remove injured players
            # re.escape stops regex metacharacters in player names from breaking the match
            tmpData = filter(lambda player: re.compile(re.escape(player.name)).search(injuryList) is None, tmpData)
# Fetch transfer password from RichardSweeting.org
response = urllib2.urlopen('http://www.richardsweeting.org/pages/telegraph.html')
passwordPage = response.read()
# Find the Wednesday's date and the password.
try:
match = re.compile("<p style=\"padding-top: 0pt; \" class=\"paragraph_style_1\">[^\n]*\n").search(passwordPage)
match2 = re.compile("[^<]*<").search(passwordPage[match.start()+56:match.end()])
wednesday = passwordPage[match.start()+56:match.start()+56+match2.end()-1]
except:
wednesday = "???"
try:
match = re.compile("\*\*\* [A-Za-z]* \*\*\*").search(passwordPage)
password = passwordPage[match.start()+4:match.end()-4]
except:
password = "Unknown (Could not parse page, visit <a href=\"http://www.richardsweeting.org/pages/telegraph.html\">http://www.richardsweeting.org/pages/telegraph.html</a> to check manually.)"
transferPasswordInfo = "<p>Transfer password for %s: %s</p>" % (wednesday, password)
        else:
            # No connection: define the variable so the displayUpdate() call
            # further down does not raise a NameError.
            transferPasswordInfo = ""
# Split data into four separate lists, one for each kind of player.
players = defaultdict(list)
for player in tmpData:
players[player.position].append(player)
# Produce a set of thresholds for VFM and overall price. This allows us to cut
# down the list of players to only those that are good value for money or
# particularly high-scoring. This mirrors human behaviour, where the user
# picks some very high-scoring (but expensive) players, then fills out the rest
# of the team with cheap but good-value players.
# These thresholds are necessary to reduce the number of players being considered,
# as otherwise the number of combinations that the script must consider would be
# too large for the script to run in sensible time.
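        # Worked example with illustrative numbers: if the best striker has 120
        # points and thresholdDivisor is 1.6, the points threshold is
        # 120 / 1.6 = 75, so a striker survives the cut if he scores more than
        # 75 points or beats the equivalent value-for-money threshold.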
thresholdDivisor = 1.6
sensibleDataSet = 0
while (sensibleDataSet == 0):
points = lambda player: player.points
valueForMoney = lambda player: player.value
pointThresholds = defaultdict(float)
valueThresholds = defaultdict(float)
for position in players.keys():
pointThresholds[position] = max(players[position], key=points).points / thresholdDivisor
valueThresholds[position] = max(players[position], key=valueForMoney).value / thresholdDivisor
# This section applies the thresholds calculated in the previous one, to cut down
# the number of players.
for position in players.keys():
players[position] = filter(lambda x : ((x.points > pointThresholds[position]) | (x.value > valueThresholds[position])), players[position])
# Using a function to pick unique combinations of players, we here form a list of
# all possible combinations: 1 2 3 4, 1 2 3 5, 1 2 3 6 and so on. Because there
# are multiple formations to choose from, we have to do this several times.
defenderChoices3 = list(nchoosek(players["Defender"],3))
defenderChoices4 = list(nchoosek(players["Defender"],4))
# Now the same for the midfielders.
midfielderChoices3 = list(nchoosek(players["Midfielder"],3))
midfielderChoices4 = list(nchoosek(players["Midfielder"],4))
midfielderChoices5 = list(nchoosek(players["Midfielder"],5))
# And now the same for the strikers.
strikerChoices1 = list(nchoosek(players["Striker"],1))
strikerChoices2 = list(nchoosek(players["Striker"],2))
strikerChoices3 = list(nchoosek(players["Striker"],3))
# If we have too many iterations to be possible in sensible time, go back and reduce
# thresholdDivisor until we have something sensible. Assume the 442 formation is pretty representative.
totalIterations = len(defenderChoices4) * len(midfielderChoices4) * len(strikerChoices2)
print thresholdDivisor
print totalIterations
if (totalIterations <= 3000000):
sensibleDataSet = 1
else:
n = 0.1
if (thresholdDivisor < 2.8):
n = 0.05
if (thresholdDivisor < 1.8):
n = 0.05
if (thresholdDivisor < 1.3):
n = 0.025
thresholdDivisor = thresholdDivisor - n
# To reduce the number of combinations, we just pick the one goalkeeper
# who provides best value for money rather than searching through them all.
players["Goalkeeper"].sort(lambda x, y: cmp(y.value, x.value))
goalkeeper = players["Goalkeeper"][0]
# For each combination of defenders, we calculate their combined price
# and combined points totals.
# Create two functions that, given a list of permutations of players, will return a list of prices of those players in the same order.
# Er... I guess if you're not up on your functional programming, this must look a bit hideous...
prices = lambda permutations: reduce(lambda total, player: total + player.price, permutations, 0)
points = lambda permutations: reduce(lambda total, player: total + player.points, permutations, 0)
#Sorry! Having those simplifies the next bit dramatically though:
defChoicesPrice3 = map(prices, defenderChoices3)
defChoicesPoints3 = map(points, defenderChoices3)
defChoicesPrice4 = map(prices, defenderChoices4)
defChoicesPoints4 = map(points, defenderChoices4)
# Same for the midfielders.
midChoicesPrice3 = map(prices, midfielderChoices3)
midChoicesPoints3 = map(points, midfielderChoices3)
midChoicesPrice4 = map(prices, midfielderChoices4)
midChoicesPoints4 = map(points, midfielderChoices4)
midChoicesPrice5 = map(prices, midfielderChoices5)
midChoicesPoints5 = map(points, midfielderChoices5)
# Same for the strikers.
strChoicesPrice1 = map(prices, strikerChoices1)
strChoicesPoints1 = map(points, strikerChoices1)
strChoicesPrice2 = map(prices, strikerChoices2)
strChoicesPoints2 = map(points, strikerChoices2)
strChoicesPrice3 = map(prices, strikerChoices3)
strChoicesPoints3 = map(points, strikerChoices3)
# Now we iterate through all possible choices for defenders, midfielders and
# strikers. In each case, we check to see if this set is better than the one
# before, and if so we record it. First, the 442 team.
bestTotalPoints = 0
bestChoices = []
bestFormation = 0
maxPrice = 50 - goalkeeper.price
# 442
for (i, defs) in enumerate(defenderChoices4):
for (j, mids) in enumerate(midfielderChoices4):
for (k, strs) in enumerate(strikerChoices2):
if ((defChoicesPrice4[i] + midChoicesPrice4[j] + strChoicesPrice2[k]) <= maxPrice):
teamPoints = (defChoicesPoints4[i] + midChoicesPoints4[j] + strChoicesPoints2[k])
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# 433
for (i, defs) in enumerate(defenderChoices4):
for (j, mids) in enumerate(midfielderChoices3):
for (k, strs) in enumerate(strikerChoices3):
if ((defChoicesPrice4[i] + midChoicesPrice3[j] + strChoicesPrice3[k]) <= maxPrice):
teamPoints = defChoicesPoints4[i] + midChoicesPoints3[j] + strChoicesPoints3[k]
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# 451
for (i, defs) in enumerate(defenderChoices4):
for (j, mids) in enumerate(midfielderChoices5):
for (k, strs) in enumerate(strikerChoices1):
if ((defChoicesPrice4[i] + midChoicesPrice5[j] + strChoicesPrice1[k]) <= maxPrice):
teamPoints = defChoicesPoints4[i] + midChoicesPoints5[j] + strChoicesPoints1[k]
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# 352
for (i, defs) in enumerate(defenderChoices3):
for (j, mids) in enumerate(midfielderChoices5):
for (k, strs) in enumerate(strikerChoices2):
if ((defChoicesPrice3[i] + midChoicesPrice5[j] + strChoicesPrice2[k]) <= maxPrice):
teamPoints = defChoicesPoints3[i] + midChoicesPoints5[j] + strChoicesPoints2[k]
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# Calculate optimum team's total price.
bestTotalPrice = goalkeeper.price
for p in bestDefs:
bestTotalPrice += p.price
for p in bestMids:
bestTotalPrice += p.price
for p in bestStrs:
bestTotalPrice += p.price
# Print the optimum team's details.
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<table width="500px" border="1" cellspacing="2">')
self.displayUpdate('<tr><td><p><b>ID</b></p></td><td><p><b>Name</b></p></td><td><p><b>Club</b></p></td><td><p><b>Price</b></p></td><td><p><b>Points</b></p></td></tr>')
self.displayUpdate('<tr><td colspan=5><p><b>Goalkeeper</b></p></td></tr>')
self.displayUpdate( str(goalkeeper))
self.displayUpdate('<tr><td colspan=5><p><b>Defenders</b></p></td></tr>')
self.displayUpdate( ''.join(map(str, bestDefs)))
self.displayUpdate('<tr><td colspan=5><p><b>Midfielders</b></p></td></tr>')
self.displayUpdate(''.join(map(str, bestMids)))
self.displayUpdate('<tr><td colspan=5><p><b>Strikers</b></p></td></tr>')
self.displayUpdate(''.join(map(str, bestStrs)))
self.displayUpdate('<tr><td colspan=3><p><b>Total</b></p></td><td><p><b>%4s</b></p></td><td><p><b>%4s</b></p></td></tr>' % (bestTotalPrice, bestTotalPoints))
self.displayUpdate('</table>')
self.displayUpdate(transferPasswordInfo)
self.f.close()
print "<p><a href=\"output.html\">output.html</a> successfully generated.</p>"
return 0
teampicker = TeamPicker()
| bsd-2-clause | -450,705,566,761,338,500 | 48.981771 | 238 | 0.602355 | false |
H-Software/Zabbix-II | zabbix-templates/ibm-storwize-perf/scripts/svc_perf_discovery_sender_zabbix.py | 1 | 4012 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# IBM Storwize V7000 autodiscovery script for Zabbix
#
# 2013 Matvey Marinin
#
# Sends volume/mdisk/pool LLD JSON data to LLD trapper items "svc.discovery.<volume-mdisk|volume|mdisk|pool>"
# Use with "_Special_Storwize_Perf" Zabbix template
#
# See also http://www.zabbix.com/documentation/2.0/manual/discovery/low_level_discovery
#
# Usage:
# svc_perf_discovery_sender.py [--debug] --clusters <svc1>[,<svc2>...] --user <username> --password <pwd>
#
# --debug = Enable debug output
# --clusters = Comma-separated Storwize node list
# --user = Storwize V7000 user account with Administrator role (it seems that Monitor role is not enough)
# --password = User password
#
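# Example of the LLD payload this script assembles and sends to the trapper
# item (illustrative values; the {#TYPE}/{#NAME}/{#ID} macro names match the
# JSON-building code below):
#   {"data":[{"{#TYPE}":"volume", "{#NAME}":"vdisk0", "{#ID}":"42"}]}
#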
import pywbem
import getopt, sys
from zbxsend import Metric, send_to_zabbix
import logging
def usage():
print >> sys.stderr, "Usage: svc_perf_discovery_sender_zabbix.py [--debug] --clusters <svc1>[,<svc2>...] --user <username> --password <pwd> --discovery-types <type1>,[type2]"
print >> sys.stderr, "Discovery types: 'volume-mdisk','volume','mdisk','pool'"
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "-h", ["help", "clusters=", "user=", "password=", "debug", "discovery-types="])
except getopt.GetoptError, err:
print >> sys.stderr, str(err)
usage()
sys.exit(2)
debug = False
clusters = []
DISCOVERY_TYPES = []
user = None
password = None
for o, a in opts:
if o == "--clusters" and not a.startswith('--'):
clusters.extend( a.split(','))
elif o == "--user" and not a.startswith('--'):
user = a
elif o == "--password" and not a.startswith('--'):
password = a
elif o == "--debug":
debug = True
elif o == "--discovery-types":
DISCOVERY_TYPES.extend( a.split(','))
elif o in ("-h", "--help"):
usage()
sys.exit()
if not clusters:
print >> sys.stderr, '--clusters option must be set'
usage()
sys.exit(2)
if not DISCOVERY_TYPES:
print >> sys.stderr, '--discovery-types option must be set'
usage()
sys.exit(2)
if not user or not password:
print >> sys.stderr, '--user and --password options must be set'
usage()
sys.exit(2)
def debug_print(message):
if debug:
print message
for cluster in clusters:
debug_print('Connecting to: %s' % cluster)
conn = pywbem.WBEMConnection('https://'+cluster, (user, password), 'root/ibm')
conn.debug = True
for discovery in DISCOVERY_TYPES:
output = []
if discovery == 'volume-mdisk' or discovery == 'volume':
for vol in conn.ExecQuery('WQL', 'select DeviceID, ElementName from IBMTSSVC_StorageVolume'):
output.append( '{"{#TYPE}":"%s", "{#NAME}":"%s", "{#ID}":"%s"}' % ('volume', vol.properties['ElementName'].value, vol.properties['DeviceID'].value) )
if discovery == 'volume-mdisk' or discovery == 'mdisk':
for mdisk in conn.ExecQuery('WQL', 'select DeviceID, ElementName from IBMTSSVC_BackendVolume'):
output.append( '{"{#TYPE}":"%s", "{#NAME}":"%s", "{#ID}":"%s"}' % ('mdisk', mdisk.properties['ElementName'].value, mdisk.properties['DeviceID'].value) )
if discovery == 'pool':
for pool in conn.ExecQuery('WQL', 'select PoolID, ElementName from IBMTSSVC_ConcreteStoragePool'):
output.append( '{"{#TYPE}":"%s","{#NAME}":"%s","{#ID}":"%s"}' % ('pool', pool.properties['ElementName'].value, pool.properties['PoolID'].value) )
json = []
json.append('{"data":[')
for i, v in enumerate( output ):
if i < len(output)-1:
json.append(v+',')
else:
json.append(v)
json.append(']}')
json_string = ''.join(json)
print(json_string)
trapper_key = 'svc.discovery.%s' % discovery
debug_print('Sending to host=%s, key=%s' % (cluster, trapper_key))
#send json to LLD trapper item with zbxsend module
if debug:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
send_to_zabbix([Metric(cluster, trapper_key, json_string)], 'localhost', 10051)
debug_print('')
| gpl-2.0 | 5,975,721,742,574,174,000 | 31.617886 | 176 | 0.639083 | false |
rven/odoo | odoo/fields.py | 1 | 165522 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" High-level objects for fields. """
from collections import defaultdict
from datetime import date, datetime, time
from operator import attrgetter
from xmlrpc.client import MAXINT
import itertools
import logging
import base64
import binascii
import pytz
import psycopg2
from .tools import (
float_repr, float_round, float_compare, float_is_zero, html_sanitize, human_size,
pg_varchar, ustr, OrderedSet, pycompat, sql, date_utils, unique, IterableGenerator,
image_process, merge_sequences,
)
from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
from .tools.translate import html_translate, _
from .tools.mimetypes import guess_mimetype
from odoo.exceptions import CacheMiss
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
# hacky-ish way to prevent access to a field through the ORM (except for sudo mode)
NO_ACCESS='.'
IR_MODELS = (
'ir.model', 'ir.model.data', 'ir.model.fields', 'ir.model.fields.selection',
'ir.model.relation', 'ir.model.constraint', 'ir.module.module',
)
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__[:-7] + '.schema')
Default = object() # default value for __init__() methods
def first(records):
""" Return the first record in ``records``, with the same prefetching. """
return next(iter(records)) if len(records) > 1 else records
def resolve_mro(model, name, predicate):
""" Return the list of successively overridden values of attribute ``name``
in mro order on ``model`` that satisfy ``predicate``. Model classes
(the ones that appear in the registry) are ignored.
"""
result = []
for cls in model._model_classes:
value = cls.__dict__.get(name, Default)
if value is Default:
continue
if not predicate(value):
break
result.append(value)
return result
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if not hasattr(cls, 'type'):
return
if cls.type and cls.type not in MetaField.by_type:
MetaField.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
_global_seq = iter(itertools.count())
class Field(MetaField('DummyField', (object,), {})):
"""The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
    attributes may be provided when instantiating a field:
:param str string: the label of the field seen by users; if not
set, the ORM takes the field name in the class (capitalized).
:param str help: the tooltip of the field seen by users
:param invisible: whether the field is invisible (boolean, by default ``False``)
:param bool readonly: whether the field is readonly (default: ``False``)
        This only has an impact on the UI. Any field assignment in code will work
        (if the field is a stored field or an inversible one).
:param bool required: whether the value of the field is required (default: ``False``)
:param bool index: whether the field is indexed in database. Note: no effect
on non-stored and virtual fields. (default: ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value; use
``default=None`` to discard default values for the field
:type default: value or callable
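        For example (an illustrative sketch; the model and field names are invented):
        .. code-block:: python
            user_id = fields.Many2one('res.users', default=lambda self: self.env.user)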
:param dict states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: ``readonly``, ``required``, ``invisible``.
.. warning:: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
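        For example (an illustrative sketch):
        .. code-block:: python
            name = fields.Char(required=True, readonly=True,
                               states={'draft': [('readonly', False)]})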
:param str groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
    :param bool company_dependent: whether the field value is dependent on the current company;
The value isn't stored on the model table. It is registered as `ir.property`.
When the value of the company_dependent field is needed, an `ir.property`
is searched, linked to the current company (and current record if one property
exists).
If the value is changed on the record, it either modifies the existing property
for the current record (if one exists), or creates a new one for the current company
and res_id.
If the value is changed on the company side, it will impact all records on which
the value hasn't been changed.
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
:param bool store: whether the field is stored in database
(default:``True``, ``False`` for computed fields)
:param str group_operator: aggregate function used by :meth:`~odoo.models.Model.read_group`
when grouping on this field.
Supported aggregate functions are:
* ``array_agg`` : values, including nulls, concatenated into an array
* ``count`` : number of rows
* ``count_distinct`` : number of distinct rows
* ``bool_and`` : true if all values are true, otherwise false
* ``bool_or`` : true if at least one value is true, otherwise false
* ``max`` : maximum value of all values
* ``min`` : minimum value of all values
* ``avg`` : the average (arithmetic mean) of all values
* ``sum`` : sum of all values
:param str group_expand: function used to expand read_group results when grouping on
the current field.
.. code-block:: python
@api.model
def _read_group_selection_field(self, values, domain, order):
return ['choice1', 'choice2', ...] # available selection choices.
@api.model
def _read_group_many2one_field(self, records, domain, order):
return records + self.search([custom_domain])
.. rubric:: Computed Fields
:param str compute: name of a method that computes the field
.. seealso:: :ref:`Advanced Fields/Compute fields <reference/fields/compute>`
:param bool compute_sudo: whether the field should be recomputed as superuser
to bypass access rights (by default ``True`` for stored fields, ``False``
for non stored fields)
:param str inverse: name of a method that inverses the field (optional)
    :param str search: name of a method that implements search on the field (optional)
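        For example, a stored computed field with an inverse (an illustrative
        sketch; the field and method names are invented):
        .. code-block:: python
            total = fields.Float(compute='_compute_total', inverse='_inverse_total', store=True)
            @api.depends('amount', 'tax')
            def _compute_total(self):
                for record in self:
                    record.total = record.amount + record.tax
            def _inverse_total(self):
                for record in self:
                    record.amount = record.total - record.tax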
:param str related: sequence of field names
.. seealso:: :ref:`Advanced fields/Related fields <reference/fields/related>`
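        For example (an illustrative sketch; ``partner_id`` is an invented
        Many2one field):
        .. code-block:: python
            partner_name = fields.Char(related='partner_id.name')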
"""
type = None # type of the field (string)
relational = False # whether the field is a relational one
translate = False # whether the field is translated
column_type = None # database column type (ident, spec)
column_format = '%s' # placeholder for value in queries
column_cast_from = () # column types that may be cast to this
args = None # the parameters given to __init__()
_module = None # the field's module name
_modules = None # modules that define this field
_setup_done = None # the field's setup state: None, 'base' or 'full'
_sequence = None # absolute ordering of the field
automatic = False # whether the field is automatically created ("magic" field)
inherited = False # whether the field is inherited (_inherits)
inherited_field = None # the corresponding inherited field
name = None # name of the field
model_name = None # name of the model of this field
comodel_name = None # name of the model of values (if relational)
store = True # whether the field is stored in database
index = False # whether the field is indexed in database
manual = False # whether the field is a custom field
copy = True # whether the field is copied over by BaseModel.copy()
_depends = None # collection of field dependencies
_depends_context = None # collection of context key dependencies
recursive = False # whether self depends on itself
compute = None # compute(recs) computes field on recs
compute_sudo = False # whether field should be recomputed as superuser
inverse = None # inverse(recs) inverses field on recs
search = None # search(recs, operator, value) searches on self
related = None # sequence of field names, for related fields
company_dependent = False # whether ``self`` is company-dependent (property field)
default = None # default(recs) returns the default value
string = None # field label
help = None # field tooltip
invisible = False # whether the field is invisible
readonly = False # whether the field is readonly
required = False # whether the field is required
states = None # set readonly and required depending on state
groups = None # csv list of group xml ids
change_default = False # whether the field may trigger a "user-onchange"
deprecated = None # whether the field is deprecated
related_field = None # corresponding related field
group_operator = None # operator for aggregating values
group_expand = None # name of method to expand groups in read_group()
prefetch = True # whether the field is prefetched
def __init__(self, string=Default, **kwargs):
kwargs['string'] = string
self._sequence = kwargs['_sequence'] = next(_global_seq)
self.args = {key: val for key, val in kwargs.items() if val is not Default}
def new(self, **kwargs):
""" Return a field of the same type as ``self``, with its own parameters. """
return type(self)(**kwargs)
def __str__(self):
return "%s.%s" % (self.model_name, self.name)
def __repr__(self):
return "%s.%s" % (self.model_name, self.name)
############################################################################
#
# Base field setup: things that do not depend on other models/fields
#
def setup_base(self, model, name):
""" Base setup: things that do not depend on other models/fields. """
if self._setup_done and not self.related:
# optimization for regular fields: keep the base setup
self._setup_done = 'base'
else:
# do the base setup from scratch
self._setup_attrs(model, name)
if not self.related:
self._setup_regular_base(model)
self._setup_done = 'base'
#
# Setup field parameter attributes
#
def _can_setup_from(self, field):
""" Return whether ``self`` can retrieve parameters from ``field``. """
return isinstance(field, type(self))
def _get_attrs(self, model, name):
""" Return the field parameter attributes as a dictionary. """
# determine all inherited field attributes
modules = set()
attrs = {}
if self.args.get('automatic') and resolve_mro(model, name, self._can_setup_from):
# prevent an automatic field from overriding a real field
self.args.clear()
if not (self.args.get('automatic') or self.args.get('manual')):
# magic and custom fields do not inherit from parent classes
for field in reversed(resolve_mro(model, name, self._can_setup_from)):
attrs.update(field.args)
if '_module' in field.args:
modules.add(field.args['_module'])
attrs.update(self.args) # necessary in case self is not in class
attrs['args'] = self.args
attrs['model_name'] = model._name
attrs['name'] = name
attrs['_modules'] = modules
# initialize ``self`` with ``attrs``
if name == 'state':
# by default, `state` fields should be reset on copy
attrs['copy'] = attrs.get('copy', False)
if attrs.get('compute'):
# by default, computed fields are not stored, computed in superuser
# mode if stored, not copied (unless stored and explicitly not
# readonly), and readonly (unless inversible)
attrs['store'] = store = attrs.get('store', False)
attrs['compute_sudo'] = attrs.get('compute_sudo', store)
if not (attrs['store'] and not attrs.get('readonly', True)):
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
if attrs.get('related'):
# by default, related fields are not stored, computed in superuser
# mode, not copied and readonly
attrs['store'] = store = attrs.get('store', False)
attrs['compute_sudo'] = attrs.get('compute_sudo', attrs.get('related_sudo', True))
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', True)
if attrs.get('company_dependent'):
# by default, company-dependent fields are not stored, not computed
# in superuser mode and not copied
attrs['store'] = False
attrs['compute_sudo'] = attrs.get('compute_sudo', False)
attrs['copy'] = attrs.get('copy', False)
attrs['default'] = attrs.get('default', self._default_company_dependent)
attrs['compute'] = self._compute_company_dependent
if not attrs.get('readonly'):
attrs['inverse'] = self._inverse_company_dependent
attrs['search'] = self._search_company_dependent
attrs['depends_context'] = attrs.get('depends_context', ()) + ('company',)
if attrs.get('translate'):
# by default, translatable fields are context-dependent
attrs['depends_context'] = attrs.get('depends_context', ()) + ('lang',)
# parameters 'depends' and 'depends_context' are stored in attributes
# '_depends' and '_depends_context', respectively
if 'depends' in attrs:
attrs['_depends'] = tuple(attrs.pop('depends'))
if 'depends_context' in attrs:
attrs['_depends_context'] = tuple(attrs.pop('depends_context'))
return attrs
def _setup_attrs(self, model, name):
""" Initialize the field parameter attributes. """
attrs = self._get_attrs(model, name)
# validate arguments
for key in attrs:
# TODO: improve filter as there are attributes on the class which
# are not valid on the field, probably
if not (hasattr(self, key) or model._valid_field_parameter(self, key)):
_logger.warning(
"Field %s.%s: unknown parameter %r, if this is an actual"
" parameter you may want to override the method"
" _valid_field_parameter on the relevant model in order to"
" allow it",
model._name, name, key
)
self.__dict__.update(attrs)
# prefetch only stored, column, non-manual and non-deprecated fields
if not (self.store and self.column_type) or self.manual or self.deprecated:
self.prefetch = False
if not self.string and not self.related:
# related fields get their string from their parent field
self.string = (
name[:-4] if name.endswith('_ids') else
name[:-3] if name.endswith('_id') else name
).replace('_', ' ').title()
# self.default must be a callable
if self.default is not None:
value = self.default
self.default = value if callable(value) else lambda model: value
############################################################################
#
# Full field setup: everything else, except recomputation triggers
#
def setup_full(self, model):
""" Full setup: everything else, except recomputation triggers. """
if self._setup_done != 'full':
if not self.related:
self._setup_regular_full(model)
else:
self._setup_related_full(model)
self._setup_done = 'full'
#
# Setup of non-related fields
#
def _setup_regular_base(self, model):
""" Setup the attributes of a non-related field. """
pass
def _setup_regular_full(self, model):
""" Determine the dependencies and inverse field(s) of ``self``. """
if self._depends is not None:
# the parameter 'depends' has priority over 'depends' on compute
self.depends = self._depends
self.depends_context = self._depends_context or ()
return
# determine the functions implementing self.compute
if isinstance(self.compute, str):
funcs = resolve_mro(model, self.compute, callable)
elif self.compute:
funcs = [self.compute]
else:
funcs = []
# collect depends and depends_context
depends = []
depends_context = list(self._depends_context or ())
for func in funcs:
deps = getattr(func, '_depends', ())
depends.extend(deps(model) if callable(deps) else deps)
depends_context.extend(getattr(func, '_depends_context', ()))
self.depends = tuple(depends)
self.depends_context = tuple(depends_context)
# display_name may depend on context['lang'] (`test_lp1071710`)
if self.automatic and self.name == 'display_name' and model._rec_name:
if model._fields[model._rec_name].base_field.translate:
if 'lang' not in self.depends_context:
self.depends_context += ('lang',)
#
# Setup of related fields
#
def _setup_related_full(self, model):
""" Setup the attributes of a related field. """
# fix the type of self.related if necessary
if isinstance(self.related, str):
self.related = tuple(self.related.split('.'))
# determine the chain of fields, and make sure they are all set up
model_name = self.model_name
for name in self.related:
field = model.pool[model_name]._fields[name]
if field._setup_done != 'full':
field.setup_full(model.env[model_name])
model_name = field.comodel_name
self.related_field = field
# check type consistency
if self.type != field.type:
raise TypeError("Type of related field %s is inconsistent with %s" % (self, field))
# determine dependencies, compute, inverse, and search
if self._depends is not None:
self.depends = self._depends
else:
self.depends = ('.'.join(self.related),)
self.compute = self._compute_related
if self.inherited or not (self.readonly or field.readonly):
self.inverse = self._inverse_related
if field._description_searchable:
# allow searching on self only if the related field is searchable
self.search = self._search_related
# copy attributes from field to self (string, help, etc.)
for attr, prop in self.related_attrs:
if not getattr(self, attr):
setattr(self, attr, getattr(field, prop))
for attr, value in field.__dict__.items():
if not hasattr(self, attr) and model._valid_field_parameter(self, attr):
setattr(self, attr, value)
# special cases of inherited fields
if self.inherited:
if not self.states:
self.states = field.states
if field.required:
self.required = True
self._modules.update(field._modules)
if self._depends_context is not None:
self.depends_context = self._depends_context
else:
self.depends_context = field.depends_context
def traverse_related(self, record):
""" Traverse the fields of the related field `self` except for the last
one, and return it as a pair `(last_record, last_field)`. """
for name in self.related[:-1]:
record = first(record[name])
return record, self.related_field
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
#
# Traverse fields one by one for all records, in order to take advantage
# of prefetching for each field access. In order to clarify the impact
# of the algorithm, consider traversing 'foo.bar' for records a1 and a2,
# where 'foo' is already present in cache for a1, a2. Initially, both a1
# and a2 are marked for prefetching. As the commented code below shows,
# traversing all fields one record at a time will fetch 'bar' one record
# at a time.
#
# b1 = a1.foo # mark b1 for prefetching
# v1 = b1.bar # fetch/compute bar for b1
# b2 = a2.foo # mark b2 for prefetching
# v2 = b2.bar # fetch/compute bar for b2
#
# On the other hand, traversing all records one field at a time ensures
# maximal prefetching for each field access.
#
# b1 = a1.foo # mark b1 for prefetching
# b2 = a2.foo # mark b2 for prefetching
# v1 = b1.bar # fetch/compute bar for b1, b2
# v2 = b2.bar # value already in cache
#
# This difference has a major impact on performance, in particular in
# the case where 'bar' is a computed field that takes advantage of batch
# computation.
#
values = list(records)
for name in self.related[:-1]:
try:
values = [first(value[name]) for value in values]
except AccessError as e:
description = records.env['ir.model']._get(records._name).name
raise AccessError(
_("%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).") % {
'previous_message': e.args[0],
'document_kind': description,
'document_model': records._name,
}
)
# assign final values to records
for record, value in zip(records, values):
record[self.name] = self._process_related(value[self.related_field.name])
def _process_related(self, value):
"""No transformation by default, but allows override."""
return value
def _inverse_related(self, records):
""" Inverse the related field ``self`` on ``records``. """
# store record values, otherwise they may be lost by cache invalidation!
record_value = {record: record[self.name] for record in records}
for record in records:
target, field = self.traverse_related(record)
# update 'target' only if 'record' and 'target' are both real or
# both new (see `test_base_objects.py`, `test_basic`)
if target and bool(target.id) == bool(record.id):
target[field.name] = record_value[record]
def _search_related(self, records, operator, value):
""" Determine the domain to search on field ``self``. """
return [('.'.join(self.related), operator, value)]
# properties used by _setup_related_full() to copy values from related field
_related_comodel_name = property(attrgetter('comodel_name'))
_related_string = property(attrgetter('string'))
_related_help = property(attrgetter('help'))
_related_groups = property(attrgetter('groups'))
_related_group_operator = property(attrgetter('group_operator'))
@property
def base_field(self):
""" Return the base field of an inherited field, or ``self``. """
return self.inherited_field.base_field if self.inherited_field else self
#
# Company-dependent fields
#
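    # A company-dependent field stores its values per company in
    # ``ir.property`` rather than in a regular column. A minimal declaration
    # sketch (hypothetical field, not part of this module):
    #
    #     standard_price = fields.Float(company_dependent=True)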
def _default_company_dependent(self, model):
return model.env['ir.property']._get(self.name, self.model_name)
def _compute_company_dependent(self, records):
# read property as superuser, as the current user may not have access
Property = records.env['ir.property'].sudo()
values = Property._get_multi(self.name, self.model_name, records.ids)
for record in records:
record[self.name] = values.get(record.id)
def _inverse_company_dependent(self, records):
# update property as superuser, as the current user may not have access
Property = records.env['ir.property'].sudo()
values = {
record.id: self.convert_to_write(record[self.name], record)
for record in records
}
Property._set_multi(self.name, self.model_name, values)
def _search_company_dependent(self, records, operator, value):
Property = records.env['ir.property'].sudo()
return Property.search_multi(self.name, self.model_name, operator, value)
#
# Setup of field triggers
#
def resolve_depends(self, registry):
""" Return the dependencies of `self` as a collection of field tuples. """
Model0 = registry[self.model_name]
for dotnames in self.depends:
field_seq = []
model_name = self.model_name
for index, fname in enumerate(dotnames.split('.')):
Model = registry[model_name]
if Model0._transient and not Model._transient:
# modifying fields on regular models should not trigger
# recomputations of fields on transient models
break
try:
field = Model._fields[fname]
except KeyError:
msg = "Field %s cannot find dependency %r on model %r."
raise ValueError(msg % (self, fname, model_name))
if field is self and index:
self.recursive = True
field_seq.append(field)
# do not make self trigger itself: for instance, a one2many
# field line_ids with domain [('foo', ...)] will have
# 'line_ids.foo' as a dependency
if not (field is self and not index):
yield tuple(field_seq)
if field.type in ('one2many', 'many2many'):
for inv_field in Model._field_inverses[field]:
yield tuple(field_seq) + (inv_field,)
model_name = field.comodel_name
############################################################################
#
# Field description
#
def get_description(self, env):
""" Return a dictionary that describes the field ``self``. """
desc = {'type': self.type}
for attr, prop in self.description_attrs:
value = getattr(self, prop)
if callable(value):
value = value(env)
if value is not None:
desc[attr] = value
return desc
# properties used by get_description()
_description_store = property(attrgetter('store'))
_description_manual = property(attrgetter('manual'))
_description_depends = property(attrgetter('depends'))
_description_related = property(attrgetter('related'))
_description_company_dependent = property(attrgetter('company_dependent'))
_description_readonly = property(attrgetter('readonly'))
_description_required = property(attrgetter('required'))
_description_states = property(attrgetter('states'))
_description_groups = property(attrgetter('groups'))
_description_change_default = property(attrgetter('change_default'))
_description_deprecated = property(attrgetter('deprecated'))
_description_group_operator = property(attrgetter('group_operator'))
@property
def _description_searchable(self):
return bool(self.store or self.search)
@property
def _description_sortable(self):
return (self.column_type and self.store) or (self.inherited and self.related_field._description_sortable)
def _description_string(self, env):
if self.string and env.lang:
model_name = self.base_field.model_name
field_string = env['ir.translation'].get_field_string(model_name)
return field_string.get(self.name) or self.string
return self.string
def _description_help(self, env):
if self.help and env.lang:
model_name = self.base_field.model_name
field_help = env['ir.translation'].get_field_help(model_name)
return field_help.get(self.name) or self.help
return self.help
def is_editable(self):
""" Return whether the field can be editable in a view. """
return not self.readonly or self.states and any(
'readonly' in item for items in self.states.values() for item in items
)
############################################################################
#
# Conversion of values
#
def null(self, record):
""" Return the null value for this field in the record format. """
return False
def convert_to_column(self, value, record, values=None, validate=True):
""" Convert ``value`` from the ``write`` format to the SQL format. """
if value is None or value is False:
return None
return pycompat.to_text(value)
def convert_to_cache(self, value, record, validate=True):
""" Convert ``value`` to the cache format; ``value`` may come from an
assignment, or have the format of methods :meth:`BaseModel.read` or
:meth:`BaseModel.write`. If the value represents a recordset, it should
be added for prefetching on ``record``.
:param bool validate: when True, field-specific validation of ``value``
will be performed
"""
return value
def convert_to_record(self, value, record):
""" Convert ``value`` from the cache format to the record format.
If the value represents a recordset, it should share the prefetching of
``record``.
"""
return False if value is None else value
def convert_to_record_multi(self, values, records):
""" Convert a list of values from the cache format to the record format.
Some field classes may override this method to add optimizations for
batch processing.
"""
# spare the method lookup overhead
convert = self.convert_to_record
return [convert(value, records) for value in values]
def convert_to_read(self, value, record, use_name_get=True):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.read`.
:param bool use_name_get: when True, the value's display name will be
computed using :meth:`BaseModel.name_get`, if relevant for the field
"""
return False if value is None else value
def convert_to_write(self, value, record):
""" Convert ``value`` from any format to the format of method
:meth:`BaseModel.write`.
"""
cache_value = self.convert_to_cache(value, record, validate=False)
record_value = self.convert_to_record(cache_value, record)
return self.convert_to_read(record_value, record)
def convert_to_onchange(self, value, record, names):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.onchange`.
:param names: a tree of field names (for relational fields only)
"""
return self.convert_to_read(value, record)
def convert_to_export(self, value, record):
""" Convert ``value`` from the record format to the export format. """
if not value:
return ''
return value
def convert_to_display_name(self, value, record):
""" Convert ``value`` from the record format to a suitable display name. """
return ustr(value)
############################################################################
#
# Update database schema
#
def update_db(self, model, columns):
""" Update the database schema to implement this field.
:param model: an instance of the field's model
:param columns: a dict mapping column names to their configuration in database
:return: ``True`` if the field must be recomputed on existing rows
"""
if not self.column_type:
return
column = columns.get(self.name)
# create/update the column, not null constraint; the index will be
# managed by registry.check_indexes()
self.update_db_column(model, column)
self.update_db_notnull(model, column)
# optimization for computing simple related fields like 'foo_id.bar'
if (
not column
and len(self.related or ()) == 2
and self.related_field.store and not self.related_field.compute
and not (self.related_field.type == 'binary' and self.related_field.attachment)
and self.related_field.type not in ('one2many', 'many2many')
):
join_field = model._fields[self.related[0]]
if (
join_field.type == 'many2one'
and join_field.store and not join_field.compute
):
model.pool.post_init(self.update_db_related, model)
# discard the "classical" computation
return False
return not column
def update_db_column(self, model, column):
""" Create/update the column corresponding to ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
if not column:
# the column does not exist, create it
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
return
if column['udt_name'] == self.column_type[0]:
return
if column['udt_name'] in self.column_cast_from:
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
else:
newname = (self.name + '_moved{}').format
i = 0
while sql.column_exists(model._cr, model._table, newname(i)):
i += 1
if column['is_nullable'] == 'NO':
sql.drop_not_null(model._cr, model._table, self.name)
sql.rename_column(model._cr, model._table, self.name, newname(i))
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
def update_db_notnull(self, model, column):
""" Add or remove the NOT NULL constraint on ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
has_notnull = column and column['is_nullable'] == 'NO'
if not column or (self.required and not has_notnull):
# the column is new or it becomes required; initialize its values
if model._table_has_rows():
model._init_column(self.name)
if self.required and not has_notnull:
# _init_column may delay computations in post-init phase
@model.pool.post_init
def add_not_null():
# flush values before adding NOT NULL constraint
model.flush([self.name])
model.pool.post_constraint(apply_required, model, self.name)
elif not self.required and has_notnull:
sql.drop_not_null(model._cr, model._table, self.name)
def update_db_related(self, model):
""" Compute a stored related field directly in SQL. """
comodel = model.env[self.related_field.model_name]
model.env.cr.execute("""
UPDATE "{model_table}" AS x
SET "{model_field}" = y."{comodel_field}"
FROM "{comodel_table}" AS y
WHERE x."{join_field}" = y.id
""".format(
model_table=model._table,
model_field=self.name,
comodel_table=comodel._table,
comodel_field=self.related[1],
join_field=self.related[0],
))
############################################################################
#
# Alternatively stored fields: if fields don't have a `column_type` (not
# stored as regular db columns) they go through a read/create/write
# protocol instead
#
def read(self, records):
""" Read the value of ``self`` on ``records``, and store it in cache. """
raise NotImplementedError("Method read() undefined on %s" % self)
def create(self, record_values):
""" Write the value of ``self`` on the given records, which have just
been created.
:param record_values: a list of pairs ``(record, value)``, where
``value`` is in the format of method :meth:`BaseModel.write`
"""
for record, value in record_values:
self.write(record, value)
def write(self, records, value):
""" Write the value of ``self`` on ``records``. This method must update
the cache and prepare database updates.
:param value: a value in any format
:return: the subset of `records` that have been modified
"""
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
cache.update(records, self, [cache_value] * len(records))
# update towrite
if self.store:
towrite = records.env.all.towrite[self.model_name]
record = records[:1]
write_value = self.convert_to_write(cache_value, record)
column_value = self.convert_to_column(write_value, record)
for record in records.filtered('id'):
towrite[record.id][self.name] = column_value
return records
############################################################################
#
# Descriptor methods
#
def __get__(self, record, owner):
""" return the value of field ``self`` on ``record`` """
if record is None:
return self # the field is accessed through the owner class
if not record._ids:
# null record -> return the null value for this field
value = self.convert_to_cache(False, record, validate=False)
return self.convert_to_record(value, record)
env = record.env
# only a single record may be accessed
record.ensure_one()
if self.compute and self.store:
# process pending computations
self.recompute(record)
try:
value = env.cache.get(record, self)
except KeyError:
# behavior in case of cache miss:
#
# on a real record:
# stored -> fetch from database (computation done above)
# not stored and computed -> compute
# not stored and not computed -> default
#
# on a new record w/ origin:
# stored and not (computed and readonly) -> fetch from origin
# stored and computed and readonly -> compute
# not stored and computed -> compute
# not stored and not computed -> default
#
# on a new record w/o origin:
# stored and computed -> compute
# stored and not computed -> new delegate or default
# not stored and computed -> compute
# not stored and not computed -> default
#
if self.store and record.id:
# real record: fetch from database
recs = record._in_cache_without(self)
try:
recs._fetch_field(self)
except AccessError:
record._fetch_field(self)
if not env.cache.contains(record, self) and not record.exists():
raise MissingError("\n".join([
_("Record does not exist or has been deleted."),
_("(Record: %s, User: %s)") % (record, env.uid),
]))
value = env.cache.get(record, self)
elif self.store and record._origin and not (self.compute and self.readonly):
# new record with origin: fetch from origin
value = self.convert_to_cache(record._origin[self.name], record)
env.cache.set(record, self, value)
elif self.compute:
# non-stored field or new record without origin: compute
if env.is_protected(self, record):
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
else:
recs = record if self.recursive else record._in_cache_without(self)
try:
self.compute_value(recs)
except (AccessError, MissingError):
self.compute_value(record)
try:
value = env.cache.get(record, self)
except CacheMiss:
if self.readonly and not self.store:
raise ValueError("Compute method failed to assign %s.%s" % (record, self.name))
# fallback to null value if compute gives nothing
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
elif self.type == 'many2one' and self.delegate and not record.id:
# parent record of a new record: new record, with the same
# values as record for the corresponding inherited fields
def is_inherited_field(name):
field = record._fields[name]
return field.inherited and field.related[0] == self.name
parent = record.env[self.comodel_name].new({
name: value
for name, value in record._cache.items()
if is_inherited_field(name)
})
value = self.convert_to_cache(parent, record)
env.cache.set(record, self, value)
else:
# non-stored field or stored field on new record: default value
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
defaults = record.default_get([self.name])
if self.name in defaults:
# The null value above is necessary to convert x2many field
# values. For instance, converting [(4, id)] accesses the
# field's current value, then adds the given id. Without an
# initial value, the conversion ends up here to determine
# the field's value, and generates an infinite recursion.
value = self.convert_to_cache(defaults[self.name], record)
env.cache.set(record, self, value)
return self.convert_to_record(value, record)
def mapped(self, records):
""" Return the values of ``self`` for ``records``, either as a list
(scalar fields), or as a recordset (relational fields).
This method is meant to be used internally and has very little benefit
over a simple call to `~odoo.models.BaseModel.mapped()` on a recordset.
"""
if self.name == 'id':
# not stored in cache
return list(records._ids)
if self.compute and self.store:
# process pending computations
self.recompute(records)
# retrieve values in cache, and fetch missing ones
vals = records.env.cache.get_until_miss(records, self)
while len(vals) < len(records):
# It is important to construct a 'remaining' recordset with the
# _prefetch_ids of the original recordset, in order to prefetch as
# many records as possible. If not done this way, scenarios such as
# [rec.line_ids.mapped('name') for rec in recs] would generate one
# query per record in `recs`!
remaining = records._browse(records.env, records[len(vals):]._ids, records._prefetch_ids)
self.__get__(first(remaining), type(remaining))
vals += records.env.cache.get_until_miss(remaining, self)
return self.convert_to_record_multi(vals, records)
def __set__(self, records, value):
""" set the value of field ``self`` on ``records`` """
protected_ids = []
new_ids = []
other_ids = []
for record_id in records._ids:
if record_id in records.env._protected.get(self, ()):
protected_ids.append(record_id)
elif not record_id:
new_ids.append(record_id)
else:
other_ids.append(record_id)
if protected_ids:
# records being computed: no business logic, no recomputation
protected_records = records.browse(protected_ids)
self.write(protected_records, value)
if new_ids:
# new records: no business logic
new_records = records.browse(new_ids)
with records.env.protecting(records.pool.field_computed.get(self, [self]), records):
if self.relational:
new_records.modified([self.name], before=True)
self.write(new_records, value)
new_records.modified([self.name])
if self.inherited:
# special case: also assign parent records if they are new
parents = records[self.related[0]]
parents.filtered(lambda r: not r.id)[self.name] = value
if other_ids:
# base case: full business logic
records = records.browse(other_ids)
write_value = self.convert_to_write(value, records)
records.write({self.name: write_value})
############################################################################
#
# Computation of field values
#
def recompute(self, records):
""" Process the pending computations of ``self`` on ``records``. This
should be called only if ``self`` is computed and stored.
"""
to_compute_ids = records.env.all.tocompute.get(self)
if not to_compute_ids:
return
if self.recursive:
for record in records:
if record.id in to_compute_ids:
self.compute_value(record)
return
for record in records:
if record.id in to_compute_ids:
ids = expand_ids(record.id, to_compute_ids)
recs = record.browse(itertools.islice(ids, PREFETCH_MAX))
try:
self.compute_value(recs)
except (AccessError, MissingError):
self.compute_value(record)
def compute_value(self, records):
""" Invoke the compute method on ``records``; the results are in cache. """
env = records.env
if self.compute_sudo:
records = records.sudo()
fields = records.pool.field_computed[self]
# Just in case the compute method does not assign a value, we already
# mark the computation as done. This is also necessary if the compute
# method accesses the old value of the field: the field will be fetched
# with _read(), which will flush() it. If the field is still to compute,
# the latter flush() will recursively compute this field!
for field in fields:
if field.store:
env.remove_to_compute(field, records)
try:
with records.env.protecting(fields, records):
records._compute_field_value(self)
except Exception:
for field in fields:
if field.store:
env.add_to_compute(field, records)
raise
def determine_inverse(self, records):
""" Given the value of ``self`` on ``records``, inverse the computation. """
if isinstance(self.inverse, str):
getattr(records, self.inverse)()
else:
self.inverse(records)
def determine_domain(self, records, operator, value):
""" Return a domain representing a condition on ``self``. """
if isinstance(self.search, str):
return getattr(records, self.search)(operator, value)
else:
return self.search(records, operator, value)
############################################################################
#
# Notification when fields are modified
#
class Boolean(Field):
""" Encapsulates a :class:`bool`. """
type = 'boolean'
column_type = ('bool', 'bool')
def convert_to_column(self, value, record, values=None, validate=True):
return bool(value)
def convert_to_cache(self, value, record, validate=True):
return bool(value)
def convert_to_export(self, value, record):
return value
class Integer(Field):
""" Encapsulates an :class:`int`. """
type = 'integer'
column_type = ('int4', 'int4')
group_operator = 'sum'
def convert_to_column(self, value, record, values=None, validate=True):
return int(value or 0)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, dict):
# special case, when an integer field is used as inverse for a one2many
return value.get('id', None)
return int(value or 0)
def convert_to_record(self, value, record):
return value or 0
def convert_to_read(self, value, record, use_name_get=True):
# Integer values greater than 2^31-1 are not supported in pure XMLRPC,
# so we have to pass them as floats :-(
if value and value > MAXINT:
return float(value)
return value
def _update(self, records, value):
# special case, when an integer field is used as inverse for a one2many
cache = records.env.cache
for record in records:
cache.set(record, self, value.id or 0)
def convert_to_export(self, value, record):
if value or value == 0:
return value
return ''
class Float(Field):
""" Encapsulates a :class:`float`.
The precision digits are given by the (optional) ``digits`` attribute.
:param digits: a pair (total, decimal) or a string referencing a
:class:`~odoo.addons.base.models.decimal_precision.DecimalPrecision` record name.
:type digits: tuple(int,int) or str
    When a float is a quantity associated with a unit of measure, it is important
to use the right tool to compare or round values with the correct precision.
The Float class provides some static methods for this purpose:
:func:`~odoo.fields.Float.round()` to round a float with the given precision.
:func:`~odoo.fields.Float.is_zero()` to check if a float equals zero at the given precision.
:func:`~odoo.fields.Float.compare()` to compare two floats at the given precision.
.. admonition:: Example
        To round a quantity with the precision of the unit of measure::
fields.Float.round(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)
        To check if the quantity is zero with the precision of the unit of measure::
fields.Float.is_zero(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)
To compare two quantities::
            fields.Float.compare(self.product_uom_qty, self.qty_done, precision_rounding=self.product_uom_id.rounding)
        The compare helper uses the ``__cmp__`` semantics for historical
        reasons, so the proper, idiomatic way to use it is:
if result == 0, the first and second floats are equal
if result < 0, the first float is lower than the second
if result > 0, the first float is greater than the second
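
        For instance, a minimal usage sketch (``qty`` and ``qty_done`` are
        hypothetical variables)::

            # both operands are rounded to 2 decimal digits before comparison
            if fields.Float.compare(qty, qty_done, precision_digits=2) == 0:
                ...  # the two quantities are equal at that precision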
"""
type = 'float'
column_cast_from = ('int4', 'numeric', 'float8')
_digits = None # digits argument passed to class initializer
group_operator = 'sum'
def __init__(self, string=Default, digits=Default, **kwargs):
super(Float, self).__init__(string=string, _digits=digits, **kwargs)
@property
def column_type(self):
# Explicit support for "falsy" digits (0, False) to indicate a NUMERIC
# field with no fixed precision. The values are saved in the database
# with all significant digits.
# FLOAT8 type is still the default when there is no precision because it
# is faster for most operations (sums, etc.)
return ('numeric', 'numeric') if self._digits is not None else \
('float8', 'double precision')
def get_digits(self, env):
if isinstance(self._digits, str):
precision = env['decimal.precision'].precision_get(self._digits)
return 16, precision
else:
return self._digits
_related__digits = property(attrgetter('_digits'))
def _description_digits(self, env):
return self.get_digits(env)
def convert_to_column(self, value, record, values=None, validate=True):
result = float(value or 0.0)
digits = self.get_digits(record.env)
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
def convert_to_cache(self, value, record, validate=True):
# apply rounding here, otherwise value in cache may be wrong!
value = float(value or 0.0)
if not validate:
return value
digits = self.get_digits(record.env)
return float_round(value, precision_digits=digits[1]) if digits else value
def convert_to_record(self, value, record):
return value or 0.0
def convert_to_export(self, value, record):
if value or value == 0.0:
return value
return ''
round = staticmethod(float_round)
is_zero = staticmethod(float_is_zero)
compare = staticmethod(float_compare)
class Monetary(Field):
""" Encapsulates a :class:`float` expressed in a given
:class:`res_currency<odoo.addons.base.models.res_currency.Currency>`.
The decimal precision and currency symbol are taken from the ``currency_field`` attribute.
:param str currency_field: name of the :class:`Many2one` field
holding the :class:`res_currency <odoo.addons.base.models.res_currency.Currency>`
        this monetary field is expressed in (default: ``'currency_id'``)
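
    For illustration, a typical pairing of a monetary field with its currency
    field (the names below follow the usual convention and are shown only as
    an example)::

        currency_id = fields.Many2one('res.currency')
        amount = fields.Monetary(currency_field='currency_id')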
"""
type = 'monetary'
column_type = ('numeric', 'numeric')
column_cast_from = ('float8',)
currency_field = None
group_operator = 'sum'
def __init__(self, string=Default, currency_field=Default, **kwargs):
super(Monetary, self).__init__(string=string, currency_field=currency_field, **kwargs)
_description_currency_field = property(attrgetter('currency_field'))
def _setup_currency_field(self, model):
if not self.currency_field:
# pick a default, trying in order: 'currency_id', 'x_currency_id'
if 'currency_id' in model._fields:
self.currency_field = 'currency_id'
elif 'x_currency_id' in model._fields:
self.currency_field = 'x_currency_id'
assert self.currency_field in model._fields, \
"Field %s with unknown currency_field %r" % (self, self.currency_field)
def _setup_regular_full(self, model):
super(Monetary, self)._setup_regular_full(model)
self._setup_currency_field(model)
def _setup_related_full(self, model):
super(Monetary, self)._setup_related_full(model)
if self.inherited:
self.currency_field = self.related_field.currency_field
self._setup_currency_field(model)
def convert_to_column(self, value, record, values=None, validate=True):
# retrieve currency from values or record
if values and self.currency_field in values:
field = record._fields[self.currency_field]
currency = field.convert_to_cache(values[self.currency_field], record, validate)
currency = field.convert_to_record(currency, record)
else:
# Note: this is wrong if 'record' is several records with different
# currencies, which is functional nonsense and should not happen
# BEWARE: do not prefetch other fields, because 'value' may be in
# cache, and would be overridden by the value read from database!
currency = record[:1].with_context(prefetch_fields=False)[self.currency_field]
value = float(value or 0.0)
if currency:
return float_repr(currency.round(value), currency.decimal_places)
return value
def convert_to_cache(self, value, record, validate=True):
# cache format: float
value = float(value or 0.0)
if value and validate:
# FIXME @rco-odoo: currency may not be already initialized if it is
# a function or related field!
# BEWARE: do not prefetch other fields, because 'value' may be in
# cache, and would be overridden by the value read from database!
currency = record.sudo().with_context(prefetch_fields=False)[self.currency_field]
if len(currency) > 1:
raise ValueError("Got multiple currencies while assigning values of monetary field %s" % str(self))
elif currency:
value = currency.round(value)
return value
def convert_to_record(self, value, record):
return value or 0.0
def convert_to_read(self, value, record, use_name_get=True):
return value
def convert_to_write(self, value, record):
return value
class _String(Field):
""" Abstract class for string fields. """
translate = False # whether the field is translated
prefetch = None
def __init__(self, string=Default, **kwargs):
# translate is either True, False, or a callable
if 'translate' in kwargs and not callable(kwargs['translate']):
kwargs['translate'] = bool(kwargs['translate'])
super(_String, self).__init__(string=string, **kwargs)
def _setup_attrs(self, model, name):
super()._setup_attrs(model, name)
if self.prefetch is None:
# do not prefetch complex translated fields by default
self.prefetch = not callable(self.translate)
_related_translate = property(attrgetter('translate'))
def _description_translate(self, env):
return bool(self.translate)
def get_trans_terms(self, value):
""" Return the sequence of terms to translate found in `value`. """
if not callable(self.translate):
return [value] if value else []
terms = []
self.translate(terms.append, value)
return terms
def get_trans_func(self, records):
""" Return a translation function `translate` for `self` on the given
records; the function call `translate(record_id, value)` translates the
field value to the language given by the environment of `records`.
"""
if callable(self.translate):
rec_src_trans = records.env['ir.translation']._get_terms_translations(self, records)
def translate(record_id, value):
src_trans = rec_src_trans[record_id]
return self.translate(src_trans.get, value)
else:
rec_trans = records.env['ir.translation']._get_ids(
'%s,%s' % (self.model_name, self.name), 'model', records.env.lang, records.ids)
def translate(record_id, value):
return rec_trans.get(record_id) or value
return translate
def check_trans_value(self, value):
""" Check and possibly sanitize the translated term `value`. """
if callable(self.translate):
# do a "no-translation" to sanitize the value
callback = lambda term: None
return self.translate(callback, value)
else:
return value
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
cache.update(records, self, [cache_value] * len(records))
if not self.store:
return records
real_recs = records.filtered('id')
if not real_recs._ids:
return records
update_column = True
update_trans = False
single_lang = len(records.env['res.lang'].get_installed()) <= 1
if self.translate:
lang = records.env.lang or None # used in _update_translations below
if single_lang:
# a single language is installed
update_trans = True
elif callable(self.translate) or lang == 'en_US':
# update the source and synchronize translations
update_column = True
update_trans = True
elif lang != 'en_US' and lang is not None:
# update the translations only except if emptying
update_column = not cache_value
update_trans = True
# else: lang = None
# update towrite if modifying the source
if update_column:
towrite = records.env.all.towrite[self.model_name]
for rid in real_recs._ids:
# cache_value is already in database format
towrite[rid][self.name] = cache_value
if self.translate is True and cache_value:
tname = "%s,%s" % (records._name, self.name)
records.env['ir.translation']._set_source(tname, real_recs._ids, value)
if self.translate:
# invalidate the field in the other languages
cache.invalidate([(self, records.ids)])
cache.update(records, self, [cache_value] * len(records))
if update_trans:
if callable(self.translate):
# the source value of self has been updated, synchronize
# translated terms when possible
records.env['ir.translation']._sync_terms_translations(self, real_recs)
else:
# update translations
value = self.convert_to_column(value, records)
source_recs = real_recs.with_context(lang=None)
source_value = first(source_recs)[self.name]
if not source_value:
source_recs[self.name] = value
source_value = value
tname = "%s,%s" % (self.model_name, self.name)
if not value:
records.env['ir.translation'].search([
('name', '=', tname),
('type', '=', 'model'),
('res_id', 'in', real_recs._ids)
]).unlink()
elif single_lang:
records.env['ir.translation']._update_translations([dict(
src=source_value,
value=value,
name=tname,
lang=lang,
type='model',
state='translated',
res_id=res_id) for res_id in real_recs._ids])
else:
records.env['ir.translation']._set_ids(
tname, 'model', lang, real_recs._ids, value, source_value,
)
return records
class Char(_String):
""" Basic string field, can be length-limited, usually displayed as a
single-line string in clients.
:param int size: the maximum size of values stored for that field
:param bool trim: states whether the value is trimmed or not (by default,
``True``). Note that the trim operation is applied only by the web client.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
:type translate: bool or callable
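
    For illustration, a size-limited field and a custom translation callable
    (both hypothetical)::

        code = fields.Char(size=8)

        def translate_tags(callback, value):
            # translate each comma-separated term independently, falling
            # back to the original term when no translation is returned
            return ','.join(callback(term) or term for term in value.split(','))

        tags = fields.Char(translate=translate_tags)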
"""
type = 'char'
column_cast_from = ('text',)
size = None # maximum size of values (deprecated)
trim = True # whether value is trimmed (only by web client)
@property
def column_type(self):
return ('varchar', pg_varchar(self.size))
def update_db_column(self, model, column):
if (
column and column['udt_name'] == 'varchar' and column['character_maximum_length'] and
(self.size is None or column['character_maximum_length'] < self.size)
):
# the column's varchar size does not match self.size; convert it
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
super(Char, self).update_db_column(model, column)
_related_size = property(attrgetter('size'))
_related_trim = property(attrgetter('trim'))
_description_size = property(attrgetter('size'))
_description_trim = property(attrgetter('trim'))
def _setup_regular_base(self, model):
super(Char, self)._setup_regular_base(model)
assert self.size is None or isinstance(self.size, int), \
"Char field %s with non-integer size %r" % (self, self.size)
def convert_to_column(self, value, record, values=None, validate=True):
if value is None or value is False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
return pycompat.to_text(value)[:self.size]
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
return pycompat.to_text(value)[:self.size]
class Text(_String):
""" Very similar to :class:`Char` but used for longer contents, does not
have a size and usually displayed as a multiline text box.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
:type translate: bool or callable
"""
type = 'text'
column_type = ('text', 'text')
column_cast_from = ('varchar',)
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
return ustr(value)
class Html(_String):
""" Encapsulates an html code content.
:param bool sanitize: whether value must be sanitized (default: ``True``)
:param bool sanitize_tags: whether to sanitize tags
(only a white list of attributes is accepted, default: ``True``)
:param bool sanitize_attributes: whether to sanitize attributes
(only a white list of attributes is accepted, default: ``True``)
:param bool sanitize_style: whether to sanitize style attributes (default: ``False``)
:param bool strip_style: whether to strip style attributes
(removed and therefore not sanitized, default: ``False``)
:param bool strip_classes: whether to strip classes attributes (default: ``False``)
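
    For illustration, a hypothetical field that keeps sanitization on but
    strips inline ``style`` attributes from stored values::

        body_html = fields.Html(sanitize=True, strip_style=True)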
"""
type = 'html'
column_type = ('text', 'text')
sanitize = True # whether value must be sanitized
sanitize_tags = True # whether to sanitize tags (only a white list of attributes is accepted)
sanitize_attributes = True # whether to sanitize attributes (only a white list of attributes is accepted)
sanitize_style = False # whether to sanitize style attributes
sanitize_form = True # whether to sanitize forms
strip_style = False # whether to strip style attributes (removed and therefore not sanitized)
strip_classes = False # whether to strip classes attributes
def _get_attrs(self, model, name):
# called by _setup_attrs(), working together with _String._setup_attrs()
attrs = super()._get_attrs(model, name)
# Translated sanitized html fields must use html_translate or a callable.
if attrs.get('translate') is True and attrs.get('sanitize', True):
attrs['translate'] = html_translate
return attrs
_related_sanitize = property(attrgetter('sanitize'))
_related_sanitize_tags = property(attrgetter('sanitize_tags'))
_related_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_related_sanitize_style = property(attrgetter('sanitize_style'))
_related_strip_style = property(attrgetter('strip_style'))
_related_strip_classes = property(attrgetter('strip_classes'))
_description_sanitize = property(attrgetter('sanitize'))
_description_sanitize_tags = property(attrgetter('sanitize_tags'))
_description_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_description_sanitize_style = property(attrgetter('sanitize_style'))
_description_strip_style = property(attrgetter('strip_style'))
_description_strip_classes = property(attrgetter('strip_classes'))
def convert_to_column(self, value, record, values=None, validate=True):
if value is None or value is False:
return None
if self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
sanitize_form=self.sanitize_form,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
if validate and self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
sanitize_form=self.sanitize_form,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
class Date(Field):
""" Encapsulates a python :class:`date <datetime.date>` object. """
type = 'date'
column_type = ('date', 'date')
column_cast_from = ('timestamp',)
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
@staticmethod
def today(*args):
"""Return the current day in the format expected by the ORM.
.. note:: This function may be used to compute default values.
"""
return date.today()
@staticmethod
def context_today(record, timestamp=None):
"""Return the current date as seen in the client's timezone in a format
fit for date fields.
.. note:: This method may be used to compute default values.
:param record: recordset from which the timezone will be obtained.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a datetime, regular dates
can't be converted between timezones).
:rtype: date
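
        A common pattern is to use it as a default value (sketch)::

            date_order = fields.Date(default=lambda self: fields.Date.context_today(self))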
"""
today = timestamp or datetime.now()
context_today = None
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST
context_today = today_utc.astimezone(pytz.timezone(tz_name))
except Exception:
_logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
exc_info=True)
return (context_today or today).date()
@staticmethod
def to_date(value):
"""Attempt to convert ``value`` to a :class:`date` object.
.. warning::
If a datetime object is given as value,
it will be converted to a date object and all
datetime-specific information will be lost (HMS, TZ, ...).
:param value: value to convert.
:type value: str or date or datetime
:return: an object representing ``value``.
:rtype: date or None
"""
if not value:
return None
if isinstance(value, date):
if isinstance(value, datetime):
return value.date()
return value
value = value[:DATE_LENGTH]
return datetime.strptime(value, DATE_FORMAT).date()
# kept for backwards compatibility, but consider `from_string` as deprecated, will probably
# be removed after V12
from_string = to_date
@staticmethod
def to_string(value):
"""
Convert a :class:`date` or :class:`datetime` object to a string.
:param value: value to convert.
        :return: a string representing ``value`` in the server's date format; if ``value`` is of
            type :class:`datetime`, the hours, minutes, seconds and tzinfo will be truncated.
:rtype: str
"""
return value.strftime(DATE_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
if not value:
return None
if isinstance(value, datetime):
# TODO: better fix data files (crm demo data)
value = value.date()
# raise TypeError("%s (field %s) must be string or date, not datetime." % (value, self))
return self.to_date(value)
def convert_to_export(self, value, record):
if not value:
return ''
return self.from_string(value)
class Datetime(Field):
""" Encapsulates a python :class:`datetime <datetime.datetime>` object. """
type = 'datetime'
column_type = ('timestamp', 'timestamp')
column_cast_from = ('date',)
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
@staticmethod
def now(*args):
"""Return the current day and time in the format expected by the ORM.
.. note:: This function may be used to compute default values.
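
        For example, as a field default (sketch)::

            start_datetime = fields.Datetime(default=fields.Datetime.now)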
"""
# microseconds must be annihilated as they don't comply with the server datetime format
return datetime.now().replace(microsecond=0)
@staticmethod
def today(*args):
"""Return the current day, at midnight (00:00:00)."""
return Datetime.now().replace(hour=0, minute=0, second=0)
@staticmethod
def context_timestamp(record, timestamp):
"""Return the given timestamp converted to the client's timezone.
.. note:: This method is *not* meant for use as a default initializer,
because datetime fields are automatically converted upon
display on client side. For default values, :meth:`now`
should be used instead.
:param record: recordset from which the timezone will be obtained.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone.
:return: timestamp converted to timezone-aware datetime in context timezone.
:rtype: datetime
"""
assert isinstance(timestamp, datetime), 'Datetime instance expected'
tz_name = record._context.get('tz') or record.env.user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
@staticmethod
def to_datetime(value):
"""Convert an ORM ``value`` into a :class:`datetime` value.
:param value: value to convert.
:type value: str or date or datetime
:return: an object representing ``value``.
:rtype: datetime or None
"""
if not value:
return None
if isinstance(value, date):
if isinstance(value, datetime):
if value.tzinfo:
raise ValueError("Datetime field expects a naive datetime: %s" % value)
return value
return datetime.combine(value, time.min)
# TODO: fix data files
return datetime.strptime(value, DATETIME_FORMAT[:len(value)-2])
# kept for backwards compatibility, but consider `from_string` as deprecated, will probably
# be removed after V12
from_string = to_datetime
@staticmethod
def to_string(value):
"""Convert a :class:`datetime` or :class:`date` object to a string.
:param value: value to convert.
:type value: datetime or date
:return: a string representing ``value`` in the server's datetime format,
if ``value`` is of type :class:`date`,
the time portion will be midnight (00:00:00).
:rtype: str
"""
return value.strftime(DATETIME_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
return self.to_datetime(value)
def convert_to_export(self, value, record):
if not value:
return ''
value = self.convert_to_display_name(value, record)
return self.from_string(value)
def convert_to_display_name(self, value, record):
assert record, 'Record expected'
return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value)))
# http://initd.org/psycopg/docs/usage.html#binary-adaptation
# Received data is returned as buffer (in Python 2) or memoryview (in Python 3).
_BINARY = memoryview
class Binary(Field):
"""Encapsulates a binary content (e.g. a file).
:param bool attachment: whether the field should be stored as `ir_attachment`
or in a column of the model's table (default: ``True``).
"""
type = 'binary'
prefetch = False # not prefetched by default
_depends_context = ('bin_size',) # depends on context (content or size)
attachment = True # whether value is stored in attachment
@property
def column_type(self):
return None if self.attachment else ('bytea', 'bytea')
def _get_attrs(self, model, name):
attrs = super(Binary, self)._get_attrs(model, name)
if not attrs.get('store', True):
attrs['attachment'] = False
return attrs
_description_attachment = property(attrgetter('attachment'))
def convert_to_column(self, value, record, values=None, validate=True):
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast here.
# This str() coercion will only work for pure ASCII unicode strings,
        # on purpose - non-base64 data must be passed as 8-bit byte strings.
if not value:
return None
# Detect if the binary content is an SVG for restricting its upload
# only to system users.
magic_bytes = {
b'P', # first 6 bits of '<' (0x3C) b64 encoded
b'<', # plaintext XML tag opening
}
if isinstance(value, str):
value = value.encode()
if value[:1] in magic_bytes:
try:
decoded_value = base64.b64decode(value.translate(None, delete=b'\r\n'), validate=True)
except binascii.Error:
decoded_value = value
# Full mimetype detection
if (guess_mimetype(decoded_value).startswith('image/svg') and
not record.env.is_system()):
raise UserError(_("Only admins can upload SVG files."))
if isinstance(value, bytes):
return psycopg2.Binary(value)
try:
return psycopg2.Binary(str(value).encode('ascii'))
except UnicodeEncodeError:
raise UserError(_("ASCII characters are required for %s in %s") % (value, self.name))
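    # Illustrative sketch (hypothetical field name): values written to a
    # stored, non-attachment Binary column are expected to be base64-encoded,
    # and SVG content is only accepted from system users:
    #
    #   report = fields.Binary(attachment=False)           # stored as bytea
    #   record.report = base64.b64encode(b'%PDF-1.4 ...')  # fine
    #   record.report = base64.b64encode(b'<svg ...')      # UserError unless
    #                                                      # env.is_system()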
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, _BINARY):
return bytes(value)
if isinstance(value, str):
# the cache must contain bytes or memoryview, but sometimes a string
# is given when assigning a binary field (test `TestFileSeparator`)
return value.encode()
if isinstance(value, int) and \
(record._context.get('bin_size') or
record._context.get('bin_size_' + self.name)):
# If the client requests only the size of the field, we return that
# instead of the content. Presumably a separate request will be done
# to read the actual content, if necessary.
value = human_size(value)
# human_size can return False (-> None) or a string (-> encoded)
return value.encode() if value else None
return None if value is False else value
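    # Illustrative sketch (hypothetical field name): with 'bin_size' in the
    # context, reading a stored binary field yields its human-readable size
    # instead of its content:
    #
    #   record.with_context(bin_size=True).datas   # e.g. b'5.54 Kb'
    #   record.datas                               # full base64 content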
def convert_to_record(self, value, record):
if isinstance(value, _BINARY):
return bytes(value)
return False if value is None else value
def compute_value(self, records):
bin_size_name = 'bin_size_' + self.name
if records.env.context.get('bin_size') or records.env.context.get(bin_size_name):
# always compute without bin_size
records_no_bin_size = records.with_context(**{'bin_size': False, bin_size_name: False})
super().compute_value(records_no_bin_size)
# manually update the bin_size cache
cache = records.env.cache
for record_no_bin_size, record in zip(records_no_bin_size, records):
try:
value = cache.get(record_no_bin_size, self)
try:
value = base64.b64decode(value)
except (TypeError, binascii.Error):
pass
try:
if isinstance(value, (bytes, _BINARY)):
value = human_size(len(value))
                    except TypeError:
pass
cache_value = self.convert_to_cache(value, record)
cache.set(record, self, cache_value)
except CacheMiss:
pass
else:
super().compute_value(records)
def read(self, records):
# values are stored in attachments, retrieve them
assert self.attachment
domain = [
('res_model', '=', records._name),
('res_field', '=', self.name),
('res_id', 'in', records.ids),
]
# Note: the 'bin_size' flag is handled by the field 'datas' itself
data = {
att.res_id: att.datas
for att in records.env['ir.attachment'].sudo().search(domain)
}
cache = records.env.cache
for record in records:
cache.set(record, self, data.get(record.id, False))
def create(self, record_values):
assert self.attachment
if not record_values:
return
# create the attachments that store the values
env = record_values[0][0].env
with env.norecompute():
env['ir.attachment'].sudo().with_context(
binary_field_real_user=env.user,
).create([{
'name': self.name,
'res_model': self.model_name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
}
for record, value in record_values
if value
])
def write(self, records, value):
if not self.attachment:
return super().write(records, value)
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
if self.store:
# determine records that are known to be not null
not_null = cache.get_records_different_from(records, self, None)
cache.update(records, self, [cache_value] * len(records))
# retrieve the attachments that store the values, and adapt them
if self.store and any(records._ids):
real_records = records.filtered('id')
atts = records.env['ir.attachment'].sudo()
if not_null:
atts = atts.search([
('res_model', '=', self.model_name),
('res_field', '=', self.name),
('res_id', 'in', real_records.ids),
])
if value:
# update the existing attachments
atts.write({'datas': value})
atts_records = records.browse(atts.mapped('res_id'))
# create the missing attachments
missing = (real_records - atts_records)
if missing:
atts.create([{
'name': self.name,
'res_model': record._name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
}
for record in missing
])
else:
atts.unlink()
return records
class Image(Binary):
"""Encapsulates an image, extending :class:`Binary`.
    If the image size exceeds the ``max_width``/``max_height`` limit of pixels,
    the image will be resized to the limit while keeping the aspect ratio.
:param int max_width: the maximum width of the image (default: ``0``, no limit)
:param int max_height: the maximum height of the image (default: ``0``, no limit)
:param bool verify_resolution: whether the image resolution should be verified
to ensure it doesn't go over the maximum image resolution (default: ``True``).
See :class:`odoo.tools.image.ImageProcess` for maximum image resolution (default: ``45e6``).
.. note::
If no ``max_width``/``max_height`` is specified (or is set to 0) and ``verify_resolution`` is False,
the field content won't be verified at all and a :class:`Binary` field should be used.
"""
max_width = 0
max_height = 0
verify_resolution = True
def create(self, record_values):
new_record_values = []
for record, value in record_values:
# strange behavior when setting related image field, when `self`
# does not resize the same way as its related field
new_value = self._image_process(value)
new_record_values.append((record, new_value))
cache_value = self.convert_to_cache(value if self.related else new_value, record)
record.env.cache.update(record, self, [cache_value] * len(record))
super(Image, self).create(new_record_values)
def write(self, records, value):
try:
new_value = self._image_process(value)
except UserError:
if not any(records._ids):
# Some crap is assigned to a new record. This can happen in an
# onchange, where the client sends the "bin size" value of the
# field instead of its full value (this saves bandwidth). In
# this case, we simply don't assign the field: its value will be
# taken from the records' origin.
return
raise
super(Image, self).write(records, new_value)
cache_value = self.convert_to_cache(value if self.related else new_value, records)
records.env.cache.update(records, self, [cache_value] * len(records))
def _image_process(self, value):
return image_process(value,
size=(self.max_width, self.max_height),
verify_resolution=self.verify_resolution,
)
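    # Illustrative sketch: a typical declaration, where values are resized on
    # write to fit within 128x128 pixels while keeping the aspect ratio:
    #
    #   image_128 = fields.Image("Image", max_width=128, max_height=128)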
def _process_related(self, value):
"""Override to resize the related value before saving it on self."""
try:
return self._image_process(super()._process_related(value))
except UserError:
# Avoid the following `write` to fail if the related image was saved
# invalid, which can happen for pre-existing databases.
return False
class Selection(Field):
""" Encapsulates an exclusive choice between different values.
:param selection: specifies the possible values for this field.
It is given as either a list of pairs ``(value, label)``, or a model
method, or a method name.
:type selection: list(tuple(str,str)) or callable or str
:param selection_add: provides an extension of the selection in the case
of an overridden field. It is a list of pairs ``(value, label)`` or
singletons ``(value,)``, where singleton values must appear in the
overridden selection. The new values are inserted in an order that is
consistent with the overridden selection and this list::
selection = [('a', 'A'), ('b', 'B')]
selection_add = [('c', 'C'), ('b',)]
> result = [('a', 'A'), ('c', 'C'), ('b', 'B')]
:type selection_add: list(tuple(str,str))
:param ondelete: provides a fallback mechanism for any overridden
field with a selection_add. It is a dict that maps every option
from the selection_add to a fallback action.
This fallback action will be applied to all records whose
selection_add option maps to it.
The actions can be any of the following:
- 'set null' -- the default, all records with this option
will have their selection value set to False.
- 'cascade' -- all records with this option will be
deleted along with the option itself.
- 'set default' -- all records with this option will be
set to the default of the field definition
- <callable> -- a callable whose first and only argument will be
the set of records containing the specified Selection option,
for custom processing
The attribute ``selection`` is mandatory except in the case of
``related`` or extended fields.
"""
type = 'selection'
column_type = ('varchar', pg_varchar())
selection = None # [(value, string), ...], function or method name
validate = True # whether validating upon write
ondelete = None # {value: policy} (what to do when value is deleted)
def __init__(self, selection=Default, string=Default, **kwargs):
super(Selection, self).__init__(selection=selection, string=string, **kwargs)
def _setup_regular_base(self, model):
super(Selection, self)._setup_regular_base(model)
assert self.selection is not None, "Field %s without selection" % self
if isinstance(self.selection, list):
assert all(isinstance(v, str) for v, _ in self.selection), \
"Field %s with non-str value in selection" % self
def _setup_related_full(self, model):
super(Selection, self)._setup_related_full(model)
# selection must be computed on related field
field = self.related_field
self.selection = lambda model: field._description_selection(model.env)
def _get_attrs(self, model, name):
attrs = super(Selection, self)._get_attrs(model, name)
# arguments 'selection' and 'selection_add' are processed below
attrs.pop('selection_add', None)
return attrs
def _setup_attrs(self, model, name):
super(Selection, self)._setup_attrs(model, name)
# determine selection (applying 'selection_add' extensions)
values = None
labels = {}
for field in reversed(resolve_mro(model, name, self._can_setup_from)):
# We cannot use field.selection or field.selection_add here
# because those attributes are overridden by ``_setup_attrs``.
if 'selection' in field.args:
if self.related:
_logger.warning("%s: selection attribute will be ignored as the field is related", self)
selection = field.args['selection']
if isinstance(selection, list):
if values is not None and values != [kv[0] for kv in selection]:
_logger.warning("%s: selection=%r overrides existing selection; use selection_add instead", self, selection)
values = [kv[0] for kv in selection]
labels = dict(selection)
self.ondelete = {}
else:
values = None
labels = {}
self.selection = selection
self.ondelete = None
if 'selection_add' in field.args:
if self.related:
_logger.warning("%s: selection_add attribute will be ignored as the field is related", self)
selection_add = field.args['selection_add']
assert isinstance(selection_add, list), \
"%s: selection_add=%r must be a list" % (self, selection_add)
assert values is not None, \
"%s: selection_add=%r on non-list selection %r" % (self, selection_add, self.selection)
ondelete = field.args.get('ondelete') or {}
new_values = [kv[0] for kv in selection_add if kv[0] not in values]
for key in new_values:
ondelete.setdefault(key, 'set null')
if self.required and new_values and 'set null' in ondelete.values():
raise ValueError(
"%r: required selection fields must define an ondelete policy that "
"implements the proper cleanup of the corresponding records upon "
"module uninstallation. Please use one or more of the following "
"policies: 'set default' (if the field has a default defined), 'cascade', "
"or a single-argument callable where the argument is the recordset "
"containing the specified option." % self
)
# check ondelete values
for key, val in ondelete.items():
if callable(val) or val in ('set null', 'cascade'):
continue
if val == 'set default':
assert self.default is not None, (
"%r: ondelete policy of type 'set default' is invalid for this field "
"as it does not define a default! Either define one in the base "
"field, or change the chosen ondelete policy" % self
)
continue
raise ValueError(
"%r: ondelete policy %r for selection value %r is not a valid ondelete "
"policy, please choose one of 'set null', 'set default', 'cascade' or "
"a callable" % (self, val, key)
)
values = merge_sequences(values, [kv[0] for kv in selection_add])
labels.update(kv for kv in selection_add if len(kv) == 2)
self.ondelete.update(ondelete)
if values is not None:
self.selection = [(value, labels[value]) for value in values]
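    # Illustrative sketch of the merge step above, mirroring the class
    # docstring: merge_sequences() interleaves the base values with the
    # selection_add values while preserving both relative orders:
    #
    #   merge_sequences(['a', 'b'], ['c', 'b'])   # -> ['a', 'c', 'b']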
def _selection_modules(self, model):
""" Return a mapping from selection values to modules defining each value. """
if not isinstance(self.selection, list):
return {}
value_modules = defaultdict(set)
for field in reversed(resolve_mro(model, self.name, self._can_setup_from)):
module = field.args.get('_module')
if not module:
continue
if 'selection' in field.args:
value_modules.clear()
if isinstance(field.args['selection'], list):
for value, label in field.args['selection']:
value_modules[value].add(module)
if 'selection_add' in field.args:
for value_label in field.args['selection_add']:
if len(value_label) > 1:
value_modules[value_label[0]].add(module)
return value_modules
def _description_selection(self, env):
""" return the selection list (pairs (value, label)); labels are
translated according to context language
"""
selection = self.selection
if isinstance(selection, str):
return getattr(env[self.model_name], selection)()
if callable(selection):
return selection(env[self.model_name])
# translate selection labels
if env.lang:
return env['ir.translation'].get_field_selection(self.model_name, self.name)
else:
return selection
def get_values(self, env):
"""Return a list of the possible values."""
selection = self.selection
if isinstance(selection, str):
selection = getattr(env[self.model_name], selection)()
elif callable(selection):
selection = selection(env[self.model_name])
return [value for value, _ in selection]
def convert_to_column(self, value, record, values=None, validate=True):
if validate and self.validate:
value = self.convert_to_cache(value, record)
return super(Selection, self).convert_to_column(value, record, values, validate)
def convert_to_cache(self, value, record, validate=True):
if not validate:
return value or None
if value and self.column_type[0] == 'int4':
value = int(value)
if value in self.get_values(record.env):
return value
elif not value:
return None
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
if not isinstance(self.selection, list):
# FIXME: this reproduces an existing buggy behavior!
return value if value else ''
for item in self._description_selection(record.env):
if item[0] == value:
return item[1]
return ''
class Reference(Selection):
""" Pseudo-relational field (no FK in database).
The field value is stored as a :class:`string <str>` following the pattern
``"res_model.res_id"`` in database.
"""
type = 'reference'
@property
def column_type(self):
return ('varchar', pg_varchar())
def convert_to_column(self, value, record, values=None, validate=True):
return Field.convert_to_column(self, value, record, values, validate)
def convert_to_cache(self, value, record, validate=True):
# cache format: str ("model,id") or None
if isinstance(value, BaseModel):
if not validate or (value._name in self.get_values(record.env) and len(value) <= 1):
return "%s,%s" % (value._name, value.id) if value else None
elif isinstance(value, str):
res_model, res_id = value.split(',')
if not validate or res_model in self.get_values(record.env):
if record.env[res_model].browse(int(res_id)).exists():
return value
else:
return None
elif not value:
return None
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_record(self, value, record):
if value:
res_model, res_id = value.split(',')
return record.env[res_model].browse(int(res_id))
return None
def convert_to_read(self, value, record, use_name_get=True):
return "%s,%s" % (value._name, value.id) if value else False
def convert_to_export(self, value, record):
return value.display_name if value else ''
def convert_to_display_name(self, value, record):
return ustr(value and value.display_name)
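    # Illustrative sketch (hypothetical field name): the column stores the
    # target as a single 'model,id' string:
    #
    #   ref = fields.Reference(selection=[('res.partner', 'Partner')])
    #   record.ref = env['res.partner'].browse(42)  # stored as 'res.partner,42'
    #   record.ref.display_name                     # browses res.partner(42)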
class _Relational(Field):
""" Abstract class for relational fields. """
relational = True
domain = [] # domain for searching values
context = {} # context for searching values
check_company = False
def __get__(self, records, owner):
# base case: do the regular access
if records is None or len(records._ids) <= 1:
return super().__get__(records, owner)
# multirecord case: use mapped
return self.mapped(records)
def _setup_regular_base(self, model):
super(_Relational, self)._setup_regular_base(model)
if self.comodel_name not in model.pool:
_logger.warning("Field %s with unknown comodel_name %r", self, self.comodel_name)
self.comodel_name = '_unknown'
def get_domain_list(self, model):
""" Return a list domain from the domain parameter. """
domain = self.domain
if callable(domain):
domain = domain(model)
return domain if isinstance(domain, list) else []
@property
def _related_domain(self):
if callable(self.domain):
# will be called with another model than self's
return lambda recs: self.domain(recs.env[self.model_name])
else:
# maybe not correct if domain is a string...
return self.domain
_related_context = property(attrgetter('context'))
_description_relation = property(attrgetter('comodel_name'))
_description_context = property(attrgetter('context'))
def _description_domain(self, env):
if self.check_company and not self.domain:
if self.company_dependent:
if self.comodel_name == "res.users":
# user needs access to current company (self.env.company)
return "[('company_ids', 'in', allowed_company_ids[0])]"
else:
return "[('company_id', 'in', [allowed_company_ids[0], False])]"
else:
# when using check_company=True on a field on 'res.company', the
# company_id comes from the id of the current record
cid = "id" if self.model_name == "res.company" else "company_id"
if self.comodel_name == "res.users":
# User allowed company ids = user.company_ids
return f"['|', (not {cid}, '=', True), ('company_ids', 'in', [{cid}])]"
else:
return f"[('company_id', 'in', [{cid}, False])]"
return self.domain(env[self.model_name]) if callable(self.domain) else self.domain
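    # Illustrative sketch (hypothetical field name): with check_company=True
    # and no explicit domain, the client receives a company domain such as:
    #
    #   partner_id = fields.Many2one('res.partner', check_company=True)
    #   # -> "[('company_id', 'in', [company_id, False])]"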
def null(self, record):
return record.env[self.comodel_name]
class Many2one(_Relational):
""" The value of such a field is a recordset of size 0 (no
record) or 1 (a single record).
:param str comodel_name: name of the target model
``Mandatory`` except for related or extended fields.
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param str ondelete: what to do when the referred record is deleted;
possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``
:param bool auto_join: whether JOINs are generated upon search through that
field (default: ``False``)
:param bool delegate: set it to ``True`` to make fields of the target model
accessible from the current model (corresponds to ``_inherits``)
:param bool check_company: Mark the field to be verified in
:meth:`~odoo.models.Model._check_company`. Add a default company
domain depending on the field attributes.
"""
type = 'many2one'
column_type = ('int4', 'int4')
ondelete = None # what to do when value is deleted
auto_join = False # whether joins are generated upon search
delegate = False # whether self implements delegation
def __init__(self, comodel_name=Default, string=Default, **kwargs):
super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)
def _setup_attrs(self, model, name):
super(Many2one, self)._setup_attrs(model, name)
# determine self.delegate
if not self.delegate:
self.delegate = name in model._inherits.values()
def _setup_regular_base(self, model):
super()._setup_regular_base(model)
# 3 cases:
# 1) The ondelete attribute is not defined, we assign it a sensible default
# 2) The ondelete attribute is defined and its definition makes sense
# 3) The ondelete attribute is explicitly defined as 'set null' for a required m2o,
# this is considered a programming error.
if not self.ondelete:
comodel = model.env[self.comodel_name]
if model.is_transient() and not comodel.is_transient():
# Many2one relations from TransientModel Model are annoying because
# they can block deletion due to foreign keys. So unless stated
# otherwise, we default them to ondelete='cascade'.
self.ondelete = 'cascade' if self.required else 'set null'
else:
self.ondelete = 'restrict' if self.required else 'set null'
if self.ondelete == 'set null' and self.required:
raise ValueError(
"The m2o field %s of model %s is required but declares its ondelete policy "
"as being 'set null'. Only 'restrict' and 'cascade' make sense."
% (self.name, model._name)
)
if self.ondelete == 'restrict' and self.comodel_name in IR_MODELS:
raise ValueError(
f"Field {self.name} of model {model._name} is defined as ondelete='restrict' "
f"while having {self.comodel_name} as comodel, the 'restrict' mode is not "
f"supported for this type of field as comodel."
)
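    # Illustrative sketch of the defaults chosen above (hypothetical fields):
    #
    #   partner_id = fields.Many2one('res.partner')                 # -> 'set null'
    #   partner_id = fields.Many2one('res.partner', required=True)  # -> 'restrict'
    #
    # On a TransientModel pointing to a regular Model, a required many2one
    # defaults to 'cascade' instead of 'restrict'.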
def update_db(self, model, columns):
comodel = model.env[self.comodel_name]
if not model.is_transient() and comodel.is_transient():
raise ValueError('Many2one %s from Model to TransientModel is forbidden' % self)
return super(Many2one, self).update_db(model, columns)
def update_db_column(self, model, column):
super(Many2one, self).update_db_column(model, column)
model.pool.post_init(self.update_db_foreign_key, model, column)
def update_db_foreign_key(self, model, column):
comodel = model.env[self.comodel_name]
# foreign keys do not work on views, and users can define custom models on sql views.
if not model._is_an_ordinary_table() or not comodel._is_an_ordinary_table():
return
# ir_actions is inherited, so foreign key doesn't work on it
if not comodel._auto or comodel._table == 'ir_actions':
return
# create/update the foreign key, and reflect it in 'ir.model.constraint'
model.pool.add_foreign_key(
model._table, self.name, comodel._table, 'id', self.ondelete or 'set null',
model, self._module
)
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
cache = records.env.cache
for record in records:
cache.set(record, self, self.convert_to_cache(value, record, validate=False))
def convert_to_column(self, value, record, values=None, validate=True):
return value or None
def convert_to_cache(self, value, record, validate=True):
# cache format: id or None
if type(value) in IdType:
id_ = value
elif isinstance(value, BaseModel):
if validate and (value._name != self.comodel_name or len(value) > 1):
raise ValueError("Wrong value for %s: %r" % (self, value))
id_ = value._ids[0] if value._ids else None
elif isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
id_ = value[0] if value else None
elif isinstance(value, dict):
# return a new record (with the given field 'id' as origin)
comodel = record.env[self.comodel_name]
origin = comodel.browse(value.get('id'))
id_ = comodel.new(value, origin=origin).id
else:
id_ = None
if self.delegate and record and not any(record._ids):
# if all records are new, then so is the parent
id_ = id_ and NewId(id_)
return id_
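    # Illustrative sketch (hypothetical field name): the conversion above
    # accepts several assignment formats and normalizes them all to a single
    # id (or None):
    #
    #   record.partner_id = 42                       # plain id
    #   record.partner_id = partner                  # recordset of size <= 1
    #   record.partner_id = (42, 'Deco Addict')      # (id, name) pair
    #   record.partner_id = {'name': 'New partner'}  # values dict -> new record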
def convert_to_record(self, value, record):
# use registry to avoid creating a recordset for the model
ids = () if value is None else (value,)
prefetch_ids = IterableGenerator(prefetch_many2one_ids, record, self)
return record.pool[self.comodel_name]._browse(record.env, ids, prefetch_ids)
def convert_to_record_multi(self, values, records):
# return the ids as a recordset without duplicates
prefetch_ids = IterableGenerator(prefetch_many2one_ids, records, self)
ids = tuple(unique(id_ for id_ in values if id_ is not None))
return records.pool[self.comodel_name]._browse(records.env, ids, prefetch_ids)
def convert_to_read(self, value, record, use_name_get=True):
if use_name_get and value:
# evaluate name_get() as superuser, because the visibility of a
# many2one field value (id and name) depends on the current record's
# access rights, and not the value's access rights.
try:
# performance: value.sudo() prefetches the same records as value
return (value.id, value.sudo().display_name)
except MissingError:
# Should not happen, unless the foreign key is missing.
return False
else:
return value.id
def convert_to_write(self, value, record):
if type(value) in IdType:
return value
if not value:
return False
if isinstance(value, BaseModel) and value._name == self.comodel_name:
return value.id
if isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
return value[0] if value else False
if isinstance(value, dict):
return record.env[self.comodel_name].new(value).id
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
return value.display_name if value else ''
def convert_to_display_name(self, value, record):
return ustr(value.display_name)
def convert_to_onchange(self, value, record, names):
if not value.id:
return False
return super(Many2one, self).convert_to_onchange(value, record, names)
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
# remove records from the cache of one2many fields of old corecords
self._remove_inverses(records, cache_value)
# update the cache of self
cache.update(records, self, [cache_value] * len(records))
# update towrite
if self.store:
towrite = records.env.all.towrite[self.model_name]
for record in records.filtered('id'):
# cache_value is already in database format
towrite[record.id][self.name] = cache_value
# update the cache of one2many fields of new corecord
self._update_inverses(records, cache_value)
return records
def _remove_inverses(self, records, value):
""" Remove `records` from the cached values of the inverse fields of `self`. """
cache = records.env.cache
record_ids = set(records._ids)
# align(id) returns a NewId if records are new, a real id otherwise
align = (lambda id_: id_) if all(record_ids) else (lambda id_: id_ and NewId(id_))
for invf in records._field_inverses[self]:
corecords = records.env[self.comodel_name].browse(
align(id_) for id_ in cache.get_values(records, self)
)
for corecord in corecords:
ids0 = cache.get(corecord, invf, None)
if ids0 is not None:
ids1 = tuple(id_ for id_ in ids0 if id_ not in record_ids)
cache.set(corecord, invf, ids1)
def _update_inverses(self, records, value):
""" Add `records` to the cached values of the inverse fields of `self`. """
if value is None:
return
cache = records.env.cache
corecord = self.convert_to_record(value, records)
for invf in records._field_inverses[self]:
valid_records = records.filtered_domain(invf.get_domain_list(corecord))
if not valid_records:
continue
ids0 = cache.get(corecord, invf, None)
# if the value for the corecord is not in cache, but this is a new
# record, assign it anyway, as you won't be able to fetch it from
# database (see `test_sale_order`)
if ids0 is not None or not corecord.id:
ids1 = tuple(unique((ids0 or ()) + valid_records._ids))
cache.set(corecord, invf, ids1)
class Many2oneReference(Integer):
""" Pseudo-relational field (no FK in database).
The field value is stored as an :class:`integer <int>` id in database.
Contrary to :class:`Reference` fields, the model has to be specified
in a :class:`Char` field, whose name has to be specified in the
`model_field` attribute for the current :class:`Many2oneReference` field.
:param str model_field: name of the :class:`Char` where the model name is stored.
"""
type = 'many2one_reference'
model_field = None
_related_model_field = property(attrgetter('model_field'))
def convert_to_cache(self, value, record, validate=True):
# cache format: id or None
if isinstance(value, BaseModel):
value = value._ids[0] if value._ids else None
return super().convert_to_cache(value, record, validate)
def _remove_inverses(self, records, value):
# TODO: unused
# remove records from the cache of one2many fields of old corecords
cache = records.env.cache
record_ids = set(records._ids)
model_ids = self._record_ids_per_res_model(records)
for invf in records._field_inverses[self]:
records = records.browse(model_ids[invf.model_name])
if not records:
continue
corecords = records.env[invf.model_name].browse(
id_ for id_ in cache.get_values(records, self)
)
for corecord in corecords:
ids0 = cache.get(corecord, invf, None)
if ids0 is not None:
ids1 = tuple(id_ for id_ in ids0 if id_ not in record_ids)
cache.set(corecord, invf, ids1)
def _update_inverses(self, records, value):
""" Add `records` to the cached values of the inverse fields of `self`. """
if not value:
return
cache = records.env.cache
model_ids = self._record_ids_per_res_model(records)
for invf in records._field_inverses[self]:
records = records.browse(model_ids[invf.model_name])
if not records:
continue
corecord = records.env[invf.model_name].browse(value)
records = records.filtered_domain(invf.get_domain_list(corecord))
if not records:
continue
ids0 = cache.get(corecord, invf, None)
# if the value for the corecord is not in cache, but this is a new
# record, assign it anyway, as you won't be able to fetch it from
# database (see `test_sale_order`)
if ids0 is not None or not corecord.id:
ids1 = tuple(unique((ids0 or ()) + records._ids))
cache.set(corecord, invf, ids1)
def _record_ids_per_res_model(self, records):
model_ids = defaultdict(set)
for record in records:
model = record[self.model_field]
if not model and record._fields[self.model_field].compute:
# fallback when the model field is computed :-/
record._fields[self.model_field].compute_value(record)
model = record[self.model_field]
if not model:
continue
model_ids[model].add(record.id)
return model_ids
class _RelationalMulti(_Relational):
""" Abstract class for relational fields *2many. """
# Important: the cache contains the ids of all the records in the relation,
# including inactive records. Inactive records are filtered out by
# convert_to_record(), depending on the context.
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``,
and return whether everything is in cache.
"""
if not isinstance(records, BaseModel):
# the inverse of self is a non-relational field; `value` is a
# corecord that refers to `records` by an integer field
model = value.env[self.model_name]
domain = self.domain(model) if callable(self.domain) else self.domain
if not value.filtered_domain(domain):
return
records = model.browse(records)
result = True
if value:
cache = records.env.cache
for record in records:
if cache.contains(record, self):
val = self.convert_to_cache(record[self.name] | value, record, validate=False)
cache.set(record, self, val)
else:
result = False
records.modified([self.name])
return result
def convert_to_cache(self, value, record, validate=True):
# cache format: tuple(ids)
if isinstance(value, BaseModel):
if validate and value._name != self.comodel_name:
raise ValueError("Wrong value for %s: %s" % (self, value))
ids = value._ids
if record and not record.id:
# x2many field value of new record is new records
ids = tuple(it and NewId(it) for it in ids)
return ids
elif isinstance(value, (list, tuple)):
# value is a list/tuple of commands, dicts or record ids
comodel = record.env[self.comodel_name]
# if record is new, the field's value is new records
if record and not record.id:
browse = lambda it: comodel.browse([it and NewId(it)])
else:
browse = comodel.browse
# determine the value ids
ids = OrderedSet(record[self.name]._ids if validate else ())
# modify ids with the commands
for command in value:
if isinstance(command, (tuple, list)):
if command[0] == 0:
ids.add(comodel.new(command[2], ref=command[1]).id)
elif command[0] == 1:
line = browse(command[1])
if validate:
line.update(command[2])
else:
line._update_cache(command[2], validate=False)
ids.add(line.id)
elif command[0] in (2, 3):
ids.discard(browse(command[1]).id)
elif command[0] == 4:
ids.add(browse(command[1]).id)
elif command[0] == 5:
ids.clear()
elif command[0] == 6:
ids = OrderedSet(browse(it).id for it in command[2])
elif isinstance(command, dict):
ids.add(comodel.new(command).id)
else:
ids.add(browse(command).id)
# return result as a tuple
return tuple(ids)
elif not value:
return ()
raise ValueError("Wrong value for %s: %s" % (self, value))
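    # The command tuples handled above follow the standard x2many write
    # format:
    #
    #   (0, 0, values)   create a record from `values` and link it
    #   (1, id, values)  update the linked record `id` with `values`
    #   (2, id)          unlink and delete the record `id`
    #   (3, id)          unlink the record `id`, but keep it in database
    #   (4, id)          link the existing record `id`
    #   (5,)             unlink all records
    #   (6, 0, ids)      replace the current relations with `ids`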
def convert_to_record(self, value, record):
# use registry to avoid creating a recordset for the model
prefetch_ids = IterableGenerator(prefetch_x2many_ids, record, self)
Comodel = record.pool[self.comodel_name]
corecords = Comodel._browse(record.env, value, prefetch_ids)
if (
Comodel._active_name
and self.context.get('active_test', record.env.context.get('active_test', True))
):
corecords = corecords.filtered(Comodel._active_name).with_prefetch(prefetch_ids)
return corecords
def convert_to_record_multi(self, values, records):
# return the list of ids as a recordset without duplicates
prefetch_ids = IterableGenerator(prefetch_x2many_ids, records, self)
Comodel = records.pool[self.comodel_name]
ids = tuple(unique(id_ for ids in values for id_ in ids))
corecords = Comodel._browse(records.env, ids, prefetch_ids)
if (
Comodel._active_name
and self.context.get('active_test', records.env.context.get('active_test', True))
):
corecords = corecords.filtered(Comodel._active_name).with_prefetch(prefetch_ids)
return corecords
def convert_to_read(self, value, record, use_name_get=True):
return value.ids
def convert_to_write(self, value, record):
if isinstance(value, tuple):
# a tuple of ids, this is the cache format
value = record.env[self.comodel_name].browse(value)
if isinstance(value, BaseModel) and value._name == self.comodel_name:
def get_origin(val):
return val._origin if isinstance(val, BaseModel) else val
# make result with new and existing records
inv_names = {field.name for field in record._field_inverses[self]}
result = [(6, 0, [])]
for record in value:
origin = record._origin
if not origin:
values = record._convert_to_write({
name: record[name]
for name in record._cache
if name not in inv_names
})
result.append((0, 0, values))
else:
result[0][2].append(origin.id)
if record != origin:
values = record._convert_to_write({
name: record[name]
for name in record._cache
if name not in inv_names and get_origin(record[name]) != origin[name]
})
if values:
result.append((1, origin.id, values))
return result
if value is False or value is None:
return [(5,)]
if isinstance(value, list):
return value
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_export(self, value, record):
return ','.join(name for id, name in value.name_get()) if value else ''
def convert_to_display_name(self, value, record):
raise NotImplementedError()
def _setup_regular_full(self, model):
super(_RelationalMulti, self)._setup_regular_full(model)
if not self.compute and isinstance(self.domain, list):
self.depends = tuple(unique(itertools.chain(self.depends, (
self.name + '.' + arg[0]
for arg in self.domain
if isinstance(arg, (tuple, list)) and isinstance(arg[0], str)
))))
def create(self, record_values):
""" Write the value of ``self`` on the given records, which have just
been created.
:param record_values: a list of pairs ``(record, value)``, where
``value`` is in the format of method :meth:`BaseModel.write`
"""
self.write_batch(record_values, True)
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
return self.write_batch([(records, value)])
def write_batch(self, records_commands_list, create=False):
if not records_commands_list:
return False
for idx, (recs, value) in enumerate(records_commands_list):
if isinstance(value, tuple):
value = [(6, 0, value)]
elif isinstance(value, BaseModel) and value._name == self.comodel_name:
value = [(6, 0, value._ids)]
elif value is False or value is None:
value = [(5,)]
elif isinstance(value, list) and value and not isinstance(value[0], (tuple, list)):
value = [(6, 0, tuple(value))]
if not isinstance(value, list):
raise ValueError("Wrong value for %s: %s" % (self, value))
records_commands_list[idx] = (recs, value)
record_ids = {rid for recs, cs in records_commands_list for rid in recs._ids}
if all(record_ids):
return self.write_real(records_commands_list, create)
else:
assert not any(record_ids)
return self.write_new(records_commands_list)
class One2many(_RelationalMulti):
"""One2many field; the value of such a field is the recordset of all the
records in ``comodel_name`` such that the field ``inverse_name`` is equal to
the current record.
:param str comodel_name: name of the target model
:param str inverse_name: name of the inverse ``Many2one`` field in
``comodel_name``
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param bool auto_join: whether JOINs are generated upon search through that
field (default: ``False``)
:param int limit: optional limit to use upon read
The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in
the case of related fields or field extensions.
"""
type = 'one2many'
inverse_name = None # name of the inverse field
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
copy = False # o2m are not copied by default
def __init__(self, comodel_name=Default, inverse_name=Default, string=Default, **kwargs):
super(One2many, self).__init__(
comodel_name=comodel_name,
inverse_name=inverse_name,
string=string,
**kwargs
)
def _setup_regular_full(self, model):
super(One2many, self)._setup_regular_full(model)
if self.inverse_name:
# link self to its inverse field and vice-versa
comodel = model.env[self.comodel_name]
invf = comodel._fields[self.inverse_name]
if isinstance(invf, (Many2one, Many2oneReference)):
# setting one2many fields only invalidates many2one inverses;
# integer inverses (res_model/res_id pairs) are not supported
model._field_inverses.add(self, invf)
comodel._field_inverses.add(invf, self)
_description_relation_field = property(attrgetter('inverse_name'))
def update_db(self, model, columns):
if self.comodel_name in model.env:
comodel = model.env[self.comodel_name]
if self.inverse_name not in comodel._fields:
raise UserError(_("No inverse field %r found for %r") % (self.inverse_name, self.comodel_name))
def get_domain_list(self, records):
comodel = records.env.registry[self.comodel_name]
inverse_field = comodel._fields[self.inverse_name]
domain = super(One2many, self).get_domain_list(records)
if inverse_field.type == 'many2one_reference':
domain = domain + [(inverse_field.model_field, '=', records._name)]
return domain
def __get__(self, records, owner):
if records is not None and self.inverse_name is not None:
# force the computation of the inverse field to ensure that the
# cache value of self is consistent
inverse_field = records.pool[self.comodel_name]._fields[self.inverse_name]
if inverse_field.compute:
records.env[self.comodel_name].recompute([self.inverse_name])
return super().__get__(records, owner)
def read(self, records):
# retrieve the lines in the comodel
context = {'active_test': False}
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
inverse = self.inverse_name
inverse_field = comodel._fields[inverse]
get_id = (lambda rec: rec.id) if inverse_field.type == 'many2one' else int
domain = self.get_domain_list(records) + [(inverse, 'in', records.ids)]
lines = comodel.search(domain, limit=self.limit)
# group lines by inverse field (without prefetching other fields)
group = defaultdict(list)
for line in lines.with_context(prefetch_fields=False):
# line[inverse] may be a record or an integer
group[get_id(line[inverse])].append(line.id)
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write_real(self, records_commands_list, create=False):
""" Update real records. """
# records_commands_list = [(records, commands), ...]
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
ids = {rid for recs, cs in records_commands_list for rid in recs.ids}
records = records_commands_list[0][0].browse(ids)
if self.store:
inverse = self.inverse_name
to_create = [] # line vals to create
to_delete = [] # line ids to delete
to_inverse = {}
allow_full_delete = not create
def unlink(lines):
if getattr(comodel._fields[inverse], 'ondelete', False) == 'cascade':
to_delete.extend(lines._ids)
else:
lines[inverse] = False
def flush():
if to_delete:
# unlink() will remove the lines from the cache
comodel.browse(to_delete).unlink()
to_delete.clear()
if to_create:
# create() will add the new lines to the cache of records
comodel.create(to_create)
to_create.clear()
if to_inverse:
for record, inverse_ids in to_inverse.items():
lines = comodel.browse(inverse_ids)
lines = lines.filtered(lambda line: int(line[inverse]) != record.id)
lines[inverse] = record
for recs, commands in records_commands_list:
for command in (commands or ()):
if command[0] == 0:
for record in recs:
to_create.append(dict(command[2], **{inverse: record.id}))
allow_full_delete = False
elif command[0] == 1:
comodel.browse(command[1]).write(command[2])
elif command[0] == 2:
to_delete.append(command[1])
elif command[0] == 3:
unlink(comodel.browse(command[1]))
elif command[0] == 4:
to_inverse.setdefault(recs[-1], set()).add(command[1])
allow_full_delete = False
                    elif command[0] in (5, 6):
# do not try to delete anything in creation mode if nothing has been created before
line_ids = command[2] if command[0] == 6 else []
if not allow_full_delete and not line_ids:
continue
flush()
# assign the given lines to the last record only
lines = comodel.browse(line_ids)
domain = self.get_domain_list(model) + \
[(inverse, 'in', recs.ids), ('id', 'not in', lines.ids)]
unlink(comodel.search(domain))
lines[inverse] = recs[-1]
flush()
else:
cache = records.env.cache
def link(record, lines):
ids = record[self.name]._ids
cache.set(record, self, tuple(unique(ids + lines._ids)))
def unlink(lines):
for record in records:
cache.set(record, self, (record[self.name] - lines)._ids)
for recs, commands in records_commands_list:
for command in (commands or ()):
if command[0] == 0:
for record in recs:
link(record, comodel.new(command[2], ref=command[1]))
elif command[0] == 1:
comodel.browse(command[1]).write(command[2])
elif command[0] == 2:
unlink(comodel.browse(command[1]))
elif command[0] == 3:
unlink(comodel.browse(command[1]))
elif command[0] == 4:
link(recs[-1], comodel.browse(command[1]))
elif command[0] in (5, 6):
# assign the given lines to the last record only
cache.update(recs, self, [()] * len(recs))
lines = comodel.browse(command[2] if command[0] == 6 else [])
cache.set(recs[-1], self, lines._ids)
return records
def write_new(self, records_commands_list):
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
cache = model.env.cache
comodel = model.env[self.comodel_name].with_context(**self.context)
ids = {record.id for records, _ in records_commands_list for record in records}
records = model.browse(ids)
def browse(ids):
return comodel.browse([id_ and NewId(id_) for id_ in ids])
# make sure self is in cache
records[self.name]
if self.store:
inverse = self.inverse_name
# make sure self's inverse is in cache
inverse_field = comodel._fields[inverse]
for record in records:
cache.update(record[self.name], inverse_field, itertools.repeat(record.id))
for recs, commands in records_commands_list:
for command in commands:
if command[0] == 0:
for record in recs:
line = comodel.new(command[2], ref=command[1])
line[inverse] = record
elif command[0] == 1:
browse([command[1]]).update(command[2])
elif command[0] == 2:
browse([command[1]])[inverse] = False
elif command[0] == 3:
browse([command[1]])[inverse] = False
elif command[0] == 4:
browse([command[1]])[inverse] = recs[-1]
elif command[0] == 5:
cache.update(recs, self, itertools.repeat(()))
elif command[0] == 6:
# assign the given lines to the last record only
cache.update(recs, self, itertools.repeat(()))
last, lines = recs[-1], browse(command[2])
cache.set(last, self, lines._ids)
cache.update(lines, inverse_field, itertools.repeat(last.id))
else:
def link(record, lines):
ids = record[self.name]._ids
cache.set(record, self, tuple(unique(ids + lines._ids)))
def unlink(lines):
for record in records:
cache.set(record, self, (record[self.name] - lines)._ids)
for recs, commands in records_commands_list:
for command in commands:
if command[0] == 0:
for record in recs:
link(record, comodel.new(command[2], ref=command[1]))
elif command[0] == 1:
browse([command[1]]).update(command[2])
elif command[0] == 2:
unlink(browse([command[1]]))
elif command[0] == 3:
unlink(browse([command[1]]))
elif command[0] == 4:
link(recs[-1], browse([command[1]]))
elif command[0] in (5, 6):
# assign the given lines to the last record only
cache.update(recs, self, [()] * len(recs))
lines = comodel.browse(command[2] if command[0] == 6 else [])
cache.set(recs[-1], self, lines._ids)
return records
class Many2many(_RelationalMulti):
""" Many2many field; the value of such a field is the recordset.
:param comodel_name: name of the target model (string)
mandatory except in the case of related or extended fields
:param str relation: optional name of the table that stores the relation in
the database
:param str column1: optional name of the column referring to "these" records
in the table ``relation``
:param str column2: optional name of the column referring to "those" records
in the table ``relation``
The attributes ``relation``, ``column1`` and ``column2`` are optional.
If not given, names are automatically generated from model names,
provided ``model_name`` and ``comodel_name`` are different!
Note that having several fields with implicit relation parameters on a
given model with the same comodel is not accepted by the ORM, since
    those fields would use the same table. The ORM prevents two many2many
    fields from using the same relation parameters, except if
- both fields use the same model, comodel, and relation parameters are
explicit; or
- at least one field belongs to a model with ``_auto = False``.
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param bool check_company: Mark the field to be verified in
:meth:`~odoo.models.Model._check_company`. Add a default company
domain depending on the field attributes.
:param int limit: optional limit to use upon read
"""
type = 'many2many'
_explicit = True # whether schema is explicitly given
relation = None # name of table
column1 = None # column of table referring to model
column2 = None # column of table referring to comodel
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
ondelete = None # optional ondelete for the column2 fkey
def __init__(self, comodel_name=Default, relation=Default, column1=Default,
column2=Default, string=Default, **kwargs):
super(Many2many, self).__init__(
comodel_name=comodel_name,
relation=relation,
column1=column1,
column2=column2,
string=string,
**kwargs
)
def _setup_regular_base(self, model):
super(Many2many, self)._setup_regular_base(model)
# 3 cases:
# 1) The ondelete attribute is not defined, we assign it a sensible default
# 2) The ondelete attribute is defined and its definition makes sense
# 3) The ondelete attribute is explicitly defined as 'set null' for a m2m,
# this is considered a programming error.
self.ondelete = self.ondelete or 'cascade'
if self.ondelete == 'set null':
raise ValueError(
"The m2m field %s of model %s declares its ondelete policy "
"as being 'set null'. Only 'restrict' and 'cascade' make sense."
% (self.name, model._name)
)
if self.store:
if not (self.relation and self.column1 and self.column2):
self._explicit = False
# table name is based on the stable alphabetical order of tables
comodel = model.env[self.comodel_name]
if not self.relation:
tables = sorted([model._table, comodel._table])
assert tables[0] != tables[1], \
"%s: Implicit/canonical naming of many2many relationship " \
"table is not possible when source and destination models " \
"are the same" % self
self.relation = '%s_%s_rel' % tuple(tables)
if not self.column1:
self.column1 = '%s_id' % model._table
if not self.column2:
self.column2 = '%s_id' % comodel._table
# check validity of table name
check_pg_name(self.relation)
else:
self.relation = self.column1 = self.column2 = None
def _setup_regular_full(self, model):
super(Many2many, self)._setup_regular_full(model)
if self.relation:
m2m = model.pool._m2m
# check whether other fields use the same schema
fields = m2m[(self.relation, self.column1, self.column2)]
for field in fields:
if ( # same model: relation parameters must be explicit
self.model_name == field.model_name and
self.comodel_name == field.comodel_name and
self._explicit and field._explicit
) or ( # different models: one model must be _auto=False
self.model_name != field.model_name and
not (model._auto and model.env[field.model_name]._auto)
):
continue
msg = "Many2many fields %s and %s use the same table and columns"
raise TypeError(msg % (self, field))
fields.append(self)
# retrieve inverse fields, and link them in _field_inverses
for field in m2m[(self.relation, self.column2, self.column1)]:
model._field_inverses.add(self, field)
model.env[field.model_name]._field_inverses.add(field, self)
def update_db(self, model, columns):
cr = model._cr
# Do not reflect relations for custom fields, as they do not belong to a
# module. They are automatically removed when dropping the corresponding
# 'ir.model.field'.
if not self.manual:
model.pool.post_init(model.env['ir.model.relation']._reflect_relation,
model, self.relation, self._module)
comodel = model.env[self.comodel_name]
if not sql.table_exists(cr, self.relation):
query = """
CREATE TABLE "{rel}" ("{id1}" INTEGER NOT NULL,
"{id2}" INTEGER NOT NULL,
PRIMARY KEY("{id1}","{id2}"));
COMMENT ON TABLE "{rel}" IS %s;
CREATE INDEX ON "{rel}" ("{id2}","{id1}");
""".format(rel=self.relation, id1=self.column1, id2=self.column2)
cr.execute(query, ['RELATION BETWEEN %s AND %s' % (model._table, comodel._table)])
_schema.debug("Create table %r: m2m relation between %r and %r", self.relation, model._table, comodel._table)
model.pool.post_init(self.update_db_foreign_keys, model)
def update_db_foreign_keys(self, model):
""" Add the foreign keys corresponding to the field's relation table. """
comodel = model.env[self.comodel_name]
if model._is_an_ordinary_table():
model.pool.add_foreign_key(
self.relation, self.column1, model._table, 'id', 'cascade',
model, self._module, force=False,
)
if comodel._is_an_ordinary_table():
model.pool.add_foreign_key(
self.relation, self.column2, comodel._table, 'id', self.ondelete,
model, self._module,
)
def read(self, records):
context = {'active_test': False}
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
domain = self.get_domain_list(records)
comodel._flush_search(domain)
wquery = comodel._where_calc(domain)
comodel._apply_ir_rules(wquery, 'read')
order_by = comodel._generate_order_by(None, wquery)
from_c, where_c, where_params = wquery.get_sql()
query = """ SELECT {rel}.{id1}, {rel}.{id2} FROM {rel}, {from_c}
WHERE {where_c} AND {rel}.{id1} IN %s AND {rel}.{id2} = {tbl}.id
{order_by} {limit} OFFSET {offset}
""".format(rel=self.relation, id1=self.column1, id2=self.column2,
tbl=comodel._table, from_c=from_c, where_c=where_c or '1=1',
limit=(' LIMIT %d' % self.limit) if self.limit else '',
offset=0, order_by=order_by)
where_params.append(tuple(records.ids))
# retrieve lines and group them by record
group = defaultdict(list)
records._cr.execute(query, where_params)
for row in records._cr.fetchall():
group[row[0]].append(row[1])
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write_real(self, records_commands_list, create=False):
# records_commands_list = [(records, commands), ...]
if not records_commands_list:
return
comodel = records_commands_list[0][0].env[self.comodel_name].with_context(**self.context)
cr = records_commands_list[0][0].env.cr
# determine old and new relation {x: ys}
set = OrderedSet
ids = {rid for recs, cs in records_commands_list for rid in recs.ids}
records = records_commands_list[0][0].browse(ids)
if self.store:
# Using `record[self.name]` generates 2 SQL queries when the value
# is not in cache: one that actually checks access rules for
# records, and the other one fetching the actual data. We use
# `self.read` instead to shortcut the first query.
missing_ids = list(records.env.cache.get_missing_ids(records, self))
if missing_ids:
self.read(records.browse(missing_ids))
old_relation = {record.id: set(record[self.name]._ids) for record in records}
new_relation = {x: set(ys) for x, ys in old_relation.items()}
# operations on new relation
def relation_add(xs, y):
for x in xs:
new_relation[x].add(y)
def relation_remove(xs, y):
for x in xs:
new_relation[x].discard(y)
def relation_set(xs, ys):
for x in xs:
new_relation[x] = set(ys)
def relation_delete(ys):
# the pairs (x, y) have been cascade-deleted from relation
for ys1 in old_relation.values():
ys1 -= ys
for ys1 in new_relation.values():
ys1 -= ys
for recs, commands in records_commands_list:
to_create = [] # line vals to create
to_delete = [] # line ids to delete
for command in (commands or ()):
if not isinstance(command, (list, tuple)) or not command:
continue
if command[0] == 0:
to_create.append((recs._ids, command[2]))
elif command[0] == 1:
comodel.browse(command[1]).write(command[2])
elif command[0] == 2:
to_delete.append(command[1])
elif command[0] == 3:
relation_remove(recs._ids, command[1])
elif command[0] == 4:
relation_add(recs._ids, command[1])
elif command[0] in (5, 6):
# new lines must no longer be linked to records
to_create = [(set(ids) - set(recs._ids), vals) for (ids, vals) in to_create]
relation_set(recs._ids, command[2] if command[0] == 6 else ())
if to_create:
# create lines in batch, and link them
lines = comodel.create([vals for ids, vals in to_create])
for line, (ids, vals) in zip(lines, to_create):
relation_add(ids, line.id)
if to_delete:
# delete lines in batch
comodel.browse(to_delete).unlink()
relation_delete(to_delete)
# update the cache of self
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(new_relation[record.id]))
# process pairs to add (beware of duplicates)
pairs = [(x, y) for x, ys in new_relation.items() for y in ys - old_relation[x]]
if pairs:
if self.store:
query = "INSERT INTO {} ({}, {}) VALUES {} ON CONFLICT DO NOTHING".format(
self.relation, self.column1, self.column2, ", ".join(["%s"] * len(pairs)),
)
cr.execute(query, pairs)
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records._field_inverses[self]:
domain = invf.get_domain_list(comodel)
valid_ids = set(records.filtered_domain(domain)._ids)
if not valid_ids:
continue
for y, xs in y_to_xs.items():
corecord = comodel.browse(y)
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(set(ids0) | (xs & valid_ids))
cache.set(corecord, invf, ids1)
except KeyError:
pass
# process pairs to remove
pairs = [(x, y) for x, ys in old_relation.items() for y in ys - new_relation[x]]
if pairs:
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
if self.store:
# express pairs as the union of cartesian products:
# pairs = [(1, 11), (1, 12), (1, 13), (2, 11), (2, 12), (2, 14)]
# -> y_to_xs = {11: {1, 2}, 12: {1, 2}, 13: {1}, 14: {2}}
# -> xs_to_ys = {{1, 2}: {11, 12}, {2}: {14}, {1}: {13}}
xs_to_ys = defaultdict(set)
for y, xs in y_to_xs.items():
xs_to_ys[frozenset(xs)].add(y)
# delete the rows where (id1 IN xs AND id2 IN ys) OR ...
COND = "{} IN %s AND {} IN %s".format(self.column1, self.column2)
query = "DELETE FROM {} WHERE {}".format(
self.relation, " OR ".join([COND] * len(xs_to_ys)),
)
params = [arg for xs, ys in xs_to_ys.items() for arg in [tuple(xs), tuple(ys)]]
cr.execute(query, params)
# update the cache of inverse fields
for invf in records._field_inverses[self]:
for y, xs in y_to_xs.items():
corecord = comodel.browse(y)
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(id_ for id_ in ids0 if id_ not in xs)
cache.set(corecord, invf, ids1)
except KeyError:
pass
return records.filtered(
lambda record: new_relation[record.id] != old_relation[record.id]
)
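    # Illustrative sketch (hypothetical records): a many2many assignment such as
    #
    #     partner.write({'category_id': [(6, 0, [cat_a.id, cat_b.id])]})
    #
    # reaches write_real as a (6, _, ids) command; new_relation is rebuilt from it and
    # only the resulting pair insertions/deletions hit the relation table.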
def write_new(self, records_commands_list):
""" Update self on new records. """
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
new = lambda id_: id_ and NewId(id_)
# determine old and new relation {x: ys}
set = OrderedSet
old_relation = {record.id: set(record[self.name]._ids) for records, _ in records_commands_list for record in records}
new_relation = {x: set(ys) for x, ys in old_relation.items()}
ids = set(old_relation.keys())
records = model.browse(ids)
for recs, commands in records_commands_list:
for command in commands:
if not isinstance(command, (list, tuple)) or not command:
continue
if command[0] == 0:
line_id = comodel.new(command[2], ref=command[1]).id
for line_ids in new_relation.values():
line_ids.add(line_id)
elif command[0] == 1:
line_id = new(command[1])
comodel.browse([line_id]).update(command[2])
elif command[0] == 2:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.discard(line_id)
elif command[0] == 3:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.discard(line_id)
elif command[0] == 4:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.add(line_id)
elif command[0] in (5, 6):
# new lines must no longer be linked to records
line_ids = command[2] if command[0] == 6 else ()
line_ids = set(new(line_id) for line_id in line_ids)
for id_ in recs._ids:
new_relation[id_] = set(line_ids)
if new_relation == old_relation:
return records.browse()
# update the cache of self
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(new_relation[record.id]))
# process pairs to add (beware of duplicates)
pairs = [(x, y) for x, ys in new_relation.items() for y in ys - old_relation[x]]
if pairs:
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records._field_inverses[self]:
domain = invf.get_domain_list(comodel)
valid_ids = set(records.filtered_domain(domain)._ids)
if not valid_ids:
continue
for y, xs in y_to_xs.items():
corecord = comodel.browse([y])
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(set(ids0) | (xs & valid_ids))
cache.set(corecord, invf, ids1)
except KeyError:
pass
# process pairs to remove
pairs = [(x, y) for x, ys in old_relation.items() for y in ys - new_relation[x]]
if pairs:
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records._field_inverses[self]:
for y, xs in y_to_xs.items():
corecord = comodel.browse([y])
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(id_ for id_ in ids0 if id_ not in xs)
cache.set(corecord, invf, ids1)
except KeyError:
pass
return records.filtered(
lambda record: new_relation[record.id] != old_relation[record.id]
)
class Id(Field):
""" Special case for field 'id'. """
type = 'integer'
column_type = ('int4', 'int4')
string = 'ID'
store = True
readonly = True
prefetch = False
def update_db(self, model, columns):
pass # this column is created with the table
def __get__(self, record, owner):
if record is None:
return self # the field is accessed through the class owner
# the code below is written to make record.id as quick as possible
ids = record._ids
size = len(ids)
if size == 0:
return False
elif size == 1:
return ids[0]
raise ValueError("Expected singleton: %s" % record)
def __set__(self, record, value):
raise TypeError("field 'id' cannot be assigned")
def prefetch_many2one_ids(record, field):
""" Return an iterator over the ids of the cached values of a many2one
field for the prefetch set of a record.
"""
records = record.browse(record._prefetch_ids)
ids = record.env.cache.get_values(records, field)
return unique(id_ for id_ in ids if id_ is not None)
def prefetch_x2many_ids(record, field):
""" Return an iterator over the ids of the cached values of an x2many
field for the prefetch set of a record.
"""
records = record.browse(record._prefetch_ids)
ids_list = record.env.cache.get_values(records, field)
return unique(id_ for ids in ids_list for id_ in ids)
def apply_required(model, field_name):
""" Set a NOT NULL constraint on the given field, if necessary. """
# At the time this function is called, the model's _fields may have been reset, although
# the model's class is still the same. Retrieve the field to see whether the NOT NULL
# constraint still applies
field = model._fields[field_name]
if field.store and field.required:
sql.set_not_null(model.env.cr, model._table, field_name)
# imported here to avoid dependency cycle issues
from .exceptions import AccessError, MissingError, UserError
from .models import check_pg_name, BaseModel, NewId, IdType, expand_ids, PREFETCH_MAX
| agpl-3.0 | 7,060,470,593,801,443,000 | 41.981563 | 132 | 0.57512 | false |
kansanmuisti/kamu | parliament/models/funding.py | 1 | 1287 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from parliament.models.member import Member
from parliament.models.session import Term
class FundingSource(models.Model):
TYPES = (
('co', _('Corporation')),
('ind', _('Individual')),
('party', _('Party')),
)
name = models.CharField(max_length=120, null=True, blank=True)
class Meta:
app_label = 'parliament'
class Funding(models.Model):
TYPES = (
('own', _('Own funds')),
('co', _('Corporation')),
('ind', _('Individual')),
('loan', _('Loan')),
('u_ind', _('Undefined individuals')),
('u_com', _('Undefined communities')),
('party', _('Party')),
('oth', _('Other')),
)
type = models.CharField(max_length=6, choices=TYPES)
member = models.ForeignKey(Member, on_delete=models.CASCADE, db_index=True)
term = models.ForeignKey(Term, on_delete=models.CASCADE)
source = models.ForeignKey(FundingSource, on_delete=models.CASCADE, null=True, blank=True)
amount = models.DecimalField(max_digits=10, decimal_places=2)
class Meta:
app_label = 'parliament'
unique_together = (('member', 'term', 'type', 'source'),)
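# Illustrative query (hypothetical variable `term`): total declared funding per member
# for a given term:
#
#     Funding.objects.filter(term=term).values('member').annotate(total=models.Sum('amount'))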
| agpl-3.0 | -1,693,515,536,714,444,500 | 31.175 | 94 | 0.608392 | false |
legalsylvain/odoo-addons-oli | mrp_to_purchase/model/__init__.py | 1 | 1070 | # -*- encoding: utf-8 -*-
##############################################################################
#
# MRP To Purchase module for Odoo
# Copyright (C) 2015-Today Akretion (http://www.akretion.com)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import mrp_production
| agpl-3.0 | -7,551,090,035,109,000,000 | 45.521739 | 78 | 0.61215 | false |
dc3-plaso/dfvfs | dfvfs/path/fvde_path_spec.py | 1 | 2024 | # -*- coding: utf-8 -*-
"""The FileVault Drive Encryption (FVDE) path specification implementation."""
from dfvfs.lib import definitions
from dfvfs.path import factory
from dfvfs.path import path_spec
class FVDEPathSpec(path_spec.PathSpec):
"""Class that implements the FVDE path specification.
Attributes:
encrypted_root_plist (str): path to the EncryptedRoot.plist.wipekey file.
password (str): password.
recovery_password (str): recovery password.
"""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_FVDE
def __init__(
self, encrypted_root_plist=None, password=None, parent=None,
recovery_password=None, **kwargs):
"""Initializes the path specification.
Note that the FVDE path specification must have a parent.
Args:
      encrypted_root_plist (Optional[str]): path to the
EncryptedRoot.plist.wipekey file.
password (Optional[str]): password.
parent (Optional[PathSpec]): parent path specification.
recovery_password (Optional[str]): recovery password.
Raises:
ValueError: when parent is not set.
"""
if not parent:
raise ValueError(u'Missing parent value.')
super(FVDEPathSpec, self).__init__(parent=parent, **kwargs)
self.encrypted_root_plist = encrypted_root_plist
self.password = password
self.recovery_password = recovery_password
@property
def comparable(self):
"""str: comparable representation of the path specification."""
string_parts = []
if self.encrypted_root_plist:
string_parts.append(u'encrypted_root_plist: {0:s}'.format(
self.encrypted_root_plist))
if self.password:
string_parts.append(u'password: {0:s}'.format(self.password))
if self.recovery_password:
string_parts.append(u'recovery_password: {0:s}'.format(
self.recovery_password))
return self._GetComparable(sub_comparable_string=u', '.join(string_parts))
# Register the path specification with the factory.
factory.Factory.RegisterPathSpec(FVDEPathSpec)
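# Illustrative construction (hypothetical values): an FVDE path specification always
# wraps a parent, e.g. a volume stored in a raw image file:
#
#     os_path_spec = factory.Factory.NewPathSpec(
#         definitions.TYPE_INDICATOR_OS, location='/tmp/image.raw')
#     fvde_path_spec = FVDEPathSpec(password='secret', parent=os_path_spec)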
| apache-2.0 | 3,737,688,749,816,931,300 | 31.126984 | 78 | 0.700593 | false |
emilydolson/avida-spatial-tools | avidaspatial/utils.py | 1 | 11662 | # This file contains functions that are used throughout avida-spatial-tools
from math import sqrt, floor
from copy import deepcopy
import pysal
import numpy as np
from .environment_file import *
import seaborn as sns
def get_kwargs(grid, kwargs, phenotypes=False):
"""
Helper function to figure out what denom and palette to use, based on the
kwargs and the grid being plotted. The optional (default: false) argument
indicates whether the grid contains phenotypes, as opposed to resources.
"""
denom = None
if "denom" in kwargs:
denom = kwargs["denom"]
if "palette" in kwargs:
palette = kwargs["palette"]
if denom is None:
denom = len(palette)
elif "environment" in kwargs or isinstance(grid, EnvironmentFile):
if "environment" in kwargs:
env = kwargs["environment"]
else:
env = grid
if phenotypes:
palette = env.task_palette
if denom is None:
denom = len(env.tasks)
else:
palette = env.resource_palette
if denom is None:
denom = len(env.resources)
else:
length = get_pallete_length(grid)
palette = sns.hls_palette(length, s=1)
denom = length
return denom, palette
def get_pallete_length(grid):
"""
Takes a 2d grid and figures out how many different elements are in it, so
that we know how big to make the palette. Also avoids the unfortunate
red/green palette that results from too few elements.
Returns int indicating the length the palette should have.
"""
elements = list(set(flatten_array(grid)))
length = len(elements)
if type(elements[0]) is str:
lengths = [len(el) for el in elements if not el.startswith("-")]
if max(lengths) < 5: # Mixing red and green
length += 2 # is not pretty so let's avoid it
return length
def agg_grid(grid, agg=None):
"""
Many functions return a 2d list with a complex data type in each cell.
For instance, grids representing environments have a set of resources,
while reading in multiple data files at once will yield a list
containing the values for that cell from each file. In order to visualize
these data types it is helpful to summarize the more complex data types
with a single number. For instance, you might want to take the length
of a resource set to see how many resource types are present. Alternately,
you might want to take the mode of a list to see the most common phenotype
in a cell.
This function facilitates this analysis by calling the given aggregation
function (agg) on each cell of the given grid and returning the result.
    agg - A function indicating how to summarize grid contents.
          Default: mode (or string_avg for grids of binary strings).
"""
grid = deepcopy(grid)
if agg is None:
if type(grid[0][0]) is list and type(grid[0][0][0]) is str:
agg = string_avg
else:
agg = mode
for i in range(len(grid)):
for j in range(len(grid[i])):
grid[i][j] = agg(grid[i][j])
return grid
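# Example (hypothetical grid): summarize each cell's resource set by its size:
#
#     grid = [[{"res0"}, {"res0", "res1"}], [set(), {"res1"}]]
#     agg_grid(grid, agg=len)  # -> [[1, 2], [0, 1]]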
def slice_3d_grid(grid, n):
"""
Takes a three dimensional array and an integer (n) and returns a 2d array
containing the Nth value from the 3rd dimension at each location in the
grid.
"""
phen_grid = initialize_grid((len(grid[0]), len(grid)), 0)
for i in range(len(grid)):
for j in range(len(grid[i])):
phen_grid[i][j] = grid[i][j][n]
return phen_grid
def flatten_array(grid):
"""
Takes a multi-dimensional array and returns a 1 dimensional array with the
same contents.
"""
grid = [grid[i][j] for i in range(len(grid)) for j in range(len(grid[i]))]
while type(grid[0]) is list:
grid = flatten_array(grid)
return grid
def prepend_zeros_to_lists(ls):
"""
    Takes a list of lists and prepends 0s to the beginning of each sub-list
until they are all the same length. Used for sign-extending binary numbers.
"""
longest = max([len(l) for l in ls])
for i in range(len(ls)):
while len(ls[i]) < longest:
ls[i].insert(0, "0")
def dict_increment(d, key, amount):
if key in d:
d[key] += amount
else:
d[key] = amount
def squared_toroidal_dist(p1, p2, world_size=(60, 60)):
"""
Separated out because sqrt has a lot of overhead
"""
halfx = world_size[0]/2.0
if world_size[0] == world_size[1]:
halfy = halfx
else:
halfy = world_size[1]/2.0
deltax = p1[0] - p2[0]
if deltax < -halfx:
deltax += world_size[0]
elif deltax > halfx:
deltax -= world_size[0]
deltay = p1[1] - p2[1]
if deltay < -halfy:
deltay += world_size[1]
elif deltay > halfy:
deltay -= world_size[1]
return deltax*deltax + deltay*deltay
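# Worked example: on a 60x60 torus, points (1, 1) and (59, 59) are only 2 steps apart
# along each axis after wrapping, so squared_toroidal_dist((1, 1), (59, 59)) == 2*2 + 2*2 == 8.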
def toroidal_dist(p1, p2, world_size=(60, 60)):
return sqrt(squared_toroidal_dist(p1, p2, world_size))
def dist(p1, p2):
"""
Returns the distance between the two given tuples.
"""
return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
def function_with_args(func, *args):
"""
Returns a function that calls a function with the specified arguments.
The returned function still takes one argument representing the first
positional argument.
This is mostly a helper function for using agg_grid with functions
requiring more information than the cell contents.
"""
def inner(arg):
return func(arg, *args)
return inner
def convert_world_to_phenotype(world):
"""
Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings
"""
if set(world.resources) != set(world.tasks):
print("Warning: world phenotypes don't correspond to phenotypes")
if set(world.resources).issubset(set(world.tasks)):
conversion_func = function_with_args(res_set_to_phenotype, world.tasks)
else:
conversion_func = \
function_with_args(res_set_to_phenotype, world.resources)
grid = agg_grid(deepcopy(world), conversion_func)
return grid
def phenotype_to_res_set(phenotype, resources):
"""
Converts a binary string to a set containing the resources indicated by
the bits in the string.
Inputs: phenotype - a binary string
            resources - a list of strings indicating which resources correspond
to which indices of the phenotype
returns: A set of strings indicating resources
"""
assert(phenotype[0:2] == "0b")
phenotype = phenotype[2:]
# Fill in leading zeroes
while len(phenotype) < len(resources):
phenotype = "0" + phenotype
res_set = set()
for i in range(len(phenotype)):
if phenotype[i] == "1":
res_set.add(resources[i])
assert(phenotype.count("1") == len(res_set))
return res_set
def res_set_to_phenotype(res_set, full_list):
"""
Converts a set of strings indicating resources to a binary string where
the positions of 1s indicate which resources are present.
Inputs: res_set - a set of strings indicating which resources are present
            full_list - a list of strings indicating all resources which
                    could be present, and the order in which they should
map to bits in the phenotype
returns: A binary string
"""
full_list = list(full_list)
phenotype = len(full_list) * ["0"]
for i in range(len(full_list)):
if full_list[i] in res_set:
phenotype[i] = "1"
assert(phenotype.count("1") == len(res_set))
    # Remove unnecessary leading 0s
while phenotype[0] == "0" and len(phenotype) > 1:
phenotype = phenotype[1:]
return "0b"+"".join(phenotype)
def weighted_hamming(b1, b2):
"""
Hamming distance that emphasizes differences earlier in strings.
"""
assert(len(b1) == len(b2))
hamming = 0
for i in range(len(b1)):
if b1[i] != b2[i]:
# differences at more significant (leftward) bits
# are more important
            if i > 0:  # guard against division by zero at the first position
                hamming += 1 + 1.0/i
# This weighting is completely arbitrary
return hamming
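# Worked example: weighted_hamming("0b10", "0b00") adds 1 + 1/2 for the single mismatch
# at position 2, giving 1.5; a mismatch at position 0 would add nothing (guard above).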
def n_tasks(dec_num):
"""
Takes a decimal number as input and returns the number of ones in the
binary representation.
This translates to the number of tasks being done by an organism with a
phenotype represented as a decimal number.
"""
bitstring = ""
try:
bitstring = dec_num[2:]
except:
bitstring = bin(int(dec_num))[2:] # cut off 0b
# print bin(int(dec_num)), bitstring
return bitstring.count("1")
def convert_to_pysal(data):
"""
Pysal expects a distance matrix, and data formatted in a numpy array.
This functions takes a data grid and returns those things.
"""
w = pysal.lat2W(len(data[0]), len(data))
data = np.array(data)
data = np.reshape(data, (len(data)*len(data[0]), 1))
return w, data
# ~~~~~~~~~~~~~~~~~~~~~~AGGREGATION FUNCTIONS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Provided for easy use with agg_grid
def mode(ls):
"""
Takes a list as an argument and returns the mode of (most common item in)
that list.
"""
return max(set(ls), key=ls.count)
def mean(ls):
"""
Takes a list and returns the mean.
"""
return float(sum(ls))/len(ls)
def median(ls):
"""
    Takes a list and returns the median (the upper of the two middle
    elements for even-length lists).
"""
ls = sorted(ls)
return ls[int(floor(len(ls)/2.0))]
def string_avg(strings, binary=True):
"""
Takes a list of strings of equal length and returns a string containing
the most common value from each index in the string.
Optional argument: binary - a boolean indicating whether or not to treat
strings as binary numbers (fill in leading zeros if lengths differ).
"""
if binary: # Assume this is a binary number and fill leading zeros
strings = deepcopy(strings)
longest = len(max(strings, key=len))
for i in range(len(strings)):
while len(strings[i]) < longest:
split_string = strings[i].split("b")
strings[i] = "0b0" + split_string[1]
avg = ""
for i in (range(len(strings[0]))):
opts = []
for s in strings:
opts.append(s[i])
avg += max(set(opts), key=opts.count)
return avg
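# Example: string_avg(["0b110", "0b011", "0b010"]) -> "0b010"
# (each position takes the most common character across the input strings).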
def get_world_dimensions(gridfile, delim=" "):
"""
This function takes the name of a file in grid_task format and returns
the dimensions of the world it represents.
"""
infile = open(gridfile)
lines = infile.readlines()
infile.close()
world_x = len(lines[0].strip().split(delim))
world_y = len(lines)
return (world_x, world_y)
def initialize_grid(world_size, inner):
"""
Creates an empty grid (2d list) with the dimensions specified in
world_size. Each element is initialized to the inner argument.
"""
data = []
for i in range(world_size[1]):
data.append([])
for j in range(world_size[0]):
data[i].append(deepcopy(inner))
return data
| mit | -5,382,053,387,468,629,000 | 28.6743 | 79 | 0.621763 | false |
squaresLab/Houston | experiments/filter_truth.py | 1 | 2853 | from typing import Iterator, Tuple, Set, List, Dict, Any, Optional, Type
import argparse
import logging
import sys
import os
import concurrent.futures
from ruamel.yaml import YAML
from houston.mission import Mission
from compare_traces import load_file as load_traces_file
from compare_traces import is_truth_valid
logger = logging.getLogger('houston') # type: logging.Logger
logger.setLevel(logging.DEBUG)
DESCRIPTION = "Filter out ground truth data."
VALID_LIST_OUTPUT = "valid_list.yml"
def setup_logging(verbose: bool = False) -> None:
log_to_stdout = logging.StreamHandler()
log_to_stdout.setLevel(logging.DEBUG if verbose else logging.INFO)
logging.getLogger('houston').addHandler(log_to_stdout)
logging.getLogger('experiment').addHandler(log_to_stdout)
def parse_args():
p = argparse.ArgumentParser(description=DESCRIPTION)
p.add_argument('oracle', type=str, help='path to oracle trace directory.')
p.add_argument('--threads', type=int, default=1,
help='number of threads')
p.add_argument('--verbose', action='store_true',
help='increases logging verbosity')
return p.parse_args()
def validate_truth(dir_oracle: str, fn_trace: str) -> Tuple[bool, str]:
mission, oracle_traces = load_traces_file(os.path.join(dir_oracle, fn_trace))
oracle_traces = [t for t in oracle_traces if t.commands]
return is_truth_valid(oracle_traces, 3), fn_trace
def filter_truth_traces(dir_oracle: str,
threads: int) -> List[str]:
trace_filenames = \
[fn for fn in os.listdir(dir_oracle) if fn.endswith('.json')]
valid_traces = []
futures = []
with concurrent.futures.ProcessPoolExecutor(threads) as e:
for fn in trace_filenames:
future = e.submit(validate_truth, dir_oracle, fn)
futures.append(future)
logger.debug("submitted all candidates")
for future in concurrent.futures.as_completed(futures):
valid, trace = future.result()
if valid:
valid_traces.append(trace)
logger.info("trace %s is valid", trace)
else:
logger.info("trace %s is invalid", trace)
logger.debug("finished all")
return valid_traces
def main():
args = parse_args()
setup_logging(verbose=args.verbose)
dir_oracle = args.oracle
if not os.path.exists(dir_oracle):
logger.error("oracle directory not found: %s", dir_oracle)
sys.exit(1)
# obtain a list of oracle traces
trace_filenames = filter_truth_traces(dir_oracle, threads=args.threads)
logger.info("Total number of %d valid truth", len(trace_filenames))
with open(os.path.join(dir_oracle, VALID_LIST_OUTPUT), "w") as f:
YAML().dump(trace_filenames, f)
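# Example invocation (hypothetical path):
#
#     python filter_truth.py /path/to/oracle-traces --threads 8 --verbose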
if __name__ == '__main__':
main()
| mit | -7,573,749,851,091,781,000 | 31.793103 | 81 | 0.661059 | false |
FluVigilanciaBR/seasonality | methods/mem/sinan_mem_inset_thresholds.py | 1 | 45032 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
import argparse
import logging
from argparse import RawDescriptionHelpFormatter

import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import rpy2.robjects as ro
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
from scipy.stats.mstats import gmean

pandas2ri.activate()
module_logger = logging.getLogger('update_system.sinan_mem_inset_thresholds')
# Load R MEM package:
try:
    mem = importr('mem')
except Exception:
    # Fall back to a user-local R library path when 'mem' is not in the default library.
    mem = importr('mem', lib_loc="/home/marfcg/R/x86_64-pc-linux-gnu-library/4.0")
try:
    ro.r.require('mem')
except Exception:
    ro.r.require('mem', lib_loc="/home/marfcg/R/x86_64-pc-linux-gnu-library/4.0")
# UF codes
tabela_ufnome = {'11': 'Rondônia',
'12': 'Acre',
'13': 'Amazonas',
'14': 'Roraima',
'15': 'Pará',
'16': 'Amapá',
'17': 'Tocantins',
'21': 'Maranhão',
'22': 'Piauí',
'23': 'Ceará',
'24': 'Rio Grande do Norte',
'25': 'Paraíba',
'26': 'Pernambuco',
'27': 'Alagoas',
'28': 'Sergipe',
'29': 'Bahia',
'31': 'Minas Gerais',
'32': 'Espírito Santo',
'33': 'Rio de Janeiro',
'35': 'São Paulo',
'41': 'Paraná',
'42': 'Santa Catarina',
'43': 'Rio Grande do Sul',
'50': 'Mato Grosso do Sul',
'51': 'Mato Grosso',
'52': 'Goiás',
'53': 'Distrito Federal',
'RegN': 'Regional Norte',
'RegC': 'Regional Centro',
'RegL': 'Regional Leste',
'RegS': 'Regional Sul',
'BR': 'Brasil',
'S': 'Região Sul',
'N': 'Região Norte',
'CO': 'Região Centro-oeste',
'NE': 'Região Nordeste',
'SE': 'Região Sudeste'}
tabela_ufcod = {v: k for k, v in tabela_ufnome.items()}
fontproplgd = fm.FontProperties('Oswald')
fontproplgd.set_size(28)
fontproplbl = fm.FontProperties('Oswald')
fontproplbl.set_size(42)
fontproplblinset = fm.FontProperties('Oswald')
fontproplblinset.set_size(30)
fontpropticks = fontproplblinset.copy()
fontpropticks.set_size(24)
fontpropticksinset = fontpropticks.copy()
fontpropticksinset.set_size(20)
def to_bool(v):
if v in [True, 'T', 't', 'True', 'true', 1, '1']:
v = True
else:
v = False
return v
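# Note: any value outside the accepted set maps to False, e.g. to_bool('T') -> True
# but to_bool('yes') -> False; the command-line --plot flag is parsed through this.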
def discardseasons(df, seasons, gdthres=2.0, smin=5):
"""
Calculate peak variability in order to keep only seasons with relatively low variability rdthres.
Always mantain at least smin seasons.
:param df: data frame with seasons by columns
:param seasons: list of column names corresponding to each season
:param gdthres: maximum geometric deviation from median
:param smin: minimum number of seasons maintained
:return drop_seasons: list with seasons to be dropped
"""
drop_seasons = []
seasons = seasons.copy()
# Drop null seasons
series = df[seasons].max()
drop_seasons = list(series[series == 0].index)
series.drop(drop_seasons, axis=0, inplace=True)
    # If at most smin seasons remain, there is nothing else to drop
nseasons = len(series)
nmax = nseasons - smin
if nmax <= 0:
return drop_seasons
####### Test removing one by one ######
# Take log of geometric deviation threshold for simplicity
gdthres = np.log(gdthres)
for n in range(nmax):
        # Peak values of the seasons still retained
        tmp_series = df[list(set(seasons).difference(drop_seasons))].max()
        # Log of the current geometric mean of peaks
        series_gmean = np.log(gmean(tmp_series))
# Calculate maximum geometric deviation from geometric mean
mgd = abs(np.log(tmp_series) - series_gmean).max()
if mgd > gdthres:
idx = abs(np.log(tmp_series) - series_gmean).idxmax()
drop_seasons.append(idx)
return drop_seasons
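# Illustrative sketch (hypothetical peak values; not executed at import): with gdthres=2.0,
# a season becomes a dropping candidate when its peak differs from the geometric mean of
# peaks by more than a factor of 2, down to at most smin kept seasons:
#
#     seasons = ['SRAG2013', 'SRAG2014', 'SRAG2015', 'SRAG2016', 'SRAG2017']
#     df_uf = pd.DataFrame([[10, 12, 9, 11, 200]], columns=seasons)
#     discardseasons(df_uf, seasons, gdthres=2.0, smin=4)  # -> ['SRAG2017'], the outlier peak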
def applymem(df, discarded_seasons=None, wdw_method=2, lower_bound=5.0):
rdf = ro.conversion.py2rpy(df)
seasons = sorted(list(df.columns))
    # Drop the seasons flagged for discarding (typically 2009 and the current season):
seasons = sorted(set(seasons).difference(discarded_seasons))
rseasons = ro.StrVector(seasons)
ro.globalenv['df'] = rdf
ro.globalenv['seasons'] = rseasons
    # Procedure for delimiting the epidemic period, passed in as wdw_method (see main())
    ro.globalenv['par.method'] = wdw_method
    # Method for obtaining typical time series evolution (default 2)
    ro.globalenv['par.type.curve'] = 2
    # Total number of points to obtain pre/post-threshold (will take n/seasons from each)
    ro.globalenv['par.n.max'] = 20
    # Confidence interval for modelled curve
    ro.globalenv['par.level.curve'] = 0.95
    # Confidence interval for pre/post-threshold
    ro.globalenv['par.level.threshold'] = 0.95
    # Method for obtaining intensity thresholds
    ro.globalenv['par.type.intensity'] = 6
    # Quantiles for intensity thresholds
    ro.globalenv['par.level.intensity'] = ro.FloatVector([0.40, 0.90, 0.975])
epimemrslt = ro.r('memmodel(i.data=subset(df, select=seasons), i.type.curve=par.type.curve, i.method=par.method,' +
'i.n.max=par.n.max, i.level.curve=par.level.curve, i.level.threshold=par.level.threshold,' +
'i.type.intensity=par.type.intensity, i.level.intensity=par.level.intensity)')
# Pre-epidemic threshold:
epithreshold = max(lower_bound, epimemrslt.rx2('pre.post.intervals')[0, 2])
typrealcurve = pd.DataFrame(epimemrslt.rx2('typ.real.curve'))
# Check for seasons below threshold:
dropseasons = set()
for s in seasons:
if df[s].max() < epithreshold:
dropseasons.add(s)
# Drop seasons below threshold and rerun algorithm:
episeasons = list(seasons)
if len(dropseasons) > 0 and len(dropseasons) < len(seasons):
episeasons = sorted(list(set(seasons).difference(dropseasons)))
ro.globalenv['episeasons'] = ro.StrVector(episeasons)
epimemrslt = ro.r('memmodel(i.data=df[episeasons], i.type.curve=par.type.curve,' +
'i.method=par.method,' +
'i.n.max=par.n.max, i.level.curve=par.level.curve, i.level.threshold=par.level.threshold,' +
'i.type.intensity=par.type.intensity, i.level.intensity=par.level.intensity)')
# Store results in python dictionary of objects
pyepimemrslt = {}
tgt_names = [
'pre.post.intervals',
'mean.start',
'ci.start',
'mean.length',
'ci.length',
'epi.intervals',
'typ.real.curve',
'typ.curve',
'moving.epidemics',
'n.seasons'
]
for name in tgt_names:
rdata = epimemrslt.rx2(name)
if name == 'call':
pyepimemrslt.update({name: str(rdata)})
        elif np.ndim(rdata) == 1:
pyepimemrslt.update({name: rdata[0]})
else:
pyepimemrslt.update({name: pd.DataFrame(rdata)})
# typ.curve is the typical curve obtained from averaging over epidemic seasons with time rescaled
# so that the start of the epidemic period coincides with mean.start
pyepimemrslt['typ.curve'].rename(columns={0: 'baixo', 1: 'mediano', 2: 'alto'}, inplace=True)
pyepimemrslt['typ.curve']['mediano'].fillna(0, inplace=True)
pyepimemrslt['typ.curve']['baixo'] = pyepimemrslt['typ.curve']['baixo'].where(
pyepimemrslt['typ.curve']['baixo'] >= 0,
other=0)
pyepimemrslt['typ.curve']['baixo'] = pyepimemrslt['typ.curve']['baixo']. \
where((-pyepimemrslt['typ.curve']['baixo'].isnull()), other=pyepimemrslt['typ.curve']['mediano'])
pyepimemrslt['typ.curve']['alto'] = pyepimemrslt['typ.curve']['alto']. \
where((-pyepimemrslt['typ.curve']['alto'].isnull()), other=pyepimemrslt['typ.curve']['mediano'])
pyepimemrslt['pre.post.intervals'].rename(index={0: 'pre', 1: 'post'}, inplace=True)
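    # Note: as consumed elsewhere in this module, each row of pre.post.intervals appears to
    # hold [lower CI bound, point estimate, upper CI bound] of the epidemic threshold; the
    # upper bound (column 2) is what is used as the operational pre/post-epidemic threshold.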
    # typ.real.curve is the typical curve without time shift, i.e. respecting the original
    # weeks of the data; for this curve it is better to keep all seasons, not only the
    # epidemic ones.
pyepimemrslt['typ.real.curve'] = typrealcurve.copy()
pyepimemrslt['typ.real.curve'].rename(columns={0: 'baixo', 1: 'mediano', 2: 'alto'}, inplace=True)
pyepimemrslt['typ.real.curve']['mediano'].fillna(0, inplace=True)
pyepimemrslt['typ.real.curve'].loc[pyepimemrslt['typ.real.curve']['baixo'] < 0, 'baixo'] = 0
pyepimemrslt['typ.real.curve']['baixo'] = pyepimemrslt['typ.real.curve']['baixo']. \
where((-pyepimemrslt['typ.real.curve']['baixo'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
pyepimemrslt['typ.real.curve']['alto'] = pyepimemrslt['typ.real.curve']['alto']. \
where((-pyepimemrslt['typ.real.curve']['alto'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
newcols = {}
for k, v in enumerate(episeasons):
newcols[k] = str(v) + ' transladado'
pyepimemrslt['moving.epidemics'].rename(columns=newcols, inplace=True)
return pyepimemrslt, dropseasons
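# Minimal usage sketch for applymem (illustrative; assumes a per-UF incidence table with
# one column per season, as built in main() below). Not executed at import:
#
#     thresholds, lowseasons = applymem(dftmp[seasons], discarded_seasons=['SRAG2009'],
#                                       wdw_method=2, lower_bound=0.05)
#     pre_threshold = thresholds['pre.post.intervals'].loc['pre', 2]  # operational threshold
#     corridor = thresholds['typ.real.curve'][['baixo', 'mediano', 'alto']]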
def extract_typ_real_curve(df, discarded_seasons=None, wdw_method=2, lower_bound=5.0):
seasons = sorted(list(df.columns))
seasons = sorted(set(seasons).difference(discarded_seasons))
rdf = ro.conversion.py2rpy(df)
rseasons = ro.StrVector(seasons)
ro.globalenv['df'] = rdf
ro.globalenv['seasons'] = rseasons
ro.globalenv['par.method'] = wdw_method
ro.globalenv['par.type.curve'] = 2
ro.globalenv['par.level.curve'] = 0.95
epimemrslt = ro.r('t(apply(subset(df, select=seasons), 1, memci, i.type.curve=par.type.curve, ' +
'i.level.curve=par.level.curve))')
    # Typical real curve with confidence bands, one row per epiweek:
typrealcurve = pd.DataFrame(epimemrslt)
# Store results in python dictionary of objects
pyepimemrslt = {}
    # typ.real.curve is the typical curve without time shift, i.e. respecting the original
    # weeks of the data; for this curve it is better to keep all seasons, not only the
    # epidemic ones.
pyepimemrslt['typ.real.curve'] = typrealcurve.copy()
pyepimemrslt['typ.real.curve'].rename(columns={0: 'baixo', 1: 'mediano', 2: 'alto'}, inplace=True)
pyepimemrslt['typ.real.curve']['mediano'].fillna(0, inplace=True)
pyepimemrslt['typ.real.curve'].loc[pyepimemrslt['typ.real.curve']['baixo'] < 0, 'baixo'] = 0
pyepimemrslt['typ.real.curve']['baixo'] = pyepimemrslt['typ.real.curve']['baixo']. \
where((-pyepimemrslt['typ.real.curve']['baixo'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
pyepimemrslt['typ.real.curve']['alto'] = pyepimemrslt['typ.real.curve']['alto']. \
where((-pyepimemrslt['typ.real.curve']['alto'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
return pyepimemrslt
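# extract_typ_real_curve is the fallback used in main() when the full memmodel fit fails:
# it only computes the week-aligned typical curve with confidence bands (via R's memci),
# so fixed thresholds are filled in by the caller.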
def plotmemcurve(uf, dftmp, dftmpinset, thresholds, seasons, lastseason, epicols):
sns.set_style('darkgrid')
sns.set_context("talk")
sns.set_palette('Set2', len(seasons) + 4)
colorcode = sns.color_palette('Set2', len(seasons) + 4)
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=[20, 20])
plt.subplots_adjust(hspace=0.3)
# Set ymax at least = 1:
maxval1 = dftmp[list(set(seasons).union(['corredor alto', 'intensidade muito alta']).
difference(['SRAG2009']))].max().max()
maxval2 = dftmp[list(set(seasons).union(['curva epi. alta', 'intensidade muito alta']).
difference(['SRAG2009']))].max().max()
if maxval1 < 1:
ax[0].set_ylim([0, 1])
ax[1].set_ylim([0, 1])
else:
ax[0].set_ylim([0, maxval1])
ax[1].set_ylim([0, maxval2])
ax[0].fill_between(dftmp['epiweek'], 0, dftmp['corredor baixo'], color='green', alpha=0.5)
ax[0].fill_between(dftmp['epiweek'], dftmp['corredor baixo'], dftmp['corredor mediano'], color='yellow',
alpha=0.5)
ax[0].fill_between(dftmp['epiweek'], dftmp['corredor mediano'], dftmp['corredor alto'], color='orange',
alpha=0.5)
dftmp.plot(ax=ax[0], x='epiweek', y=seasons)
dftmp.plot(ax=ax[0], x='epiweek', y=lastseason, color='k', lw=3)
dftmp.plot(ax=ax[0], x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
# dftmp.plot(ax=ax[0], x='epiweek', y='intensidade baixa', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade alta', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade muito alta', style='--', color=colorcode[-1])
# Check for maximum value on y-axis and fill from 'corredor alto' to maxy
dftmp.plot(ax=ax[0], x='epiweek', y='corredor alto', legend=False, alpha=0)
miny, maxy = ax[0].get_ylim()
del (ax[0].lines[-1])
ax[0].fill_between(dftmp['epiweek'], dftmp['corredor alto'], maxy, color='red', alpha=0.5)
ax[0].set_ylim([miny, maxy])
for label in ax[0].get_xticklabels():
label.set_fontproperties(fontpropticks)
for label in ax[0].get_yticklabels():
label.set_fontproperties(fontpropticks)
#### Start absolute value plot as inset ####
sns.set_style('whitegrid')
axinset = inset_axes(ax[0], width='35%', height='35%', loc=1)
maxval = dftmpinset[list(set(seasons).union([lastseason]).difference(['SRAG2009']))].max().max()
if maxval < 1:
axinset.set_ylim([0, 1])
else:
axinset.set_ylim([0, maxval])
dftmpinset.plot(ax=axinset, x='epiweek', y=seasons)
dftmpinset.plot(ax=axinset, x='epiweek', y=lastseason, color='k', lw=3)
dftmpinset.plot(ax=axinset, x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
axinset.legend_.remove()
axinset.set_xlabel('SE', fontproperties=fontproplblinset)
axinset.set_ylabel('Casos', fontproperties=fontproplblinset)
axinset.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for label in axinset.get_xticklabels():
label.set_fontproperties(fontpropticksinset)
for label in axinset.get_yticklabels():
label.set_fontproperties(fontpropticksinset)
#### Start plot relative to outbreak typical curve ####
ax[1].fill_between(dftmp['SE relativa ao início do surto'], 0, dftmp['curva epi. baixa'], color='green',
alpha=0.5)
ax[1].fill_between(dftmp['SE relativa ao início do surto'], dftmp['curva epi. baixa'],
dftmp['curva epi. mediana'], color='yellow', alpha=0.5)
ax[1].fill_between(dftmp['SE relativa ao início do surto'], dftmp['curva epi. mediana'],
dftmp['curva epi. alta'], color='orange', alpha=0.5)
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='curva epi. mediana', color='silver',
label='tendência mediana')
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='limiar pré-epidêmico', style='--',
color='red', alpha=0.8)
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='limiar pós-epidêmico', style='--',
color='green', alpha=0.5)
epicolor = []
for s in epicols:
        # Recover the season name by removing the ' transladado' suffix (str.strip would
        # treat the argument as a character set, which only works here by accident).
        s = s.replace(' transladado', '')
n = list(seasons).index(s)
epicolor.append(colorcode[n])
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y=epicols, color=epicolor)
# Check for maximum value on y-axis and fill from 'corredor alto' to maxy
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='curva epi. alta', legend=False, alpha=0)
miny, maxy = ax[1].get_ylim()
del (ax[1].lines[-1])
ax[1].fill_between(dftmp['SE relativa ao início do surto'], dftmp['curva epi. alta'], maxy, color='red',
alpha=0.5)
ax[1].set_ylim([miny, maxy])
ax[1].plot([0, 0], [miny, maxy], '--', color='silver')
duracao = int(thresholds['mean.length'][0])
ax[1].plot([duracao, duracao], [miny, maxy], '--', color='silver')
ax[1].set_title('Tendência ao longo do surto', fontproperties=fontproplbl)
epistart = int(thresholds['mean.start'][0])
ax[1].set_xlabel('SE em relação à semana típica de início do surto (SE=%s)' % epistart,
fontproperties=fontproplbl)
minx, maxx = ax[1].get_xlim()
    xticks = np.sort(np.append(np.arange(0, int(minx), -4), np.arange(4, int(maxx), 4)))
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticks, fontproperties=fontpropticks)
    for label in ax[1].get_yticklabels():
label.set_fontproperties(fontpropticks)
ax[1].set_ylabel('Incidência (por 100mil habitantes)', fontproperties=fontproplbl)
box = ax[1].get_position()
ax[1].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax[1].legend(prop=fontproplgd, loc='center left', bbox_to_anchor=(1, 0.5))
ax[0].set_title(tabela_ufnome[uf], fontproperties=fontproplbl)
ax[0].set_xlabel('SE', fontproperties=fontproplbl)
ax[0].set_ylabel('Incidência (por 100mil habitantes)', fontproperties=fontproplbl)
xticks = np.arange(4, 53, 4)
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticks)
    # Shrink current axis by 20% to make room for the legend
box = ax[0].get_position()
ax[0].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax[0].legend(prop=fontproplgd, loc='center left', bbox_to_anchor=(1, 0.5))
return fig
def plotmemfailedcurve(uf, dftmp, dftmpinset, seasons, lastseason):
sns.set_style('darkgrid')
sns.set_context("talk")
sns.set_palette('Set2', len(seasons) + 4)
colorcode = sns.color_palette('Set2', len(seasons) + 4)
fig, axi = plt.subplots(nrows=1, ncols=1, figsize=[20, 10])
ax = [axi]
maxval1 = dftmp[list(set(seasons).union([lastseason]).difference(['SRAG2009']))].max().max()
if maxval1 < 1:
ax[0].set_ylim([0, 1])
else:
ax[0].set_ylim([0, maxval1])
dftmp.plot(ax=ax[0], x='epiweek', y=seasons)
dftmp.plot(ax=ax[0], x='epiweek', y=lastseason, color='k', lw=3)
dftmp.plot(ax=ax[0], x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
# dftmp.plot(ax=ax[0], x='epiweek', y='intensidade baixa', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade alta', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade muito alta', style='--', color=colorcode[-1])
for label in ax[0].get_xticklabels():
label.set_fontproperties(fontpropticks)
for label in ax[0].get_yticklabels():
label.set_fontproperties(fontpropticks)
#### Start absolute value plot as inset ####
sns.set_style('whitegrid')
axinset = inset_axes(ax[0], width='35%', height='35%', loc=1)
maxval = dftmpinset[list(set(seasons).union([lastseason]).difference(['SRAG2009']))].max().max()
if maxval < 1:
axinset.set_ylim([0, 1])
else:
axinset.set_ylim([0, maxval])
dftmpinset.plot(ax=axinset, x='epiweek', y=seasons)
dftmpinset.plot(ax=axinset, x='epiweek', y=lastseason, color='k', lw=3)
dftmpinset.plot(ax=axinset, x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
axinset.legend_.remove()
axinset.set_xlabel('SE', fontproperties=fontproplblinset)
axinset.set_ylabel('Casos', fontproperties=fontproplblinset)
axinset.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for label in axinset.get_xticklabels():
label.set_fontproperties(fontpropticksinset)
for label in axinset.get_yticklabels():
label.set_fontproperties(fontpropticksinset)
ax[0].set_title(tabela_ufnome[uf], fontproperties=fontproplbl)
ax[0].set_xlabel('SE', fontproperties=fontproplbl)
ax[0].set_ylabel('Incidência (por 100mil habitantes)', fontproperties=fontproplbl)
xticks = np.arange(4, 53, 4)
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticks)
    # Shrink current axis by 20% to make room for the legend
box = ax[0].get_position()
ax[0].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax[0].legend(prop=fontproplgd, loc='center left', bbox_to_anchor=(1, 0.5))
return fig
def recalc_incidence(x, popnorm):
    # Round the incidence x to the value corresponding to a whole number of cases.
    # Useful for thresholds estimated by continuous models; popnorm is the incidence
    # equivalent of a single case (100000/population).
return round(x/popnorm)*popnorm
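# Worked example (hypothetical numbers): for a population of 200,000, popnorm =
# 100000/200000 = 0.5 incidence units per case, so recalc_incidence(1.3, 0.5) =
# round(2.6)*0.5 = 1.5, i.e. the incidence corresponding to exactly 3 cases.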
def main(fname, plot_curves=False, sep=',', uflist='all', out_pref=''):
    # Output file prefix: basename of the input file, without extension or '-incidence' suffix
    pref = ('.'.join(fname.replace('-incidence', '').split('.')[:-1])).split('/')[-1]
fname = fname.replace('covid', 'flu')
df = pd.read_csv(fname, sep=sep, encoding='utf-8')
dfinset = pd.read_csv(fname.replace('-incidence', ''), sep=sep, encoding='utf-8')
if 'Região' in list(df.columns):
df.rename(columns={'Região': 'UF'}, inplace=True)
dfinset.rename(columns={'Região': 'UF'}, inplace=True)
df.UF = df.UF.astype(str)
dfinset.UF = dfinset.UF.astype(str)
plt.interactive(False)
if uflist == 'all':
uflist = list(df.UF.unique())
dfpop = pd.read_csv('../data/populacao_uf_regional_atual.csv', encoding='utf-8')
dfreport = pd.DataFrame()
dfcorredor = pd.DataFrame()
dfreport_cases = pd.DataFrame()
dfcorredor_cases = pd.DataFrame()
cols_report = ['UF', 'População', 'Média geométrica do pico de infecção das temporadas regulares',
'região de baixa atividade típica',
'limiar pré-epidêmico', 'intensidade alta', 'intensidade muito alta',
'SE típica do início do surto',
'SE típica do início do surto - IC inferior (2,5%)',
'SE típica do início do surto - IC superior (97,5%)',
'duração típica do surto',
'duração típica do surto - IC inferior (2,5%)',
'duração típica do surto - IC superior (97,5%)',
'temporadas utilizadas para os corredores endêmicos',
'ano']
cols_corredor = ['UF', 'População', 'epiweek', 'corredor baixo', 'corredor mediano', 'corredor alto', 'ano']
# Define outbreak window method.
    # See the epitiming function from the MEM package for details:
    # 1: original method published in Vega et al.
    # 2: fixed criterion on the second derivative
wdw_method = 2
wdw_method_lbl = {1: 'original', 2: 'criterium'}
mem_calc = {'SUCCESS': [], 'FAILED': []}
for uf in uflist:
if uf not in list(df.UF.unique()):
continue
dftmp = df[df.UF == uf].reset_index().drop('index', axis=1).copy()
dftmpinset = dfinset[dfinset.UF == uf].reset_index().drop('index', axis=1).copy()
seasons = sorted([x for x in dftmp.columns if 'SRAG' in x])
lastseason = seasons[-1]
        dftmp['ano'] = lastseason.replace('SRAG', '')
        dftmpinset['ano'] = lastseason.replace('SRAG', '')
seasons = list(np.delete(seasons, -1))
# Select "regular seasons" by comparing geometric distance of corresponding peaks
# discard season 2009 by default
tmpseasons = seasons.copy()
if 'SRAG2009' in tmpseasons:
tmpseasons.remove('SRAG2009')
if 'SRAG2020' in tmpseasons:
tmpseasons.remove('SRAG2020')
discarded_seasons = discardseasons(df=dftmp, seasons=tmpseasons, gdthres=2.8, smin=4)
discarded_seasons.extend(['SRAG2009'])
if lastseason != 'SRAG2020':
discarded_seasons.extend(['SRAG2020'])
discarded_seasons.extend([lastseason])
        # Incidence normalization factor: incidence (per 100,000) corresponding to one case
        incidence_norm = float(100000 / dfpop.loc[dfpop['Código'] == str(uf), 'Total'])
lowseasons = set()
dftmp['região de baixa atividade típica'] = 0
try:
if dftmpinset[list(set(seasons).difference(discarded_seasons))].max().max() < 3:
dftmp['região de baixa atividade típica'] = 1
thresholds, lowseasons = applymem(dftmp[seasons],
discarded_seasons,
wdw_method,
lower_bound=1*incidence_norm)
if thresholds['pre.post.intervals'].loc['pre', 2] >= 1*incidence_norm:
dftmp['mediana pré-epidêmica'] = recalc_incidence(thresholds['pre.post.intervals'].loc['pre', 1], incidence_norm)
dftmp['limiar pré-epidêmico'] = recalc_incidence(thresholds['pre.post.intervals'].loc['pre', 2],
incidence_norm)
dftmp['SE relativa ao início do surto'] = dftmp['epiweek'] - thresholds['mean.start']
dftmp['SE típica do início do surto'] = thresholds['mean.start']
# Confidence interval for epi.start
cimin = thresholds['ci.start'].loc[0, 0]
cimax = thresholds['ci.start'].loc[0, 2]
dftmp['SE típica do início do surto - IC inferior (2,5%)'] = cimin
dftmp['SE típica do início do surto - IC superior (97,5%)'] = cimax
dftmp['duração típica do surto'] = thresholds['mean.length']
# Confidence interval for epi.length
cimin = thresholds['ci.length'].loc[1, 0]
cimax = thresholds['ci.length'].loc[1, 2]
dftmp['duração típica do surto - IC inferior (2,5%)'] = cimin
dftmp['duração típica do surto - IC superior (97,5%)'] = cimax
else:
dftmp['região de baixa atividade típica'] = 1
dftmp['mediana pré-epidêmica'] = np.nan
dftmp['limiar pré-epidêmico'] = 1 * incidence_norm
dftmp['SE relativa ao início do surto'] = np.nan
dftmp['SE típica do início do surto'] = np.nan
# Confidence interval for epi.start
cimin = np.nan
cimax = np.nan
dftmp['SE típica do início do surto - IC inferior (2,5%)'] = cimin
dftmp['SE típica do início do surto - IC superior (97,5%)'] = cimax
dftmp['duração típica do surto'] = np.nan
# Confidence interval for epi.length
cimin = np.nan
cimax = np.nan
dftmp['duração típica do surto - IC inferior (2,5%)'] = cimin
dftmp['duração típica do surto - IC superior (97,5%)'] = cimax
dftmp['limiar pós-epidêmico'] = recalc_incidence(thresholds['pre.post.intervals'].loc['post', 2],
incidence_norm)
dftmp['intensidade baixa'] = recalc_incidence(thresholds['epi.intervals'].loc[0, 3], incidence_norm)
            dftmp['intensidade alta'] = recalc_incidence(
                max([2*incidence_norm, thresholds['epi.intervals'].loc[1, 3]]), incidence_norm)
            dftmp['intensidade muito alta'] = recalc_incidence(
                max([3*incidence_norm, thresholds['epi.intervals'].loc[2, 3]]), incidence_norm)
dftmp['corredor baixo'] = recalc_incidence(thresholds['typ.real.curve']['baixo'], incidence_norm)
dftmp['corredor mediano'] = recalc_incidence(thresholds['typ.real.curve']['mediano'], incidence_norm)
dftmp['corredor alto'] = recalc_incidence(thresholds['typ.real.curve']['alto'], incidence_norm)
dftmp['População'] = int(dfpop.loc[dfpop['Código'] == str(uf), 'Total'])
dftmp['curva epi. baixa'] = recalc_incidence(thresholds['typ.curve']['baixo'], incidence_norm)
dftmp['curva epi. mediana'] = recalc_incidence(thresholds['typ.curve']['mediano'], incidence_norm)
dftmp['curva epi. alta'] = recalc_incidence(thresholds['typ.curve']['alto'], incidence_norm)
epicols = list(thresholds['moving.epidemics'].columns)
dftmp[epicols] = thresholds['moving.epidemics']
dftmp['n.seasons'] = thresholds['n.seasons']
            dftmp['temporadas utilizadas para os corredores endêmicos'] = ', '.join(
                str(x).replace('SRAG', '') for x in sorted(set(seasons).difference(discarded_seasons)))
# Geometric mean of regular seasons' peak:
dftmp_peaks = dftmp[seasons].max()
peak_gmean = gmean(dftmp_peaks[list(set(seasons).difference(discarded_seasons))])
dftmp_peaks_inset = dftmpinset[seasons].max()
peak_gmean_inset = gmean(dftmp_peaks_inset[list(set(seasons).difference(discarded_seasons))])
dftmp['Média geométrica do pico de infecção das temporadas regulares'] = peak_gmean
dftmpinset['Média geométrica do pico de infecção das temporadas regulares'] = peak_gmean_inset
for lbl in seasons:
peak = dftmp_peaks[lbl]
peak_inset = dftmp_peaks_inset[lbl]
if peak == 0:
textval = '-'
textval_inset = '-'
else:
geom_dist = np.log(peak) - np.log(peak_gmean)
geom_dist_inset = np.log(peak_inset) - np.log(peak_gmean_inset)
textval = '%.2f' % np.e ** abs(geom_dist) + ' vez(es) ' + ("maior" if geom_dist > 0 else "menor")
                    textval_inset = '%.2f' % np.e ** abs(geom_dist_inset) + ' vez(es) ' + \
                                    ("maior" if geom_dist_inset > 0 else "menor")
dftmp['Distância geométrica do pico na temporada %s' % lbl] = textval
dftmpinset['Distância geométrica do pico na temporada %s' % lbl] = textval_inset
dftmp.to_csv('./mem-data/%s-mem-%s-incidencia-dropgdist%s-droplow%s-%s_method.csv' % (
pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
index=False, encoding='utf-8')
dftmpinset['região de baixa atividade típica'] = dftmp['região de baixa atividade típica']
dftmpinset['limiar pré-epidêmico'] = max([1, round(
thresholds['pre.post.intervals'].loc['pre', 2]/incidence_norm)])
dftmpinset['limiar pós-epidêmico'] = max([1, round(
thresholds['pre.post.intervals'].loc['post', 2]/incidence_norm)])
dftmpinset['intensidade baixa'] = round(thresholds['epi.intervals'].loc[0, 3]/incidence_norm)
dftmpinset['intensidade alta'] = max([2, round(
thresholds['epi.intervals'].loc[1, 3]/incidence_norm)])
dftmpinset['intensidade muito alta'] = max([3, round(
thresholds['epi.intervals'].loc[2, 3]/incidence_norm)])
dftmpinset['corredor baixo'] = round(dftmp['corredor baixo']/incidence_norm)
dftmpinset['corredor mediano'] = round(dftmp['corredor mediano']/incidence_norm)
dftmpinset['corredor alto'] = round(dftmp['corredor alto']/incidence_norm)
dftmpinset['SE relativa ao início do surto'] = dftmp['SE relativa ao início do surto']
dftmpinset['SE típica do início do surto'] = dftmp['SE típica do início do surto']
dftmpinset['SE típica do início do surto - IC inferior (2,5%)'] = \
dftmp['SE típica do início do surto - IC inferior (2,5%)']
dftmpinset['SE típica do início do surto - IC superior (97,5%)'] = \
dftmp['SE típica do início do surto - IC superior (97,5%)']
dftmpinset['duração típica do surto'] = dftmp['duração típica do surto']
dftmpinset['duração típica do surto - IC inferior (2,5%)'] = \
dftmp['duração típica do surto - IC inferior (2,5%)']
dftmpinset['duração típica do surto - IC superior (97,5%)'] = \
dftmp['duração típica do surto - IC superior (97,5%)']
dftmpinset['curva epi. baixa'] = round(dftmp['curva epi. baixa']/incidence_norm)
dftmpinset['curva epi. mediana'] = round(dftmp['curva epi. mediana']/incidence_norm)
dftmpinset['curva epi. alta'] = round(dftmp['curva epi. alta']/incidence_norm)
epicols = list(thresholds['moving.epidemics'].columns)
dftmpinset[epicols] = thresholds['moving.epidemics']
dftmpinset['n.seasons'] = thresholds['n.seasons']
dftmpinset['População'] = dftmp['População']
dftmpinset['temporadas utilizadas para os corredores endêmicos'] = \
dftmp['temporadas utilizadas para os corredores endêmicos']
dftmpinset.to_csv(
'./mem-data/%s-mem-%s-dropgdist%s-droplow%s-%s_method.csv' % (pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace(
'SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
index=False, encoding='utf-8')
if plot_curves:
fig = plotmemcurve(uf=uf, dftmp=dftmp, dftmpinset=dftmpinset, thresholds=thresholds, seasons=seasons,
lastseason=lastseason, epicols=epicols)
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.svg' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.png' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
plt.clf()
plt.close()
mem_calc['SUCCESS'].extend([uf])
        except Exception:
            # MEM fit failed (typically too few usable seasons); fall back to fixed
            # thresholds and the typical curve from extract_typ_real_curve below.
mem_calc['FAILED'].extend([uf])
dftmp['região de baixa atividade típica'] = 1
dftmpinset['região de baixa atividade típica'] = 1
thresholds = extract_typ_real_curve(dftmp[seasons], discarded_seasons, wdw_method,
lower_bound=1*incidence_norm)
dftmp['Média geométrica do pico de infecção das temporadas regulares'] = np.nan
dftmp['mediana pré-epidêmica'] = np.nan
dftmp['limiar pré-epidêmico'] = 1 * incidence_norm
dftmp['limiar pós-epidêmico'] = 1 * incidence_norm
dftmp['intensidade baixa'] = 0
dftmp['intensidade alta'] = 2 * incidence_norm
dftmp['intensidade muito alta'] = 3 * incidence_norm
dftmp['corredor baixo'] = recalc_incidence(thresholds['typ.real.curve']['baixo'], incidence_norm)
dftmp['corredor mediano'] = recalc_incidence(thresholds['typ.real.curve']['mediano'], incidence_norm)
dftmp['corredor alto'] = recalc_incidence(thresholds['typ.real.curve']['alto'], incidence_norm)
dftmp['SE típica do início do surto'] = np.nan
dftmp['duração típica do surto'] = np.nan
dftmp['SE típica do início do surto - IC inferior (2,5%)'] = np.nan
dftmp['SE típica do início do surto - IC superior (97,5%)'] = np.nan
dftmp['duração típica do surto - IC inferior (2,5%)'] = np.nan
dftmp['duração típica do surto - IC superior (97,5%)'] = np.nan
dftmp['População'] = int(dfpop.loc[dfpop['Código'] == str(uf), 'Total'])
dftmp['temporadas utilizadas para os corredores endêmicos'] = np.nan
dftmp.to_csv('./mem-data/%s-memfailed-%s-incidencia-dropgdist%s-%s_method.csv' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''), wdw_method_lbl[wdw_method]), index=False,
encoding='utf-8')
dftmpinset['Média geométrica do pico de infecção das temporadas regulares'] = np.nan
dftmpinset['limiar pré-epidêmico'] = 1
dftmpinset['limiar pós-epidêmico'] = 1
dftmpinset['intensidade baixa'] = 0
dftmpinset['intensidade alta'] = 2
dftmpinset['intensidade muito alta'] = 3
dftmpinset['corredor baixo'] = round(dftmp['corredor baixo']/incidence_norm)
dftmpinset['corredor mediano'] = round(dftmp['corredor mediano']/incidence_norm)
dftmpinset['corredor alto'] = round(dftmp['corredor alto']/incidence_norm)
dftmpinset['SE relativa ao início do surto'] = np.nan
dftmpinset['SE típica do início do surto'] = np.nan
dftmpinset['SE típica do início do surto - IC inferior (2,5%)'] = np.nan
dftmpinset['SE típica do início do surto - IC superior (97,5%)'] = np.nan
dftmpinset['duração típica do surto'] = dftmp['duração típica do surto']
dftmpinset['duração típica do surto - IC inferior (2,5%)'] = np.nan
dftmpinset['duração típica do surto - IC superior (97,5%)'] = np.nan
dftmpinset['curva epi. baixa'] = np.nan
dftmpinset['curva epi. mediana'] = np.nan
dftmpinset['curva epi. alta'] = np.nan
dftmpinset['n.seasons'] = 0
dftmpinset['População'] = dftmp['População']
dftmpinset['temporadas utilizadas para os corredores endêmicos'] = np.nan
dftmpinset.to_csv(
'./mem-data/%s-memfailed-%s-dropgdist%s-%s_method.csv' % (pref, tabela_ufnome[uf].
replace(' ', '_'),
'-'.join(discarded_seasons).replace(
'SRAG', ''),
wdw_method_lbl[wdw_method]),
index=False, encoding='utf-8')
if plot_curves:
fig = plotmemfailedcurve(uf=uf, dftmp=dftmp, dftmpinset=dftmpinset, seasons=seasons,
lastseason=lastseason)
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.svg' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.png' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
plt.clf()
plt.close()
dfreport = dfreport.append(dftmp[cols_report].head(1), ignore_index=True, sort=True)
dfcorredor = dfcorredor.append(dftmp[cols_corredor], ignore_index=True, sort=True)
dfreport_cases = dfreport_cases.append(dftmpinset[cols_report].head(1), ignore_index=True, sort=True)
dfcorredor_cases = dfcorredor_cases.append(dftmpinset[cols_corredor], ignore_index=True, sort=True)
for dfloop in [dfreport, dfcorredor]:
dfloop['Unidade da Federação'] = dfloop.UF.map(tabela_ufnome)
dfloop['Tipo'] = 'Estado'
        dfloop.loc[dfloop['UF'].isin(['RegN', 'RegL', 'RegC', 'RegS']), 'Tipo'] = 'Regional'
        dfloop.loc[dfloop['UF'].isin(['N', 'S', 'CO', 'SE', 'NE']), 'Tipo'] = 'Região'
        dfloop.loc[dfloop['UF'] == 'BR', 'Tipo'] = 'País'
dfreport.to_csv('./mem-data/%s-mem-report-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]), index=False)
dfreport.to_csv('../clean_data/%smem-report.csv' % out_pref, index=False)
dfreport_cases[['Unidade da Federação', 'Tipo']] = dfreport[['Unidade da Federação', 'Tipo']]
dfreport_cases.to_csv('./mem-data/%s-mem-report_cases-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]),
index=False)
dfreport_cases.to_csv('../clean_data/%smem-report_cases.csv' % out_pref, index=False)
dfcorredor.to_csv('./mem-data/%s-mem-typical-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]), index=False)
dfcorredor.to_csv('../clean_data/%smem-typical.csv' % out_pref, index=False)
dfcorredor_cases[['Unidade da Federação', 'Tipo']] = dfcorredor[['Unidade da Federação', 'Tipo']]
dfcorredor_cases.to_csv('./mem-data/%s-mem-typical_cases-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]),
index=False)
dfcorredor_cases.to_csv('../clean_data/%smem-typical_cases.csv' % out_pref, index=False)
module_logger.info('MEM calculation outcome:\n - SUCCESS: %(SUCCESS)s\n - FAILED: %(FAILED)s' % mem_calc)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate MEM analysis from cleaned SINAN-SRAG data,\n" +
"for specified Federal Units, if any. If none specified, runs for all.\n" +
"Example usage:\n" +
"python3 sinan-mem-inset-thresholds.py --path clean_data4mem-incidence.csv " +
"--plot False --uflist Aw Cf\n",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--path', help='Path to data file')
parser.add_argument('--sep', help='Column separator', default=',')
parser.add_argument('--plot', help='Plot curves', default=False)
parser.add_argument('--uflist', nargs='*', default='all')
args = parser.parse_args()
args.plot = to_bool(args.plot)
print(args)
    main(fname=args.path, plot_curves=args.plot, sep=args.sep, uflist=args.uflist)
| gpl-3.0 | -6,955,606,979,555,097,000 | 51.231039 | 129 | 0.583441 | false |
Kuniwak/vint | vint/bootstrap.py | 1 | 1382 | import importlib
import pkgutil
from pathlib import Path
from vint.linting.cli import start_cli
import logging
LOG_FORMAT = 'vint %(levelname)s: %(message)s'
def init_logger():
logging.basicConfig(format=LOG_FORMAT)
def init_linter():
import_all_policies()
def init_cli():
start_cli()
def import_all_policies():
""" Import all policies that were registered by vint.linting.policy_registry.
    Dynamic policy importing consists of 3 steps:
    1. Import all policy modules (at this point we cannot yet know which policies exist)
    2. Each policy module registers itself by using vint.linting.policy_registry
    3. After all policies have registered themselves, we can get the policy classes
"""
pkg_name = _get_policy_package_name_for_test()
pkg_path_list = pkg_name.split('.')
pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve())
for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]):
if not is_pkg:
module_fqn = pkg_name + '.' + module_name
logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn))
importlib.import_module(module_fqn)
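# Illustrative sketch (not part of this module): what a self-registering
# policy module under vint.linting.policy looks like, following step 2 of
# the docstring above. The import paths and decorator name are assumptions:
#
#     from vint.linting.policy_registry import register_policy
#     from vint.linting.policy.abstract_policy import AbstractPolicy
#
#     @register_policy
#     class ProhibitSomething(AbstractPolicy):
#         ...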
def _get_vint_root():
return Path(__file__).parent.parent
def _get_policy_package_name_for_test():
""" Test hook method that returns a package name for policy modules. """
return 'vint.linting.policy'
| mit | 1,752,616,255,235,649,500 | 26.64 | 86 | 0.675109 | false |
california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed_elections/proxies/opencivicdata/elections/__init__.py | 1 | 1472 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Proxy models for augmenting our source data tables with methods useful for processing.
"""
from .ballotmeasurecontests import (
OCDBallotMeasureContestProxy,
OCDBallotMeasureContestIdentifierProxy,
OCDBallotMeasureContestOptionProxy,
OCDBallotMeasureContestSourceProxy
)
from .candidatecontests import (
OCDCandidateContestProxy,
OCDCandidateContestPostProxy,
OCDCandidateContestSourceProxy
)
from .candidacies import (
OCDCandidacyProxy,
OCDCandidacySourceProxy
)
from .elections import (
OCDElectionProxy,
OCDElectionIdentifierProxy,
OCDElectionSourceProxy
)
from .parties import OCDPartyProxy
from .retentioncontests import (
OCDRetentionContestProxy,
OCDRetentionContestIdentifierProxy,
OCDRetentionContestOptionProxy,
OCDRetentionContestSourceProxy
)
__all__ = (
"OCDBallotMeasureContestProxy",
"OCDBallotMeasureContestIdentifierProxy",
"OCDBallotMeasureContestOptionProxy",
"OCDBallotMeasureContestSourceProxy",
"OCDCandidateContestProxy",
"OCDCandidateContestPostProxy",
"OCDCandidateContestSourceProxy",
"OCDCandidacyProxy",
"OCDCandidacySourceProxy",
"OCDElectionProxy",
"OCDElectionIdentifierProxy",
"OCDElectionSourceProxy",
"OCDPartyProxy",
"OCDRetentionContestProxy",
"OCDRetentionContestIdentifierProxy",
"OCDRetentionContestOptionProxy",
"OCDRetentionContestSourceProxy"
)
| mit | -3,196,519,617,878,607,400 | 26.773585 | 86 | 0.777853 | false |
charles-cooper/raiden | tools/create_compilation_dump.py | 1 | 6453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
from ethereum import tester
from ethereum import slogging
from raiden.utils import privatekey_to_address, get_contract_path, safe_lstrip_hex
slogging.configure(":INFO")
log = slogging.getLogger(__name__)
TARGETS = dict(
registry='Registry.sol',
discovery='EndpointRegistry.sol',
token='HumanStandardToken.sol',
)
DEFAULT_KEY = ('1' * 64).decode('hex')
DEFAULT_ACCOUNT = privatekey_to_address(DEFAULT_KEY)
def deploy_all(token_groups=None):
if not token_groups:
token_groups = dict()
log.DEV( # pylint: disable=no-member
'default key',
raw=tester.DEFAULT_KEY,
enc=tester.DEFAULT_KEY.encode('hex'),
)
log.DEV( # pylint: disable=no-member
'default account',
raw=tester.DEFAULT_ACCOUNT,
enc=tester.DEFAULT_ACCOUNT.encode('hex'),
)
tester.DEFAULT_KEY = DEFAULT_KEY
tester.DEFAULT_ACCOUNT = DEFAULT_ACCOUNT
tester.keys[0] = DEFAULT_KEY
tester.accounts[0] = DEFAULT_ACCOUNT
log.DEV( # pylint: disable=no-member
'default key',
raw=tester.DEFAULT_KEY,
enc=tester.DEFAULT_KEY.encode('hex'),
)
log.DEV( # pylint: disable=no-member
'default account',
raw=tester.DEFAULT_ACCOUNT,
enc=tester.DEFAULT_ACCOUNT.encode('hex'),
)
state = tester.state(num_accounts=1)
log.DEV( # pylint: disable=no-member
'state',
coinbase=state.block.coinbase.encode('hex'),
balance=state.block.get_balance(DEFAULT_ACCOUNT),
)
tester.gas_limit = 10 * 10 ** 6
state.block.number = 1158001
deployed = dict()
tokens = dict()
for name, group in token_groups.items():
token_name, address = create_and_distribute_token(state, group, name)
tokens[token_name] = address
deployed[token_name] = address
libraries = dict()
deployed.update(
deploy_with_dependencies(
TARGETS['registry'],
state,
libraries=libraries
)
)
deployed.update(
deploy_with_dependencies(
TARGETS['discovery'],
state
)
)
genesis_alloc = dict()
for account_address in deployed.itervalues():
genesis_alloc[account_address] = account_alloc = dict()
for key, value in state.block.account_to_dict(account_address).iteritems():
account_alloc[key] = safe_lstrip_hex(value)
raiden_flags = (
'--registry-contract-address {Registry}'
' --discovery-contract-address {EndpointRegistry}'
).format(**deployed)
blockchain_config = dict(
raiden_flags=raiden_flags,
token_groups=tokens,
)
blockchain_config['contract_addresses'] = deployed
return (genesis_alloc, blockchain_config)
def create_and_distribute_token(state,
receivers,
name=None,
amount_per_receiver=1000000):
proxy = state.abi_contract(
None,
path=get_contract_path(TARGETS['token']),
language='solidity',
listen=False,
sender=DEFAULT_KEY,
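        # HumanStandardToken constructor arguments, in order:
        # (initial supply, token name, decimals, token symbol)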
constructor_parameters=(
len(receivers) * amount_per_receiver,
name,
2,
name[:4].upper()
)
)
for receiver in receivers:
proxy.transfer(receiver, amount_per_receiver)
state.mine(number_of_blocks=1)
return (name, proxy.address.encode('hex'))
def deploy_with_dependencies(contract_name, state, libraries=None):
if not libraries:
libraries = dict()
dependencies = find_dependencies(get_contract_path(contract_name))
dependency_names = [d.split('.')[0] for d in dependencies]
for key in list(libraries.keys()):
if key not in dependency_names:
libraries.pop(key)
log.DEV( # pylint: disable=no-member
'in deploy_with_dependencies',
contract=contract_name,
dependencies=dependencies,
)
for dependency in dependencies:
        # 'Contract' files are included in 'Registry' and should not be deployed on their own
if 'Contract' in dependency:
continue
log.DEV('deploying dependency', name=dependency) # pylint: disable=no-member
log.DEV('known libraries', libraries=libraries) # pylint: disable=no-member
deployed = state.abi_contract(
None,
path=get_contract_path(dependency),
listen=False,
language='solidity',
libraries=libraries,
sender=DEFAULT_KEY,
)
libraries[dependency.split('.')[0]] = deployed.address.encode('hex')
state.mine()
log.DEV('deploying target', name=contract_name) # pylint: disable=no-member
log.DEV('known libraries', libraries=libraries) # pylint: disable=no-member
contract = state.abi_contract(
None,
path=get_contract_path(contract_name),
language='solidity',
libraries=libraries,
sender=DEFAULT_KEY,
)
libraries[contract_name.split('.')[0]] = contract.address.encode('hex')
state.mine()
return libraries
def find_dependencies(contract_file):
"""Resolve solidity dependencies depth first.
"""
dependencies = []
with open(contract_file) as handler:
for line in handler.readlines():
if line.startswith("import"):
dependency = line.split()[1].split('"')[1]
dependency = dependency.rsplit('/', 1)[-1]
if dependency not in dependencies:
dependencies.extend(find_dependencies(get_contract_path(dependency)))
dependencies.append(dependency)
cleaned = []
for dependency in dependencies:
if dependency not in cleaned:
cleaned.append(dependency)
dependencies = cleaned
cleaned = []
for dependency in dependencies:
with open(get_contract_path(dependency)) as handler:
if any(line.startswith('interface') for line in handler.readlines()):
continue
cleaned.append(dependency)
return cleaned
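# Example of what find_dependencies extracts (file name illustrative):
# a source line such as
#     import "./Token.sol";
# yields the dependency 'Token.sol'; files whose source contains a line
# starting with 'interface' are dropped from the final list.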
def main():
pretty = False
dump, blockchain_config = deploy_all()
print(json.dumps(dump, indent=2 if pretty else None))
print(json.dumps(blockchain_config, indent=2 if pretty else None))
if __name__ == '__main__':
main()
| mit | -7,197,976,191,324,780,000 | 28.600917 | 89 | 0.611808 | false |
AdamRTomkins/libSpineML | libSpineML/smlBundle.py | 1 | 7712 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""SpineML Bundle Module
This module provides a convenience class to bundle together related SpineML
objects into a single standard object which can be easily passed between
programs. The bundle can interact with pre-made SpineML objects
through the other support classes, or parse them directly from XML.
TODO:
## export all as a loop through
## export each element, as a pass through
## import a project file
"""
import os
import tempfile
import smlExperiment # SpineML layer classes
import smlNetwork
import smlComponent
class Bundle(object):
"""Bundle instances are a container class for the various spineML specifications.
Each specification is stored a list of objects.
"""
def __init__(self, experiments=None, networks=None, components=None,project_dict=None):
self.experiments = []
self.components = []
self.networks = []
self.index = {}
if type(experiments) is not type(None):
if type(experiments) is smlExperiment.SpineMLType:
self.experiments.append(experiments)
elif type(experiments) is list:
for e in experiments:
if type(e) is not smlExperiment.SpineMLType:
raise TypeError('Invalid Experiment Input: %s' % str(type(e)))
else:
self.experiments.append(e)
else:
raise TypeError('Invalid Experiment Input: %s' % str(type(experiments)))
if type(networks) is not type(None):
if type(networks) is smlNetwork.SpineMLType:
self.networks.append(networks)
elif type(networks) is list:
for n in networks:
if type(n) is not smlNetwork.SpineMLType:
raise TypeError('Invalid Network Input: %s' % str(type(n)))
else:
self.networks.append(n)
else:
raise TypeError('Invalid Network Input: %s' % str(type(networks)))
if type(components) is not type(None):
if type(components) is smlComponent.SpineMLType:
self.components.append(components)
elif type(components) is list:
for c in components:
if type(c) is not smlComponent.SpineMLType:
raise TypeError('Invalid Component Input: %s' % str(type(c)))
else:
self.components.append(c)
else:
raise TypeError('Invalid Component Input: %s' % str(type(components)))
if type(project_dict) is not type(None):
assert 'experiment' in project_dict
assert 'network' in project_dict
assert 'components' in project_dict
# set experiment
            # e.g.: 'experiment': ('experiment0.xml', '<xml content>')
experiment_file, experiment_xml = project_dict['experiment']
with tempfile.NamedTemporaryFile() as temp:
temp.write(experiment_xml)
temp.flush()
temp.seek(0)
exp_obj = smlExperiment.parse(temp,True)
self.experiments.append(exp_obj)
# build up the experiment index
self.index[experiment_file] = {}
self.index[experiment_file]['experiment'] = {experiment_file:exp_obj}
# set network
# eg: 'network':('model.xml','<xml content>')
network_file, network_xml = project_dict['network']
with tempfile.NamedTemporaryFile() as temp:
temp.write(network_xml)
temp.flush()
temp.seek(0)
net_obj = smlNetwork.parse(temp,True)
self.networks.append(net_obj)
self.index[experiment_file]['network'] = {}
self.index[experiment_file]['network'][network_file] = net_obj
# set components
            self.index[experiment_file]['component'] = {}
            for component_file, component_xml in project_dict['components']:
with tempfile.NamedTemporaryFile() as temp:
temp.write(component_xml)
temp.flush()
temp.seek(0)
comp_obj = smlComponent.parse(temp,True)
self.components.append(comp_obj)
self.index[experiment_file]['component'][component_file] = comp_obj
def add_experiment(self, experiment,recursive=False):
"""Add a SpineML Experiment stored as SpineMLType types, to the bundle
Setting recursive=True will enable the experiment to add further subcomponents
which it accesses, such as the network file and the component file.
Adding an experiment using the recursive option also builds an index, which
may provide a more organic structure
"""
if type(experiment) is smlExperiment.SpineMLType:
self.experiments.append(experiment)
elif type(experiment) is str:
exp_obj = smlExperiment.parse(experiment,True)
self.experiments.append(exp_obj)
exp_file = os.path.basename(experiment)
# build up the experiment index
self.index[exp_file] = {}
self.index[exp_file]['experiment'] = {exp_file:exp_obj}
if recursive:
# Add the linked model files if recursive is set to true.
path = os.path.dirname(experiment) + '/'
if path == '/':
path = ''
for e in exp_obj.Experiment:
self.add_network(path+e.Model.network_layer_url,True,exp_file)
else:
raise TypeError('Invalid Experiment Input: %s' % str(type(experiment)))
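    # Index layout produced by a recursive add (derived from the code above;
    # the file names are placeholders):
    #
    #     bundle.index['experiment0.xml'] == {
    #         'experiment': {'experiment0.xml': <SpineMLType>},
    #         'network':    {'model.xml': <SpineMLType>},
    #         'component':  {'neuron.xml': <SpineMLType>},
    #     }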
def add_network(self, network,recursive=False,index=None):
"""Add a SpineML Network stored as a SpineMLType, to the bundle
When building an index recursively, pass the experiment file name as the index
"""
if type(network) is smlNetwork.SpineMLType:
self.networks.append(network)
elif type(network) is str:
net_file = os.path.basename(network)
path = os.path.dirname(network) + '/'
if path == '/':
path = ''
net_obj = smlNetwork.parse(network,True)
self.networks.append(net_obj)
if recursive:
if index is not None:
self.index[index]['network'] = {net_file:net_obj}
# Add the linked component files if recursive is set to true
for n in net_obj.Population:
self.add_component(smlComponent.parse(path + n.Neuron.url,True))
if index is not None:
                        self.index[index].setdefault('component', {})[n.Neuron.url] = self.components[-1]
else:
raise TypeError('Invalid Network Input %s' % str(type(network)))
def add_component(self, component):
"""Add a SpineML Component of SpineMLType type to the bundle
"""
if type(component) is smlComponent.SpineMLType:
self.components.append(component)
elif type(component) is str:
self.components.append(smlComponent.parse(component,True))
else:
raise TypeError('Invalid Component Input %s' % str(type(component)))
| gpl-3.0 | -8,174,739,726,877,663,000 | 37.56 | 91 | 0.566131 | false |
hbp-brain-charting/public_protocols | mtt/paradigm_descriptors/paradigm_descriptor_mtt.py | 1 | 8624 | # -*- coding: utf-8 -*-
"""
Script for the extraction of paradigm descriptors of the Mental-Time-Travel
protocol, for both models.
author: Ana Luisa Pinho
e-mail: [email protected]
Last update: November 2019
Compatibility: Python 3.5
"""
import os
import glob
import csv
import numpy as np
# %%
# ========================== GENERAL PARAMETERS ===============================
REFERENCES_WE = ['lermite_observe', 'debit_reduit',
'les_animaux_broutent', 'premiere_rencontre',
'seconde_rencontre']
REFERENCES_SN = ['dolmens_sous_la_pluie', 'le_grand_pretre_observe',
'les_feux_follets_sallument', 'premier_rituel',
'second_rituel']
CUES_SPACE = ['sud_ou_nord', 'sud_ou_nord', 'ouest_ou_est', 'ouest_ou_est']
CUES_TIME = ['avant_ou_apres', 'avant_ou_apres']
# *****************************************************************************
# #######################################################
# # Island story
# island = 'we'
# # Participants' list
# participant_list = [1, 4, 5, 7, 8, 9, 12, 13, 14]
# # Which file to load? (numbering starts from 0)
# input_no = 0
# # Sessions's ID (numbering starts from 0)
# first_sess = 0
# last_sess = 2
# #######################################################
'''
Exceptions for IBC participants of island "we":
Participant: input_no, first_sess, last_sess
sub-06: 0, 0, 0
sub-06: 1, 1, 2
sub-11: 0, 0, 1
sub-11: 1, 2, 2
sub-15: 0, 0, 0 (very incomplete)
sub-15: 1, 1, 2
'''
# # Island story
# island = 'we'
# # Participants' list
# participant_list = [06]
# # Which file to load? (numbering starts from 0)
# input_no = 0
# # Sessions's ID (numbering starts from 0)
# first_sess = 0
# last_sess = 0
# #######################################################
# # Island story
# island = 'sn'
# # Participants' list
# participant_list = [1, 4, 5, 6, 7, 9, 11, 12, 13, 14]
# # Which file to load? (numbering starts from 0)
# input_no = 0
# # Sessions's ID (numbering starts from 0)
# first_sess = 0
# last_sess = 2
'''
Exceptions for IBC participants of island "sn":
sub-15: no runs
'''
# #######################################################
# *****************************************************************************
# #### DEFINE PATHWAYS ####
# Parent directory
main_dir = '../../../../analysis_pipeline/ibc_main/neurospin_data/info'
# Subject folder
# fname_prefix = 'pilot'
fname_prefix = 'sub'
# Name of the task protocol
protocol = 'mtt'
# fname of folder with log_files
raw_fname = 'log_' + island
# %%
# ============================== FUNCTIONS ====================================
def create_new_dir(dir_path):
"""
Creates directory of output files
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def load_log_file(input_dir, prefix, subject, task, logdir, no):
"""
Load the log files
"""
filename_participant_id = prefix + "-" + "%02d" % subject
# Set the pathway of the input files
inputs_path = os.path.join(input_dir, filename_participant_id, task,
logdir)
inputs = glob.glob(os.path.join(inputs_path, "*.xpd"))
inputs.sort()
fname = inputs[no]
# Load the file
inlist = []
inlist = [line for line in csv.reader(open(fname), delimiter=',')]
return inlist
def stack_descriptors(onsets, durations, names):
"""
Create table of paradigm descriptors
"""
# Headers of the paradigm descriptors' files according to BIDS
header = ['onset', 'duration', 'trial_type']
table = np.vstack((header, np.vstack((onsets, durations, names)).T))
return table
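# Example of the array returned by stack_descriptors (values illustrative):
#     [['onset', 'duration', 'trial_type'],
#      ['12.0', '3.5', 'some_condition_label']]
# np.vstack coerces everything to strings, which is fine for the TSV output.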
def save_output(file_path, liste):
"""
Save output file
"""
with open(file_path, 'w') as fp:
a = csv.writer(fp, delimiter='\t')
a.writerows(liste)
# %%
# ============================== PARSER =======================================
# %%
# Create a file for each participant and ...
for participant in participant_list:
# Clean or create output folders
path1 = os.path.join(main_dir, fname_prefix + '-' + '%02d' % participant,
protocol, 'absolute_model_' + island)
path2 = os.path.join(main_dir, fname_prefix + '-' + '%02d' % participant,
protocol, 'relative_model_' + island)
create_new_dir(path1)
create_new_dir(path2)
# Load input files
input_list = load_log_file(main_dir, fname_prefix, participant, protocol,
raw_fname, input_no)
# Parse the necessary information
for r, row in enumerate(input_list):
if row[0] == str(participant):
break
input_list = input_list[r:]
# Create a list of sessions' list
data_list = []
length = 0
for b, block in enumerate(np.arange(first_sess, last_sess + 1)):
data_block = []
idx = b * length
for dl, line in enumerate(input_list[idx:]):
if line[1] == str(block):
data_block.append(line)
else:
length = dl
break
data_list.append(data_block)
# ... for every block
for n, data in enumerate(data_list):
# Read the table
onset = []
duration = []
name_abs = []
name_relat = []
for datum in data:
if participant == 15 and datum[1] == '0' and datum[2] != '0' and \
island == 'we':
print(datum[8])
break
datum = datum[4:]
# Onsets and durations of conditions
onset.append(float(datum[5]) / 1000)
duration.append(float(datum[6]) / 1000)
# Names of conditions for both models
# Beginning of a trial
if datum[4] in REFERENCES_WE + REFERENCES_SN:
# References of relative model
name_relat.append(datum[0] + '_all_reference')
elif datum[4] in CUES_SPACE:
# References of absolute model for space
name_abs.append(datum[0] + '_' + datum[1] + '_reference')
# Space cues
name_abs.append(datum[0] + '_all_reference_space_cue')
name_relat.append(datum[0] + '_all_space_cue')
elif datum[4] in CUES_TIME:
# References of absolute model for time
name_abs.append(datum[0] + '_' + datum[2] + '_reference')
# Time cues
name_abs.append(datum[0] + '_all_reference_time_cue')
name_relat.append(datum[0] + '_all_time_cue')
elif datum[4] == 'response':
# Events of the relative model...
# ... for time
if datum[9] in ['before', 'after']:
name_abs.append(datum[0] + '_' + datum[2] + \
'_reference_' + datum[3] + '_event')
name_relat.append(datum[0] + '_' + datum[9] + '_' + \
datum[3] + '_event')
# ... for space
else:
name_abs.append(datum[0] + '_' + datum[1] + \
'_reference_' + datum[3] + '_event')
name_relat.append(datum[0] + '_' + datum[9] + 'side_' + \
datum[3] + '_event')
# Responses for both models
name_abs.append(datum[0] + '_all_reference_response')
name_relat.append(datum[0] + '_all_event_response')
# Events of the absolute model
else:
continue
# Stack onset, duration and trial_type arrays
abs_descriptors = stack_descriptors(onset, duration, name_abs)
relat_descriptors = stack_descriptors(onset, duration, name_relat)
# Output files
abs_fname = 'paradigm_descriptors_mtt_absolute-model' + '_' + \
island + '_' + fname_prefix + '-' + \
'%02d' % participant + '_run' + \
'%01d' % (n + first_sess) + '.tsv'
relat_fname = 'paradigm_descriptors_mtt_relative-model' + '_' + \
island + '_' + fname_prefix + '-' + \
'%02d' % participant + '_run' + \
'%01d' % (n + first_sess) + '.tsv'
output1 = os.path.join(path1, abs_fname)
output2 = os.path.join(path2, relat_fname)
print(output1, output2)
# Save files
save_output(output1, abs_descriptors)
save_output(output2, relat_descriptors)
| bsd-3-clause | -978,880,912,529,410,800 | 32.297297 | 79 | 0.50487 | false |
GabrielCasarin/Allegri | Meta_Compilador/minimizador.py | 1 | 8819 | # Copyright (c) 2016 Gabriel Casarin da Silva, All Rights Reserved.
from comum.automatos import AutomatoFinito
from comum.automatos.estado import Estado, EstadoNaoDeterministico
def eliminar_transicoes_em_vazio(automato):
def epsilon_closure(estado):
fecho = [estado]
pilha = list(fecho)
while pilha:
el = pilha.pop()
if '' in el.simbolos:
for el2 in el['']:
if el2 not in fecho:
fecho.append(el2)
pilha.append(el2)
return fecho
def delta1(qi, simbolo, fecho):
D1 = []
for qj in fecho:
if simbolo in qj.simbolos:
for qk in qj[simbolo]:
for el in epsilon_closure(qk):
if el not in D1:
D1.append(el)
return D1
for Si in automato:
fecho = epsilon_closure(Si)
for simbolo in automato.alfabeto:
if simbolo != '':
D1 = delta1(Si, simbolo, fecho)
for el in D1:
Si[simbolo] = el
for Sj in fecho:
if not Si.final and Sj.final:
Si.final = True
for Si in automato:
del Si['']
def eliminar_indeterminismos(automato):
class EstadoContainer(EstadoNaoDeterministico):
def __init__(self, conjunto_estados):
            # initialize the object as an unnamed, non-final state
super(EstadoContainer, self).__init__(nome='')
            # the idea here is to find the root states of every element of conjunto_estados
self.conjunto_estados = []
for el in conjunto_estados:
if isinstance(el, EstadoContainer):
for estado in el.conjunto_estados:
if estado not in self.conjunto_estados:
self.conjunto_estados.append(estado)
elif isinstance(el, Estado):
if el not in self.conjunto_estados:
self.conjunto_estados.append(el)
self.conjunto_estados = sorted(self.conjunto_estados, key=lambda e: e.nome)
for estado in self.conjunto_estados:
self.nome += estado.nome
self.merge(estado, True)
def compara_conjunto(self, conjunto_estados):
temp = list(conjunto_estados)
for el in conjunto_estados:
if isinstance(el, EstadoContainer):
temp.remove(el)
for estado in el.conjunto_estados:
if estado not in temp:
temp.append(estado)
if len(self.conjunto_estados) == len(temp):
for el in self.conjunto_estados:
if el not in temp:
return False
return True
else:
return False
def cria_novo_estado(conjunto_estados):
"""
        Create a new state by merging two or more existing states.
"""
novo_estado = EstadoContainer(conjunto_estados)
automato.estados[novo_estado.nome] = novo_estado
for simbolo in novo_estado.transicoes.keys():
if len(novo_estado[simbolo]) > 1:
lista_indeterminismos.append((novo_estado, simbolo))
for estado in automato:
for simbolo in estado.transicoes.keys():
if novo_estado.compara_conjunto(estado[simbolo]):
lista_indeterminismos.remove((estado, simbolo))
del estado[simbolo]
estado[simbolo] = novo_estado
def converter_para_deterministico(automato):
old_estados = automato.estados.values()
automato.deterministico = True
automato.estados = {}
for q in old_estados:
automato.add_estado(q.nome)
automato[q.nome].final = q.final
automato[q.nome].submaquinas_chamadas = q.submaquinas_chamadas
for s in q.transicoes.keys():
automato.add_estado(q[s][0].nome)
automato[q.nome][s] = automato[q[s][0].nome]
    # build the initial list of nondeterministic transitions
lista_indeterminismos = []
for estado in automato:
for simbolo in estado.transicoes.keys():
if len(estado[simbolo]) > 1:
lista_indeterminismos.append((estado, simbolo))
    # iterate over all the nondeterminisms
while lista_indeterminismos:
estado, simbolo = lista_indeterminismos[0]
cria_novo_estado(estado[simbolo])
    # finally, rebuild the automaton as a deterministic one
converter_para_deterministico(automato)
def eliminar_estados_inacessiveis(automato, inicial='q0'):
estados = list(automato.estados.values())
visitados = []
pilha = [automato.estados[inicial]]
while pilha:
estadoAtual = pilha.pop()
visitados.append(estadoAtual)
for simbolo in estadoAtual.transicoes.keys():
if automato.deterministico:
proxEstado = estadoAtual[simbolo]
if (proxEstado not in visitados
and proxEstado not in pilha):
pilha.insert(0, proxEstado)
            else:  # if it is not deterministic
for proxEstado in estadoAtual[simbolo]:
if (proxEstado not in visitados
and proxEstado not in pilha):
pilha.insert(0, proxEstado)
a_serem_removidos = [q.nome for q in estados if q not in visitados]
for estado in a_serem_removidos:
del automato.estados[estado]
def minimizador_de_Hopcroft(automato):
    '''Return a partition of an automaton's state set corresponding
    to the equivalence classes obtained with Hopcroft's
    minimization algorithm.'''
def delta_R(P, a):
conj = []
for q in automato:
if a in q.simbolos and q[a] in P:
conj.append(q)
return conj
Grupos = [[],[]]
for q in automato:
if q.final:
Grupos[1].append(q)
else:
Grupos[0].append(q)
Ativo = [list(Grupos[1])]
while Ativo:
A = Ativo.pop()
for a in automato.alfabeto:
for G in Grupos:
delta = delta_R(A, a)
                # G1 = G intersected with delta
G1 = [x for x in G if x in delta]
# G2 = G - G1
G2 = [x for x in G if x not in G1]
if G1 and G2:
Grupos.remove(G)
Grupos.append(G1)
Grupos.append(G2)
if G in Ativo:
Ativo.remove(G)
Ativo.append(G1)
Ativo.append(G2)
else:
if len(G1) < len(G2):
Ativo.append(G1)
else:
Ativo.append(G2)
return Grupos
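# Typical minimization pipeline using the functions in this module
# (illustrative; 'af' stands for an AutomatoFinito built elsewhere):
#
#     eliminar_transicoes_em_vazio(af)    # remove epsilon transitions
#     eliminar_indeterminismos(af)        # make it deterministic
#     eliminar_estados_inacessiveis(af)   # drop unreachable states
#     particao = minimizador_de_Hopcroft(af)
#     af_min = particao_para_automato_finito(particao, nome=af.nome,
#                                            alfabeto=af.alfabeto)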
def particao_para_automato_finito(particao, nome=None, alfabeto=None, inicial=0, apendice=None):
def acha(nome_estado):
for i in range(len(particao)):
for estado in particao[i]:
if estado.nome == nome_estado:
return i
return None
def gera_nome(n):
return 'q' + str(n)
if apendice is None:
af = AutomatoFinito(nome=nome, estados=[gera_nome(inicial)], estadoInicial=gera_nome(inicial), alfabeto=alfabeto)
else:
af = apendice
pilha = []
finais = []
transicoes_chamada = []
i = acha('q0')
nomes_classes = {
i: gera_nome(inicial)
}
pilha.append(particao[i][0])
cont = inicial
while pilha:
estado_atual = pilha.pop()
j = acha(estado_atual.nome)
qi = nomes_classes[j]
for s, qj in estado_atual.transicoes.items():
if qj is not None:
                # find the index of the partition set to which qj belongs
i = acha(qj.nome)
if not i in nomes_classes:
cont += 1
nova_classe = gera_nome(cont)
nomes_classes[i] = nova_classe
pilha.append(particao[i][0])
af.add_estado(nova_classe)
af.add_transicao(de=qi, com=s, para=nomes_classes[i])
if s in estado_atual.submaquinas_chamadas:
transicoes_chamada.append((qi, s, nomes_classes[i]))
af[qi].final = estado_atual.final
if af[qi].final: finais.append(qi)
af[qi].submaquinas_chamadas = estado_atual.submaquinas_chamadas
if apendice is None:
return af
else:
return cont, transicoes_chamada, finais
| gpl-3.0 | -5,765,172,779,519,239,000 | 34.504032 | 121 | 0.535718 | false |
3324fr/spinalcordtoolbox | scripts/isct_test_function.py | 1 | 17025 | #!/usr/bin/env python
#########################################################################################
#
# This function allows to run a function on a large dataset with a set of parameters.
# Results are extracted and saved in a way that they can easily be compared with another set.
#
# Data should be organized as the following:
# (names of images can be changed but must be passed as parameters to this function)
#
# data/
# ......subject_name_01/
# ......subject_name_02/
# .................t1/
# .........................subject_02_anything_t1.nii.gz
# .........................some_landmarks_of_vertebral_levels.nii.gz
# .........................subject_02_manual_segmentation_t1.nii.gz
# .................t2/
# .........................subject_02_anything_t2.nii.gz
# .........................some_landmarks_of_vertebral_levels.nii.gz
# .........................subject_02_manual_segmentation_t2.nii.gz
# .................t2star/
# .........................subject_02_anything_t2star.nii.gz
# .........................subject_02_manual_segmentation_t2star.nii.gz
# ......subject_name_03/
# .
# .
# .
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Sara Dupont, Benjamin De Leener
# Modified: 2015-09-30
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import commands
import platform
import signal
from time import time, strftime
from msct_parser import Parser
import sct_utils as sct
import os
import copy_reg
import types
import pandas as pd
import json
# get path of the toolbox
# TODO: put it back below when working again (julien 2016-04-04)
# <<<
# OLD
# status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# NEW
path_script = os.path.dirname(__file__)
path_sct = os.path.dirname(path_script)
# >>>
# append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
sys.path.append(path_sct + '/testing')
def _pickle_method(method):
"""
Author: Steven Bethard (author of argparse)
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
cls_name = ''
if func_name.startswith('__') and not func_name.endswith('__'):
cls_name = cls.__name__.lstrip('_')
if cls_name:
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""
Author: Steven Bethard
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def generate_data_list(folder_dataset, json_requirements=None, verbose=1):
"""
Construction of the data list from the data set
This function return a list of directory (in folder_dataset) in which the contrast is present.
:return data:
"""
data_subjects, subjects_dir = [], []
# each directory in folder_dataset should be a directory of a subject
for subject_dir in os.listdir(folder_dataset):
if not subject_dir.startswith('.') and os.path.isdir(folder_dataset + subject_dir):
if read_json(folder_dataset + subject_dir, json_requirements=json_requirements):
data_subjects.append(folder_dataset + subject_dir + '/')
subjects_dir.append(subject_dir)
if not data_subjects:
sct.printv('ERROR: No subject data were found in ' + folder_dataset + '. '
'Please organize your data correctly or provide a correct dataset.',
verbose=verbose, type='error')
return data_subjects, subjects_dir
def read_json(path_dir, json_requirements=None, fname_json='dataset_description.json'):
path_dir = sct.slash_at_the_end(path_dir, slash=1)
if fname_json not in os.listdir(path_dir) and json_requirements is not None:
accept_subject = False
elif json_requirements is None:
accept_subject = True
else:
json_file = open(path_dir+fname_json)
dic_info = json.load(json_file)
json_file.close()
# pass keys and items to lower case
dic_info = dict((k.lower(), v.lower()) for k, v in dic_info.iteritems())
# if no condition is not verified, accept subject
accept_subject = True
# read requirements:
list_conditions = json_requirements.split(',')
for condition in list_conditions:
key, val = condition.split('=')
key, val = key.lower(), val.lower()
# if key do not exist, do not accept subject
if key not in dic_info.keys():
accept_subject = False
# if value for this key is not the one required, do not accept subject
elif dic_info[key] != val:
accept_subject = False
return accept_subject
def process_results(results, subjects_name, function, folder_dataset, parameters):
try:
results_dataframe = pd.concat([result[2] for result in results])
results_dataframe.loc[:, 'subject'] = pd.Series(subjects_name, index=results_dataframe.index)
results_dataframe.loc[:, 'script'] = pd.Series([function]*len(subjects_name), index=results_dataframe.index)
results_dataframe.loc[:, 'dataset'] = pd.Series([folder_dataset]*len(subjects_name), index=results_dataframe.index)
results_dataframe.loc[:, 'parameters'] = pd.Series([parameters] * len(subjects_name), index=results_dataframe.index)
return results_dataframe
except KeyboardInterrupt:
return 'KeyboardException'
except Exception as e:
sct.printv('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), 1, 'warning')
sct.printv(str(e), 1, 'warning')
sys.exit(2)
def function_launcher(args):
import importlib
# append local script to PYTHONPATH for import
sys.path.append(os.path.abspath(os.curdir))
script_to_be_run = importlib.import_module('test_' + args[0]) # import function as a module
try:
output = script_to_be_run.test(*args[1:])
except:
import traceback
print('%s: %s' % ('test_' + args[0], traceback.format_exc()))
# output = (1, 'ERROR: Function crashed', 'No result')
from pandas import DataFrame
status_script = 1
output_script = 'ERROR: Function crashed.'
output = (status_script, output_script, DataFrame(data={'status': int(status_script), 'output': output_script}, index=['']))
return output
# return script_to_be_run.test(*args[1:])
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def test_function(function, folder_dataset, parameters='', nb_cpu=None, json_requirements=None, verbose=1):
"""
    Run a test function on the dataset using multiprocessing and save the results.
    :return: results, organized as tuples of (status, output, DataFrame with results)
"""
    # generate the list of subject data from the dataset folder
data_subjects, subjects_name = generate_data_list(folder_dataset, json_requirements=json_requirements)
    # Scripts that use ITK multithreading must not do so when multiprocessing over several subjects
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1"
from multiprocessing import Pool
# create datasets with parameters
import itertools
data_and_params = itertools.izip(itertools.repeat(function), data_subjects, itertools.repeat(parameters))
pool = Pool(processes=nb_cpu, initializer=init_worker)
try:
async_results = pool.map_async(function_launcher, data_and_params).get(9999999)
# results = process_results(async_results.get(9999999), subjects_name, function, folder_dataset, parameters) # get the sorted results once all jobs are finished
pool.close()
pool.join() # waiting for all the jobs to be done
results = process_results(async_results, subjects_name, function, folder_dataset, parameters) # get the sorted results once all jobs are finished
except KeyboardInterrupt:
print "\nWarning: Caught KeyboardInterrupt, terminating workers"
pool.terminate()
pool.join()
# return
# raise KeyboardInterrupt
# sys.exit(2)
except Exception as e:
sct.printv('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), 1, 'warning')
sct.printv(str(e), 1, 'warning')
pool.terminate()
pool.join()
# raise Exception
# sys.exit(2)
return results
def get_parser():
# Initialize parser
parser = Parser(__file__)
# Mandatory arguments
parser.usage.set_description("")
parser.add_option(name="-f",
type_value="str",
description="Function to test.",
mandatory=True,
example="sct_propseg")
parser.add_option(name="-d",
type_value="folder",
description="Dataset directory.",
mandatory=True,
example="dataset_full/")
parser.add_option(name="-p",
type_value="str",
description="Arguments to pass to the function that is tested. Please put double-quotes if there are spaces in the list of parameters.\n"
"Image paths must be contains in the arguments list.",
mandatory=False)
parser.add_option(name="-json",
type_value="str",
description="Requirements on center, study, ... that must be satisfied by the json file of each tested subjects\n"
"Syntax: center=unf,study=errsm,gm_model=0",
mandatory=False)
parser.add_option(name="-cpu-nb",
type_value="int",
description="Number of CPU used for testing. 0: no multiprocessing. If not provided, "
"it uses all the available cores.",
mandatory=False,
default_value=0,
example='42')
parser.add_option(name="-log",
type_value='multiple_choice',
description="Redirects Terminal verbose to log file.",
mandatory=False,
example=['0', '1'],
default_value='1')
parser.add_option(name="-v",
type_value="multiple_choice",
description="Verbose. 0: nothing, 1: basic, 2: extended.",
mandatory=False,
example=['0', '1', '2'],
default_value='1')
return parser
# ====================================================================================================
# Start program
# ====================================================================================================
if __name__ == "__main__":
# get parameters
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
function_to_test = arguments["-f"]
dataset = arguments["-d"]
dataset = sct.slash_at_the_end(dataset, slash=1)
parameters = ''
if "-p" in arguments:
parameters = arguments["-p"]
json_requirements = None
if "-json" in arguments:
json_requirements = arguments["-json"]
nb_cpu = None
if "-cpu-nb" in arguments:
nb_cpu = arguments["-cpu-nb"]
create_log = int(arguments['-log'])
verbose = arguments["-v"]
# start timer
start_time = time()
# create single time variable for output names
output_time = strftime("%y%m%d%H%M%S")
print 'Testing started on: '+strftime("%Y-%m-%d %H:%M:%S")
# build log file name
if create_log:
file_log = 'results_test_'+function_to_test+'_'+output_time
orig_stdout = sys.stdout
fname_log = file_log+'.log'
handle_log = file(fname_log, 'w')
# redirect to log file
sys.stdout = handle_log
print 'Testing started on: '+strftime("%Y-%m-%d %H:%M:%S")
# get path of the toolbox
path_script = os.path.dirname(__file__)
path_sct = os.path.dirname(path_script)
# fetch true commit number and branch (do not use commit.txt which is wrong)
path_curr = os.path.abspath(os.curdir)
os.chdir(path_sct)
sct_commit = commands.getoutput('git rev-parse HEAD')
if not sct_commit.isalnum():
print 'WARNING: Cannot retrieve SCT commit'
sct_commit = 'unknown'
sct_branch = 'unknown'
else:
sct_branch = commands.getoutput('git branch --contains '+sct_commit).strip('* ')
# with open (path_sct+"/version.txt", "r") as myfile:
# version_sct = myfile.read().replace('\n', '')
# with open (path_sct+"/commit.txt", "r") as myfile:
# commit_sct = myfile.read().replace('\n', '')
print 'SCT commit/branch: '+sct_commit+'/'+sct_branch
os.chdir(path_curr)
# check OS
platform_running = sys.platform
if (platform_running.find('darwin') != -1):
os_running = 'osx'
elif (platform_running.find('linux') != -1):
os_running = 'linux'
print 'OS: '+os_running+' ('+platform.platform()+')'
# check hostname
print 'Hostname:', platform.node()
# Check number of CPU cores
from multiprocessing import cpu_count
# status, output = sct.run('echo $ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS', 0)
print 'CPU cores: ' + str(cpu_count()) # + ', Used by SCT: '+output
# check RAM
sct.checkRAM(os_running, 0)
# test function
try:
results = test_function(function_to_test, dataset, parameters, nb_cpu, json_requirements, verbose)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
results_subset = results.drop('script', 1).drop('dataset', 1).drop('parameters', 1).drop('output', 1)
results_display = results_subset
# save panda structure
if create_log:
results_subset.to_pickle(file_log+'.pickle')
# mean
results_mean = results_subset[results_subset.status != 200].mean(numeric_only=True)
results_mean['subject'] = 'Mean'
results_mean.set_value('status', float('NaN')) # set status to NaN
# results_display = results_display.append(results_mean, ignore_index=True)
# std
results_std = results_subset[results_subset.status != 200].std(numeric_only=True)
results_std['subject'] = 'STD'
results_std.set_value('status', float('NaN')) # set status to NaN
# results_display = results_display.append(results_std, ignore_index=True)
# count tests that passed
count_passed = results_subset.status[results_subset.status == 0].count()
count_crashed = results_subset.status[results_subset.status == 1].count()
# count tests that ran
count_ran = results_subset.status[results_subset.status != 200].count()
# results_display = results_display.set_index('subject')
# jcohenadad, 2015-10-27: added .reset_index() for better visual clarity
results_display = results_display.set_index('subject').reset_index()
print '\nCommand: "' + function_to_test + ' ' + parameters
print 'Dataset: ' + dataset
# display general results
print '\nGLOBAL RESULTS:'
elapsed_time = time() - start_time
print 'Duration: ' + str(int(round(elapsed_time)))+'s'
# display results
print 'Passed: ' + str(count_passed) + '/' + str(count_ran)
print 'Crashed: ' + str(count_crashed) + '/' + str(count_ran)
# build mean/std entries
dict_mean = results_mean.to_dict()
dict_mean.pop('status')
dict_mean.pop('subject')
print 'Mean: ' + str(dict_mean)
dict_std = results_std.to_dict()
dict_std.pop('status')
dict_std.pop('subject')
print 'STD: ' + str(dict_std)
# print detailed results
print '\nDETAILED RESULTS:'
print results_display.to_string()
print 'Status: 0: Passed | 1: Crashed | 99: Failed | 200: Input file(s) missing | 201: Ground-truth file(s) missing'
except Exception as err:
print err
# stop file redirection
if create_log:
sys.stdout.close()
sys.stdout = orig_stdout
# display log file to Terminal
handle_log = file(fname_log, 'r')
print handle_log.read()
| mit | 3,687,924,555,566,886,400 | 38.228111 | 169 | 0.589662 | false |
SerpentCS/purchase-workflow | purchase_request/models/purchase_request.py | 1 | 10281 | # -*- coding: utf-8 -*-
# Copyright 2016 Eficent Business and IT Consulting Services S.L.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl-3.0).
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
_STATES = [
('draft', 'Draft'),
('to_approve', 'To be approved'),
('approved', 'Approved'),
('rejected', 'Rejected')
]
class PurchaseRequest(models.Model):
_name = 'purchase.request'
_description = 'Purchase Request'
_inherit = ['mail.thread', 'ir.needaction_mixin']
@api.model
def _company_get(self):
company_id = self.env['res.company']._company_default_get(self._name)
return self.env['res.company'].browse(company_id.id)
@api.model
def _get_default_requested_by(self):
return self.env['res.users'].browse(self.env.uid)
@api.model
def _get_default_name(self):
return self.env['ir.sequence'].get('purchase.request')
@api.model
def _default_picking_type(self):
type_obj = self.env['stock.picking.type']
company_id = self.env.context.get('company_id') or \
self.env.user.company_id.id
types = type_obj.search([('code', '=', 'incoming'),
('warehouse_id.company_id', '=', company_id)])
if not types:
types = type_obj.search([('code', '=', 'incoming'),
('warehouse_id', '=', False)])
return types[:1]
@api.multi
@api.depends('state')
def _compute_is_editable(self):
for rec in self:
if rec.state in ('to_approve', 'approved', 'rejected'):
rec.is_editable = False
else:
rec.is_editable = True
@api.multi
def _track_subtype(self, init_values):
for rec in self:
if 'state' in init_values and rec.state == 'to_approve':
return 'purchase_request.mt_request_to_approve'
elif 'state' in init_values and rec.state == 'approved':
return 'purchase_request.mt_request_approved'
elif 'state' in init_values and rec.state == 'rejected':
return 'purchase_request.mt_request_rejected'
return super(PurchaseRequest, self)._track_subtype(init_values)
name = fields.Char('Request Reference', size=32, required=True,
default=_get_default_name,
track_visibility='onchange')
origin = fields.Char('Source Document', size=32)
date_start = fields.Date('Creation date',
help="Date when the user initiated the "
"request.",
default=fields.Date.context_today,
track_visibility='onchange')
requested_by = fields.Many2one('res.users',
'Requested by',
required=True,
track_visibility='onchange',
default=_get_default_requested_by)
assigned_to = fields.Many2one('res.users', 'Approver',
track_visibility='onchange')
description = fields.Text('Description')
company_id = fields.Many2one('res.company', 'Company',
required=True,
default=_company_get,
track_visibility='onchange')
line_ids = fields.One2many('purchase.request.line', 'request_id',
'Products to Purchase',
readonly=False,
copy=True,
track_visibility='onchange')
state = fields.Selection(selection=_STATES,
string='Status',
index=True,
track_visibility='onchange',
required=True,
copy=False,
default='draft')
is_editable = fields.Boolean(string="Is editable",
compute="_compute_is_editable",
readonly=True)
picking_type_id = fields.Many2one('stock.picking.type',
'Picking Type', required=True,
default=_default_picking_type)
@api.multi
def copy(self, default=None):
default = dict(default or {})
self.ensure_one()
default.update({
'state': 'draft',
'name': self.env['ir.sequence'].get('purchase.request'),
})
return super(PurchaseRequest, self).copy(default)
@api.model
def create(self, vals):
request = super(PurchaseRequest, self).create(vals)
if vals.get('assigned_to'):
request.message_subscribe_users(user_ids=[request.assigned_to.id])
return request
@api.multi
def write(self, vals):
res = super(PurchaseRequest, self).write(vals)
for request in self:
if vals.get('assigned_to'):
self.message_subscribe_users(user_ids=[request.assigned_to.id])
return res
@api.multi
def button_draft(self):
for rec in self:
rec.state = 'draft'
return True
@api.multi
def button_to_approve(self):
for rec in self:
rec.state = 'to_approve'
return True
@api.multi
def button_approved(self):
for rec in self:
rec.state = 'approved'
return True
@api.multi
def button_rejected(self):
for rec in self:
rec.state = 'rejected'
return True
class PurchaseRequestLine(models.Model):
_name = "purchase.request.line"
_description = "Purchase Request Line"
_inherit = ['mail.thread', 'ir.needaction_mixin']
@api.multi
@api.depends('product_id', 'name', 'product_uom_id', 'product_qty',
'analytic_account_id', 'date_required', 'specifications')
def _compute_is_editable(self):
for rec in self:
if rec.request_id.state in ('to_approve', 'approved', 'rejected'):
rec.is_editable = False
else:
rec.is_editable = True
@api.multi
def _compute_supplier_id(self):
for rec in self:
if rec.product_id:
if rec.product_id.seller_ids:
rec.supplier_id = rec.product_id.seller_ids[0].name
product_id = fields.Many2one(
'product.product', 'Product',
domain=[('purchase_ok', '=', True)],
track_visibility='onchange')
name = fields.Char('Description', size=256,
track_visibility='onchange')
product_uom_id = fields.Many2one('product.uom', 'Product Unit of Measure',
track_visibility='onchange')
product_qty = fields.Float('Quantity', track_visibility='onchange',
digits_compute=dp.get_precision(
'Product Unit of Measure'))
request_id = fields.Many2one('purchase.request',
'Purchase Request',
ondelete='cascade', readonly=True)
company_id = fields.Many2one('res.company',
related='request_id.company_id',
string='Company',
store=True, readonly=True)
analytic_account_id = fields.Many2one('account.analytic.account',
'Analytic Account',
track_visibility='onchange')
requested_by = fields.Many2one('res.users',
related='request_id.requested_by',
string='Requested by',
store=True, readonly=True)
assigned_to = fields.Many2one('res.users',
related='request_id.assigned_to',
string='Assigned to',
store=True, readonly=True)
date_start = fields.Date(related='request_id.date_start',
string='Request Date', readonly=True,
store=True)
description = fields.Text(related='request_id.description',
string='Description', readonly=True,
store=True)
origin = fields.Char(related='request_id.origin',
size=32, string='Source Document', readonly=True,
store=True)
date_required = fields.Date(string='Request Date', required=True,
track_visibility='onchange',
default=fields.Date.context_today)
is_editable = fields.Boolean(string='Is editable',
compute="_compute_is_editable",
readonly=True)
specifications = fields.Text(string='Specifications')
request_state = fields.Selection(string='Request state',
readonly=True,
related='request_id.state',
selection=_STATES,
store=True)
supplier_id = fields.Many2one('res.partner',
string='Preferred supplier',
compute="_compute_supplier_id")
procurement_id = fields.Many2one('procurement.order',
'Procurement Order',
readonly=True)
@api.onchange('product_id')
def onchange_product_id(self):
if self.product_id:
name = self.product_id.name
if self.product_id.code:
name = '[%s] %s' % (name, self.product_id.code)
if self.product_id.description_purchase:
name += '\n' + self.product_id.description_purchase
self.product_uom_id = self.product_id.uom_id.id
self.product_qty = 1
self.name = name
| agpl-3.0 | -740,625,282,075,647,100 | 40.289157 | 79 | 0.508803 | false |
elzaggo/pydoop | pydoop/__init__.py | 1 | 5094 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
# DEV NOTE: some of the variables defined here (docstring included)
# are parsed by setup.py, check it before modifying them.
"""
Pydoop: a Python MapReduce and HDFS API for Hadoop
--------------------------------------------------
Pydoop is a Python interface to Hadoop that allows you to write
MapReduce applications and interact with HDFS in pure Python.
"""
import os
import errno
from importlib import import_module
import pydoop.hadoop_utils as hu
from pydoop.utils.py3compat import configparser, parser_read
try:
from pydoop.version import version as __version__
except ImportError: # should only happen at compile time
__version__ = None
_PATH_FINDER = hu.PathFinder()
_HADOOP_INFO = _PATH_FINDER.find() # fill the cache ASAP
__author__ = ", ".join((
"Simone Leo",
"Gianluigi Zanetti",
"Luca Pireddu",
"Francesco Cabras",
"Mauro Del Rio",
"Marco Enrico Piras",
))
__author_email__ = ", ".join((
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
))
__url__ = "http://crs4.github.io/pydoop"
__propfile_basename__ = "pydoop.properties"
def reset():
_PATH_FINDER.reset()
def hadoop_home():
return _PATH_FINDER.hadoop_home()
def hadoop_exec(hadoop_home=None):
return _PATH_FINDER.hadoop_exec(hadoop_home)
def mapred_exec(hadoop_home=None):
return _PATH_FINDER.mapred_exec(hadoop_home)
def hadoop_version(hadoop_home=None):
return _PATH_FINDER.hadoop_version(hadoop_home)
def hadoop_version_info(hadoop_home=None):
return _PATH_FINDER.hadoop_version_info(hadoop_home)
def has_mrv2(hadoop_home=None):
return _PATH_FINDER.hadoop_version_info(hadoop_home).has_mrv2()
def is_apache(hadoop_home=None):
return _PATH_FINDER.is_apache(hadoop_home)
def is_cloudera(hadoop_home=None):
return _PATH_FINDER.is_cloudera(hadoop_home)
def is_hortonworks(hadoop_home=None):
return _PATH_FINDER.is_hortonworks(hadoop_home)
def hadoop_conf(hadoop_home=None):
return _PATH_FINDER.hadoop_conf(hadoop_home)
def hadoop_params(hadoop_conf=None, hadoop_home=None):
return _PATH_FINDER.hadoop_params(hadoop_conf, hadoop_home)
def hadoop_native(hadoop_home=None):
return _PATH_FINDER.hadoop_native(hadoop_home)
def hadoop_classpath(hadoop_home=None):
return _PATH_FINDER.hadoop_classpath(hadoop_home)
def package_dir():
return os.path.dirname(os.path.abspath(__file__))
##############################
# Since Pydoop 1.0, we've stopped supporting installations for multiple
# Hadoop versions, so we only have a single module, so the following
# functions now return the same value regardless of the Hadoop version.
##############################
def jar_name(hadoop_vinfo=None):
return "pydoop.jar"
def jar_path(hadoop_vinfo=None):
path = os.path.join(package_dir(), jar_name())
if os.path.exists(path):
return path
else:
return None
def complete_mod_name(module, hadoop_vinfo=None):
return "%s.%s" % (__package__, module)
def import_version_specific_module(name):
return import_module(name)
# --- get properties ---
PROP_FN = os.path.join(
os.path.dirname(os.path.abspath(__file__)), __propfile_basename__
)
# http://stackoverflow.com/questions/2819696
class AddSectionWrapper(object):
SEC_NAME = 'dummy'
def __init__(self, f):
self.f = f
self.sechead = '[dummy]' + os.linesep
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readline(self):
if self.sechead:
try:
return self.sechead
finally:
self.sechead = None
else:
return self.f.readline()
def read_properties(fname):
parser = configparser.SafeConfigParser()
parser.optionxform = str # preserve key case
try:
with open(fname) as f:
parser_read(parser, AddSectionWrapper(f))
except IOError as e:
if e.errno != errno.ENOENT:
raise
return None # compile time, prop file is not there
return dict(parser.items(AddSectionWrapper.SEC_NAME))
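# Illustrative usage sketch (not part of the original module): given a
# Java-style properties file with no [section] header, e.g.
#
#     pydoop.version = 2.0.0
#     pydoop.jar = pydoop.jar
#
# read_properties(fname) returns {'pydoop.version': '2.0.0',
# 'pydoop.jar': 'pydoop.jar'}; the AddSectionWrapper above fakes the
# [dummy] section that configparser insists on. The function returns None
# when the file is missing (as happens at compile time).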
class LocalModeNotSupported(RuntimeError):
def __init__(self):
msg = 'ERROR: Hadoop is configured to run in local mode'
super(LocalModeNotSupported, self).__init__(msg)
| apache-2.0 | 339,911,344,888,792,400 | 24.59799 | 77 | 0.665096 | false |
mckinseyacademy/xblock-diagnosticfeedback | diagnostic_feedback/mixins.py | 1 | 3760 | from __future__ import absolute_import
import pkg_resources
from django import utils
from xblockutils.resources import ResourceLoader
from .config import student_assets, studio_assets
loader = ResourceLoader(__name__)
class XBlockWithTranslationServiceMixin(object):
"""
Mixin providing access to i18n service
"""
def _(self, text):
""" Translate text """
# noinspection PyUnresolvedReferences
return self.runtime.service(self, "i18n").ugettext(text)
class ResourceMixin(object):
"""
    Contains methods to load CSS/JS/HTML resources for the student and studio views
"""
def sort_resources_by_order(self, lst):
return sorted(lst, key=lambda x: x[1])
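    # Sketch of the assumed asset structure (hypothetical values, not taken
    # from the real config module): student_assets / studio_assets map
    # resource kinds to lists of (resource, order) tuples, e.g.
    #
    #     student_assets = {
    #         'css': [('public/css/quiz.css', 1)],
    #         'js': [('public/js/vendor.js', 1), ('public/js/quiz.js', 2)],
    #     }
    #
    # so sort_resources_by_order() loads vendor.js before quiz.js.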
@staticmethod
def resource_string(path):
"""Handy helper for getting resources."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def get_translation_content(self):
"""
Returns JS content containing translations for user's language.
"""
try:
return self.resource_string('public/js/translations/{lang}/textjs.js'.format(
lang=utils.translation.to_locale(utils.translation.get_language()),
))
except IOError:
return self.resource_string('public/js/translations/en/textjs.js')
@property
def i18n_service(self):
""" Obtains translation service """
return self.runtime.service(self, "i18n")
def add_templates(self, fragment, context, view):
# add templates in html fragment for studio/student view
templates = self.sort_resources_by_order(student_assets.get('templates', [])
if view == 'student' else studio_assets.get('templates', [])
)
for template_obj in templates:
template = template_obj[0]
fragment.add_content(loader.render_django_template(template, context, i18n_service=self.i18n_service))
fragment.add_javascript(self.get_translation_content())
def add_css(self, fragment, view):
# add css in fragment for studio/student view
css_resources = self.sort_resources_by_order(student_assets.get('css', [])
if view == 'student' else studio_assets.get('css', [])
)
for css_obj in css_resources:
css = css_obj[0]
if css.startswith('http'):
fragment.add_css_url(css)
else:
fragment.add_css_url(self.runtime.local_resource_url(self, css))
def add_js(self, fragment, view):
        # add js in fragment for studio/student view
js_resources = self.sort_resources_by_order(student_assets.get('js', [])
if view == 'student' else studio_assets.get('js', [])
)
for js_obj in js_resources:
js = js_obj[0]
if js.startswith('http'):
fragment.add_javascript_url(js)
else:
fragment.add_javascript_url(self.runtime.local_resource_url(self, js))
def initialize_js_classes(self, fragment, view, json_args):
# initialize js
js_classes = self.sort_resources_by_order(student_assets.get('js_classes', [])
if view == 'student' else studio_assets.get('js_classes', [])
)
for _class_obj in js_classes:
_class = _class_obj[0]
fragment.initialize_js(_class, json_args)
| agpl-3.0 | -4,865,101,097,367,604,000 | 37.367347 | 114 | 0.5625 | false |
CStaich/Repository01 | RPG.py | 1 | 11997 | import random
from math import ceil
import simpy #for simulating battle
#Program written by Charlie Staich
# [email protected]
# in fulfillment of Katas exercise for Roto
# To use, simply run in a console. You will be prompted with an easy menu.
#Purpose: an RPG item generator and battle simulator
# Battle Process:
#give each player random head, chest, feet armor and random weapon
#begin battle
#repeat below until a player's health < 0
#player with higher Agility attacks first
#check for attack hit
# - miss: pass
# - hit: check for counterattacks
# - no counter: hit lands (damage stat and chance)
# - counter: deflect up to 1/3 damage back
#wait to swing again until after (atkspeed) seconds
#player with lower agility attacks
#same as above
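# Worked example of a single hit (illustrative numbers only): a weapon with
# atkdamage 30 against an opponent whose armorlevel is 0.5 rolls
# armordeflect in [0.25, 0.5]; a roll of 0.4 lands the hit for
# 30 * (1.0 - 0.4) = 18 damage, i.e. armor absorbs a percentage of the
# damage as described for the armor items below.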
class Item:
#usage: newItem = Item()
#usage for specific itemtype: newItem = Item(5)
#itemtypes listed below [0,5]
itemtypes = ["Head Armor", "Chest Armor", "Feet Armor", "Melee Weapon", "Ranged Weapon", "Magic Weapon"]
def __init__ (self, decltypeid=None):
#initialize item variables
if decltypeid is not None: #option to specify armor type
self.typeid = decltypeid
else:
self.typeid = random.randint(0,5)
self.level = random.randint(0,10)
self.type = Item.itemtypes[self.typeid]
self.itemclass = int(ceil((self.typeid+1)/3.0)) #1 = armor, 2 = weapon
#Weapons: all
if self.itemclass == 2:
self.atkspeed = random.uniform(1.5, 2.5)
self.atkchance = 0.9 + (self.level * 0.05)
self.atkdamage = random.randint(5,9) * self.level
            self.dps = (self.atkdamage / self.atkspeed) * self.atkchance #damage per attack / seconds per attack
#Weapon modifiers: Ranged
if self.typeid == 4:
self.atkspeed = self.atkspeed * 0.75
self.atkdamage = self.atkdamage * 0.5
self.atkchance = self.atkchance * 0.75
#Weapon modifiers: Magic
if self.typeid == 5:
self.atkspeed = self.atkspeed * 1.5
self.atkdamage = self.atkdamage * 2.0
self.atkchance = self.atkchance * 0.9
#Armor: percent dmg reduction (30%/45%/25% head/chest/feet)
elif self.typeid == 0: #head armor
self.dmgabsorb = 0.30 * self.level / 10.0 * random.uniform(0.8,1.0)
elif self.typeid == 1: #chest armor
self.dmgabsorb = 0.45 * self.level / 10.0 * random.uniform(0.8,1.0)
elif self.typeid ==2: #foot armor
self.dmgabsorb = 0.25 * self.level / 10.0 * random.uniform(0.8,1.0)
#stat boosts
self.stats = [0,0,0] #Strength, Agility, Health
self.allstats = 0
        for i in range(3): #roll a boost chance for each stat (STR/AGI/HLT)
            statchance = self.level * 0.08
            if random.uniform(0.0,1.0) <= statchance:
                statboost = self.level/2.0 * random.uniform(1.0, 4.0)
self.stats[i] = self.stats[i] + statboost
self.allstats = self.allstats + statboost
#store
if self.itemclass == 1: #armor pricing (no dps)
self.buyprice = (((self.dmgabsorb * 100) * self.level) + (self.level * self.allstats)) * 100
elif self.itemclass == 2: #weapon pricing
self.buyprice = ((self.dps * self.level) + (self.level * self.allstats)) * 100
self.sellprice = self.buyprice * random.uniform(2.0,5.0) / 10.0
self.name = self.namegen()
def namegen(self):
#Generates a name for an item based on type and level
if self.typeid == 0: #Helm
root = random.choice(["Helm", "Headpiece", "Mask", "Helmet", "Hood", "Cowl"])
elif self.typeid == 1: #Chest
root = random.choice(["Armor", "Chestplate", "Cuirass"])
elif self.typeid == 2: #Feet
root = random.choice(["Greaves", "Boots", "Leggings", "Legs", "Shin Guards"])
elif self.typeid == 3: #Melee Weapon
root = random.choice(["Sword", "Scimitar", "Lance", "Greatsword", "Axe", "War Axe", "Dagger", "Mace", "Warhammer"])
elif self.typeid == 4: #Ranged Weapon
root = random.choice(["Sling", "Bow", "Longbow", "Handcannon"])
elif self.typeid == 5: #Magic Weapon
root = random.choice(["Flame Staff", "Water Staff", "Earth Staff", "Air Staff"])
#Prefix
if self.level == 10:
prefix = "Legendary"
elif self.level > 8:
if self.itemclass == 1: #Armor
prefix = "Epic"
else: #Weapon
prefix = "Brutal"
elif self.level > 6:
if self.itemclass == 1:
prefix = "Reinforced"
elif self.typeid == 5:
prefix = "Wicked" #staff
else:
prefix = "Tempered" #other weapons
elif self.level > 4:
if self.itemclass == 1:
prefix = "Rugged"
elif self.typeid == 5: #staff
prefix = "Twisted"
else:
prefix = "Worn"
elif self.level > 2:
if self.itemclass == 1:
prefix = "Tattered"
elif self.typeid == 5:
prefix = "Battered"
else:
prefix = "Dull"
else:
prefix = "Broken"
#Suffix
if self.allstats == 0:
suffix = ""
elif (self.stats[0] >= self.stats[1]) and (self.stats[0] >= self.stats[2]):
#Strength Dominant
suffix = " of Strength"
elif self.stats[1] >= self.stats[2]:
#Agility Dominant
suffix = " of Agility"
else:
#Health Dominant
suffix = " of Health"
return(prefix + " " + root + suffix)
class Player:
#generate player with random stats, armor, and weapon.
def __init__(self, name):
self.name = name
self.helmet = Item(0)
self.chest = Item(1)
self.feet = Item(2)
self.weapontype = random.randint(3,5)
self.weapon = Item(self.weapontype)
self.armorlevel = self.helmet.dmgabsorb + self.chest.dmgabsorb + self.feet.dmgabsorb
self.dps = self.weapon.dps
self.basestats = [random.randint(10,20),random.randint(10,20),random.randint(0,25)]
self.statups = [sum(x) for x in zip(self.helmet.stats, self.chest.stats, self.feet.stats, self.weapon.stats)]
self.stats = [sum(x) for x in zip(self.basestats, self.statups)]
self.health = self.stats[2] + 100
#adjusted atkspeed with agility multiplier
        self.atkspeed = self.weapon.atkspeed * (1 + (self.stats[1]/100.0))
        #adjusted atkdamage with strength multiplier
self.atkdamage = self.weapon.atkdamage + self.stats[0]
self.atkchance = self.weapon.atkchance
def describe(self):
print "Player: %s Class: %s" % (self.name, self.weapon.type)
print " STR: %.1f AGI: %.1f HLT: %.1f" % (self.stats[0], self.stats[1], self.stats[2])
print " DMG: %.1f RATE: %.2f " % (self.atkdamage, self.atkspeed)
print " ARMOR: %.1f COUNTER: %.1f " % (self.armorlevel, self.stats[1]/100)
print "Equipped (TOTAL LVL %d): " % (self.weapon.level + self.helmet.level + self.chest.level + self.feet.level)
print " %s: LVL %d" % (self.weapon.name, self.weapon.level)
print " %s: LVL %d" % (self.helmet.name, self.helmet.level)
print " %s: LVL %d" % (self.chest.name, self.chest.level)
print " %s: LVL %d" % (self.feet.name, self.feet.level)
def attack(env, thisplayer, opponent):
#SimPy simulation for an attacking player
#player with lower agility swings first
if thisplayer.stats[1] < opponent.stats[1]:
yield env.timeout(thisplayer.atkspeed)
while True:
#check if both players are alive
if opponent.health <= 0:
winner = thisplayer.name
loser = opponent.name
print("[%.2f]: %s has slain %s! The battle is over." % (env.now, winner, loser))
env.exit(value=thisplayer.name)
elif thisplayer.health <= 0:
winner = opponent.name
loser = thisplayer.name
env.exit(value=opponent.name)
#swing attempt
if random.random() <= thisplayer.atkchance:
                if random.random() <= opponent.stats[1]/200.0:
                    #opponent counterattacks, deflecting up to 1/3 damage back;
                    #the attacker's own armor absorbs a fraction of it
                    armordeflect = random.uniform(thisplayer.armorlevel/2.0, thisplayer.armorlevel)
                    counterdamage = thisplayer.atkdamage * (1.0 - armordeflect) * random.uniform(0.0,0.33)
print("[%.2f]: %s attacks, but %s counters with %s for %d damage" % (env.now, thisplayer.name, opponent.name, opponent.weapon.name, counterdamage))
thisplayer.health = thisplayer.health - counterdamage
else:
#hit
                    armordeflect = random.uniform(opponent.armorlevel/2.0, opponent.armorlevel)
                    hitdamage = thisplayer.atkdamage * (1.0 - armordeflect) #armor absorbs a fraction of damage
print("[%.2f]: %s attacks %s with %s for %d damage" % (env.now, thisplayer.name, opponent.name, thisplayer.weapon.name, hitdamage))
opponent.health = opponent.health - hitdamage
else:
#miss
print("[%.2f]: %s misses %s" % (env.now, thisplayer.name, opponent.name))
yield env.timeout(thisplayer.atkspeed)
def runbattle():
print("= = = = =")
player1 = Player("Cain")
player2 = Player("Abel")
player1.describe()
print("= = = = =")
player2.describe()
env = simpy.rt.RealtimeEnvironment(initial_time=0, factor=1.0, strict=True)
env.process(attack(env, player1, player2))
env.process(attack(env, player2, player1))
print("= = = = =")
print("Running Simulation")
print("[time]: event")
env.run()
print("Simulation Complete")
print("= = = = =")
def main():
menu = {}
menu['1']="Generate random loot"
menu['2']="Generate specific type of loot"
menu['3']="Generate player with random loot"
menu['4']="Simulate battle between random players"
menu['5']="Exit"
typemenu = {}
typemenu['1']="Headpiece"
typemenu['2']="Chestpiece"
typemenu['3']="Footpiece"
typemenu['4']="Melee Weapon"
typemenu['5']="Ranged Weapon"
typemenu['6']="Magic Weapon"
while True:
print("= = = = = = = = = =")
options = menu.keys()
options.sort()
for entry in options:
print entry, menu[entry]
sel = raw_input("Enter # of sel: ")
if sel == '1':
newItem = Item()
print("= = = = =")
print newItem.name + " with attributes:"
print(vars(newItem))
elif sel == '2':
typeoptions = typemenu.keys()
typeoptions.sort()
for entry in typeoptions:
print " ", entry, typemenu[entry]
typesel = raw_input(" Enter # of sel: ")
newItem = Item(int(typesel) - 1)
print("= = = = =")
print newItem.name + " with attributes:"
print(vars(newItem))
elif sel == '3':
newName = raw_input( "Enter name for player: ")
newPlayer = Player(newName)
print("= = = = =")
newPlayer.describe()
elif sel == '4':
print("= = = = =")
runbattle()
elif sel == '5':
break
else:
print "Unknown Selection, try again."
if __name__ == "__main__":
main()
| mit | -8,551,126,958,085,172,000 | 41.154676 | 163 | 0.541385 | false |
macosforge/ccs-calendarserver | txdav/common/datastore/podding/test/test_resource.py | 1 | 7770 | ##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.clsprop import classproperty
import txweb2.dav.test.util
from txweb2 import http_headers, responsecode
from txweb2.dav.util import allDataFromStream
from txweb2.test.test_server import SimpleRequest
from twisted.internet.defer import inlineCallbacks, succeed
from txdav.caldav.datastore.scheduling.ischedule.localservers import (
ServersDB, Server
)
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
import json
from txdav.common.datastore.podding.conduit import PoddingConduit
class ConduitPOST (CommonCommonTests, txweb2.dav.test.util.TestCase):
class FakeConduit(PoddingConduit):
def recv_fake(self, txn, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
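    # Dispatch convention assumed by these tests: the conduit maps a JSON
    # request {"action": "fake", ...} onto a recv_<action> method, so
    # recv_fake above handles action "fake" and echoes j["echo"] back.
    # An action with no matching recv_* method (see
    # test_receive_fake_conduit_no_action) produces a 400 BAD_REQUEST.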
@inlineCallbacks
def setUp(self):
yield super(ConduitPOST, self).setUp()
serversDB = ServersDB()
self.thisServer = Server("A", "http://127.0.0.1", "A", True)
serversDB.addServer(self.thisServer)
yield self.buildStoreAndDirectory(serversDB=serversDB)
self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): # @NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
"user03": {
"calendar_1": {
},
"inbox": {
},
},
}
@inlineCallbacks
def test_receive_no_secret(self):
"""
Cross-pod request fails when there is no shared secret header present.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("text/plain",)
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.FORBIDDEN)
@inlineCallbacks
def test_receive_wrong_mime(self):
"""
Cross-pod request fails when Content-Type header is wrong.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("text/plain",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_invalid_json(self):
"""
Cross-pod request fails when request data is not JSON.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_bad_json(self):
"""
Cross-pod request fails when JSON data does not have an "action".
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"foo":"bar"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_ping(self):
"""
Cross-pod request works with the "ping" action.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"ping"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.OK)
data = (yield allDataFromStream(response.stream))
j = json.loads(data)
self.assertTrue("result" in j)
self.assertEqual(j["result"], "ok")
@inlineCallbacks
def test_receive_fake_conduit_no_action(self):
"""
Cross-pod request fails when conduit does not support the action.
"""
store = self.storeUnderTest()
self.patch(store, "conduit", self.FakeConduit(store))
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"bogus",
"echo":"bravo"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_fake_conduit(self):
"""
Cross-pod request works when conduit does support the action.
"""
store = self.storeUnderTest()
self.patch(store, "conduit", self.FakeConduit(store))
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"fake",
"echo":"bravo"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.OK)
data = (yield allDataFromStream(response.stream))
j = json.loads(data)
self.assertTrue("result" in j)
self.assertEqual(j["result"], "ok")
self.assertTrue("value" in j)
self.assertEqual(j["value"], {"back2u": "bravo", "more": "bits"})
| apache-2.0 | 5,385,579,506,552,117,000 | 28.884615 | 106 | 0.566924 | false |
cbode/ssr | ssr_algore.py | 1 | 8571 | #!/usr/bin/env python
############################################################################
#
# MODULE: ssr_algore.py
# AUTHOR: Collin Bode, UC Berkeley
#
# PURPOSE:
# Al Gore Rhythm combines r.sun model with Light Penetration Index (LPI).
# Merges all the r.sun solar radiation runs into a single estimate of
# Total Solar Radiation in watt-hours per meter squared per day.
# Optional clear sky vs real sky. <-- only clear sky for now.
#
# Modified: Collin Bode, October, 2012
# Migrated to unified parameter set.
# Simplified all the tweaks: JuneLPI kept, removed normalization for LPI
#     R.sun calibration now separated from algorithm ("HalfDiff")
#
# COPYRIGHT: (c) 2011 Collin Bode
# (c) 2006 Hamish Bowman, and the GRASS Development Team
# (c) 2008 Glynn Clements, and the GRASS Development Team
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
# GLOBALS
global lf
global cores
global gisbase
global gisdbase
# MODULES
# GRASS & SSR environment setup for external use
from ssr_params import *
import os
import sys
gisdbase = os.path.abspath(gisdbase)
os.environ['GISBASE'] = gisbase
sys.path.append(os.path.join(os.environ['GISBASE'], "etc", "python"))
import grass.script as grass
import grass.script.setup as gsetup
# ssr_utilities must go after grass.script imports
from ssr_utilities import *
def main():
gsetup.init(gisbase, gisdbase, location, 'PERMANENT')
# Algorithms for combining Diffuse and Direct
# 'd' = old default value of 1,
# 'pl' = Power Law,Diffuse = 1.1224 * x^0.3157, R2 = 0.41. Direct = = 1.2567 * x, R2 = 0.78
# 'nl' = Natural Log,
# 'cl' = Cameau Linear, 'cn' = Cameau linear Normalized, nLPI = 1.428 * LPI, Diffuse = 0.94 * nLPI
# 'gn' = Gendron linear normalized, nLPI = 1.428 * LPI, Diffuse = 0.01719 + 1.024 * nLPI
# 'gl' = Gendron linear. no normalization. It overestimates field radiation.
# Input bare-earth r.sun diffuse is too high. Instead of changing Linke Turbidity, modified here.
# See weatherstations.xlsx for analysis.
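    # Illustrative pure-Python sketch of the 'gn' merge performed below with
    # r.mapcalc (plain variables here, not GRASS maps):
    #
    #     nlpi = 1.428 * lpi                      # normalized LPI
    #     subdiff = (0.01719 + 1.024 * nlpi) * diff
    #     subbeam = nlpi * beam
    #     subcanopy = subbeam + subdiff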
# Open log file
tlog = dt.datetime.strftime(dt.datetime.now(),"%Y-%m-%d_h%Hm%M")
lf = open(gisdbase+os.sep+'ssr_'+tlog+'_algore.log', 'a')
# Overwrite files?
ow = int(algore_run -1)
# Print parameters
printout('---------------------------------------',lf)
printout('-- ALGORITHM FOR CLEAR SKY RADIATION --',lf)
printout(' LPI year: '+year,lf)
printout(' LPI pref: '+lpipref,lf)
printout(' region: '+bregion,lf)
printout(' sun mapset: '+msun,lf)
printout(' SSR output mapset: '+mssr,lf)
printout(' max veg height: '+maxheight,lf)
printout(' Algorithm code: '+algore,lf)
printout('keep intermediates: '+str(keeptemp),lf)
printout(' overwrite files: '+str(ow),lf)
printout('---------------------------------------',lf)
# Run Algorithm
r1start = dt.datetime.now()
printout("Starting Al Gore Rhythm at "+str(r1start),lf)
# Goto Correct Mapset and make sure Region is correctly set (ssr_utilities)
mapset_gotocreate(mssr,bregion,C,lf)
# For each week
for doyn in range(5,366,7):
doy = str(doyn).zfill(3)
month = dt.datetime.strftime(dt.datetime(2011,1,1) + dt.timedelta(doyn -1),"%m")
printout("Processing Day of Year " + doy + " in month "+month,lf)
# Input Raster Layers
sundem = bregion + C + 'mdem'
suncan = bregion + C + 'mcan'
dembeam = sundem + doy + 'beam@'+msun
demdiff = sundem + doy + 'diff@'+msun
canbeam = suncan + doy + 'beam@'+msun
candiff = suncan + doy + 'diff@'+msun
canglob = suncan + doy + 'glob'
veg = vegheight+'@PERMANENT'
lpi = lpipref + 'm'+ month + '@' + mlpi # lpi_c30y14s17m01
if(lpivsjune == True):
lpi = lpipref + '06@' + mlpi
# Output Raster Layers
lpipart = C + 'm' + year + 's' + boxsize + 'm' + algore
if(lpivsjune == True):
lpipart = C + 'm' + year + 's' + boxsize+'mjune' + algore
ssr = 'ssr_'+ lpipart + doy
opencanopy = 'opencanopy_' + lpipart + doy
subcanopy = 'subcanopy_' + lpipart + doy
lpibeam = 'subbeam_' + lpipart + doy
lpidiff = 'subdiff_' + lpipart + doy
###################################################################
#1. SUBCANOPY Merge LPI and Bare-earth by Algorithm
printout("DOY "+doy+" 1. merging lpi and dem using: "+algore,lf)
if(algore == 'cl'): # 'cl' Cameau Linear regression
grass.mapcalc("$tmp_lpidiff = 0.94 * $lpi * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = $beam * $lpi", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
elif(algore == 'cn'): # 'cn' Cameau Normalized - assumes halfdiff is set to True
grass.mapcalc("$tmp_lpidiff = 0.94 * (1.428 * $lpi) * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = 1.428 * $beam * $lpi", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
elif(algore == 'gn'): #gn Diffuse Gendron Linear Normalized. y = 0.01719 + 1.024 * nLPI
grass.mapcalc("$tmp_lpidiff = 0.01719 + 1.024 * (1.428 * $lpi) * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = (1.428 * $lpi) * $beam", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
elif(algore == 'gl'): #gl Diffuse Gendron Linear NON-normalized y = 0.01719 + 1.024 * LPI
grass.mapcalc("$tmp_lpidiff = 0.01719 + 1.024 * $lpi * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = $lpi * $beam", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
else: # 'pl' power law
grass.mapcalc("$tmp_lpidiff = 1.1224 * ($lpi^0.3157) * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = 1.2567 * $beam * $lpi", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
grass.mapcalc("$subcanopy = $tmp_lpibeam + $tmp_lpidiff", subcanopy = subcanopy, tmp_lpidiff = lpidiff, tmp_lpibeam = lpibeam, overwrite = ow)
###################################################################
#2. OPEN CANOPY: Remove areas under tall trees (maxheight meters or higher)
printout('DOY '+doy+' 2. set subcanopy values to -88',lf)
grass.mapcalc("$canglob = $canbeam + $candiff",canglob = canglob, canbeam = canbeam, candiff = candiff,overwrite = ow)
grass.mapcalc("$opencanopy = if($veg < $maxheight, $canglob,-88)",opencanopy = opencanopy, veg = veg, canglob = canglob, maxheight = maxheight,overwrite = ow)
###################################################################
#3. Merge lpi*bare-earth with cleaned canopy, keeping whichever is higher.
printout("DOY "+doy+" 3. Merge lpi*dem with canopy shade = "+ssr,lf)
grass.mapcalc("$ssr = if($opencanopy > $subcanopy, $opencanopy, $subcanopy)", opencanopy = opencanopy, subcanopy = subcanopy,ssr = ssr,overwrite = ow)
grass.run_command("r.colors",map = ssr, color = "bcyr")
#4. Remove temp maps
if(keeptemp == False):
for raster in [lpibeam,lpidiff,opencanopy,subcanopy,canglob]:
grass.run_command("g.remove",rast=raster)
# Reset GRASS env values
grass.run_command("g.mapset", mapset="PERMANENT")
grass.run_command("g.region", flags = "d")
r1end = dt.datetime.now()
printout("Al can shake his booty, 'cause...",lf)
printout("DONE! with Al Gore Rhythm at "+str(r1end),lf)
printout("--------------------------------------",lf)
lf.close()
sys.exit("FINISHED.")
if __name__ == "__main__":
main()
"""
try:
#options, flags = grass.parser()
main()
except:
printout('ERROR! quitting.')
print traceback.print_exc()
traceback.print_exc(file=lf)
traceback.print_exc(file=sys.stdout)
finally:
lf.close()
sys.exit("FINISHED.")
"""
| gpl-2.0 | -8,142,489,074,652,205,000 | 46.882682 | 166 | 0.575779 | false |
google/ehr-predictions | ehr_prediction_modeling/tasks/mlp_task_layer.py | 1 | 4033 | # coding=utf-8
# Copyright 2020 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Modeling layers used by tasks."""
from typing import Any, Mapping
from ehr_prediction_modeling import types
from ehr_prediction_modeling.models import model_utils
from ehr_prediction_modeling.utils import activations
import sonnet as snt
import tensorflow.compat.v1 as tf
import tf_slim as slim
import tree
from ehr_prediction_modeling import configdict
HiddenTaskLayerType = snt.BatchApply
class MLPLayer():
"""MLP for dealing with task modeling layers."""
def __init__(self, config: configdict.ConfigDict,
num_task_targets: int) -> None:
self._config = config
self._task_layer = None
self._num_targets = num_task_targets
self._task_layer_sizes = self._config.get("task_layer_sizes", []).copy()
self._regularization_type = self._config.get("regularization_type",
types.RegularizationType.NONE)
self._regularization_weight = self._config.get("regularization_weight", 0.)
self._init_task_layer()
def _init_task_layer(self) -> None:
"""Initializes the fully connected task-specific layer of the model."""
self._task_layer = snt.BatchApply(
snt.nets.MLP(activate_final=False, **self.layer_kwargs))
@property
def layer_kwargs(self) -> Mapping[str, Any]:
"""Returns mapping of kwargs used for layer construction."""
layers = self._task_layer_sizes + [self._num_targets]
w_initializer = slim.xavier_initializer(
uniform=True, seed=None, dtype=tf.float32)
kwargs = {
"output_sizes":
layers,
"initializers": {
"w": w_initializer,
"b": tf.zeros_initializer
},
"activation":
activations.get_activation(self._config.get("activation", "relu")),
"name":
self._config.name
}
return kwargs
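  # Minimal sketch of an assumed task config (illustrative keys only, not a
  # definitive schema):
  #
  #     config = configdict.ConfigDict()
  #     config.name = "mortality_24h"
  #     config.task_layer_sizes = [64, 64]
  #     config.activation = "relu"
  #     layer = MLPLayer(config, num_task_targets=1)
  #
  # get_logits() then maps model output of shape [num_unroll, batch,
  # dim_model_output] to logits of shape [num_unroll, batch, 1, 1].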
def get_hidden_layer(self) -> HiddenTaskLayerType:
"""Fetches the task layer this class manages."""
return self._task_layer
def get_regularization_loss(self) -> tf.Tensor:
"""Gets the regularization loss on task-specific layers."""
regularizer = model_utils.get_regularizer(self._regularization_type,
self._regularization_weight)
if not regularizer:
return tf.constant(0.)
return slim.apply_regularization(regularizer,
self._task_layer.trainable_variables)
def _layer_logits(self, model_output: tf.Tensor) -> tf.Tensor:
"""Passes model output through task-specific layer to get logits."""
return self._task_layer(model_output)
def get_logits(self, model_output: tf.Tensor) -> tf.Tensor:
"""Passes model output through task-specific layers and formats output.
Args:
model_output: tensor of shape [num_unroll, batch_size, dim_model_output]
or List[num_unroll, batch_size, dim_model_output] when using SNRNN as a
model.
Returns:
Tensor of shape [num_unroll, batch_size, num_targets].
"""
logits = self._layer_logits(model_output)
num_unroll, batch_size, _ = tree.flatten(model_output)[0].shape
logits.shape.assert_is_compatible_with(
[num_unroll, batch_size, self._num_targets])
# Reshape the tensor from wnt -> wnct [num_unroll, batch_size, num_targets]
# -> [num_unroll, batch_size, channel, num_targets]
logits = tf.expand_dims(logits, axis=2)
return logits
| apache-2.0 | -1,933,572,256,669,902,300 | 36 | 79 | 0.668237 | false |
joelfrederico/Blowout | blowout/support.py | 1 | 4201 | import h5py as _h5
import numpy as _np
import logging as _logging
import time as _time
_logger = _logging.getLogger(__name__)
import ipdb as pdb
import re as _re
def _timestamp2filename(cls, ftype, filename=None):
# ======================================
# Get filename from timestamp
# ======================================
if filename is not None:
filename = '{}.{}.h5'.format(filename, ftype)
else:
try:
timestamp = cls.timestamp
except RuntimeError as err:
_logger.debug('Handled exception: {}'.format(err))
timestamp = _time.localtime()
filename = _time.strftime('%Y.%m.%d.%H%M.%S.{}.h5'.format(ftype), timestamp)
return filename
class Timestamp(object):
def __init__(self):
self._timestamp = None
def _set_timestamp(self, timestamp):
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is not None:
return self._timestamp
else:
raise RuntimeError('No timestamp: simulation not completed.')
def _write_arrays(group, name, data, parent=None):
grefs = group.create_group('_refs_{}'.format(name))
ref_dtype = _h5.special_dtype(ref=_h5.Reference)
dname = group.create_dataset(name, (_np.size(data),), dtype=ref_dtype)
# ======================================
# Create datasets
# ======================================
for i, array in enumerate(data):
if array.dtype == _np.dtype(object):
# ======================================
# If dataset can't be created, nest
# ======================================
darray = _write_arrays(grefs, '{}'.format(i), array, parent=name)
else:
darray = grefs.create_dataset(name='{}'.format(i), data=array, shape=_np.shape(array), compression="gzip")
# ======================================
# Store reference in dataset
# ======================================
dname[i] = darray.ref
# if parent == 'hist':
# pdb.set_trace()
# ======================================
# Return created dataset
# ======================================
return dname
def _read_arrays(group, name):
refs = group[name]
arrays = _np.empty(shape=refs.size, dtype=object)
for i, ref in enumerate(refs):
arrays[i] = group.file[ref].value
return arrays
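# Illustrative round trip (hypothetical file name): _write_arrays() stores
# each sub-array as its own gzip'd dataset under '_refs_<name>' plus an
# array of HDF5 object references under <name>; _read_arrays() dereferences
# them back:
#
#     with _h5.File('demo.h5', 'w') as f:
#         ragged = _np.array([_np.arange(3), _np.arange(5)], dtype=object)
#         _write_arrays(f, 'ragged', ragged)
#         parts = _read_arrays(f, 'ragged')  # [array([0, 1, 2]), array([0, 1, 2, 3, 4])]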
def _write_scalars(group, name, data):
return group.create_dataset(name=name, data=data, shape=_np.shape(data), compression="gzip")
def _write_data(group, name, data):
if data.dtype == _np.dtype(object):
_write_arrays(group, name, data)
else:
_write_scalars(group, name, data)
def _read_dict(group, name):
ret_group = group[name]
names = ret_group.keys()
valid_names = list()
underscore = _re.compile('_')
dict_layout = {'names': [], 'formats': []}
for nm in names:
if not underscore.match(nm):
valid_names.append(nm)
dict_layout['names'].append(nm)
if type(ret_group[nm].value[0]) == _h5.h5r.Reference:
dict_layout['formats'].append(object)
else:
                raise NotImplementedError("Haven't done this...")
results_flat = _np.zeros(len(ret_group[valid_names[0]]), dtype=dict_layout)
for nm in valid_names:
# if nm == 'hist':
# pdb.set_trace()
values = ret_group[nm]
for i, value in enumerate(values):
try:
array = group.file[value].value
if array.size > 0:
if type(array[0]) == _h5.h5r.Reference:
out = _np.empty(len(array), dtype=object)
for j, val in enumerate(array):
out[j] = group.file[val].value
else:
out = group.file[value].value
else:
out = _np.array([])
results_flat[nm][i] = out
except ValueError:
_logger.debug('There was a ValueError')
# pdb.set_trace()
return results_flat
| mit | 3,573,787,336,411,098,000 | 30.586466 | 118 | 0.502261 | false |
Saevon/spacebattle | menu/controllers.py | 1 | 4003 | from abstract.event_manager import EventManager, Mods
from pygame import locals as const
from ship import Ship
# Missing Mouse button constants
const.MOUSEKEY_LEFT = 1
const.MOUSEKEY_MIDDLE = 2
const.MOUSEKEY_RIGHT = 3
const.MOUSEKEY_SCROLLUP = 4
const.MOUSEKEY_SCROLLDOWN = 5
# Start up our pause_handler
pause_handler = EventManager()
# Start up our game_handler
game_handler = EventManager()
@pause_handler.quit
@pause_handler.shortcut(Mods.META, const.K_q)
@pause_handler.shortcut(Mods.ALT, const.K_q)
@game_handler.quit
@game_handler.shortcut(Mods.META, const.K_q)
@game_handler.shortcut(Mods.ALT, const.K_q)
def quit(context):
raise context.mediator.PopEvent()
@game_handler.keydown(const.K_SPACE)
def pause(context):
context.mediator.pause()
@pause_handler.keydown(const.K_SPACE)
def unpause(context):
raise context.mediator.ResumeEvent()
#################################################
# Ship Controls
# Players 1-4 (one handler per action, shared via const={'player': n})
@game_handler.keydown(const.K_d, const={'player': 1})
@game_handler.keydown(const.K_l, const={'player': 2})
@game_handler.keydown(const.K_RIGHT, const={'player': 3})
@game_handler.keydown(const.K_h, const={'player': 4})
def rotate_right(context, player):
context.mediator.players[player].rotate(Ship.ROT_RIGHT)
@game_handler.keyup(const.K_d, const={'player': 1})
@game_handler.keyup(const.K_l, const={'player': 2})
@game_handler.keyup(const.K_RIGHT, const={'player': 3})
@game_handler.keyup(const.K_h, const={'player': 4})
def rotate_right_stop(context, player):
context.mediator.players[player].rotate(Ship.ROT_RIGHT, stop=True)
@game_handler.keydown(const.K_a, const={'player': 1})
@game_handler.keydown(const.K_j, const={'player': 2})
@game_handler.keydown(const.K_LEFT, const={'player': 3})
@game_handler.keydown(const.K_f, const={'player': 4})
def rotate_left(context, player):
context.mediator.players[player].rotate(Ship.ROT_LEFT)
@game_handler.keyup(const.K_a, const={'player': 1})
@game_handler.keyup(const.K_j, const={'player': 2})
@game_handler.keyup(const.K_LEFT, const={'player': 3})
@game_handler.keyup(const.K_f, const={'player': 4})
def rotate_left_stop(context, player):
context.mediator.players[player].rotate(Ship.ROT_LEFT, stop=True)
@game_handler.keydown(const.K_w, const={'player': 1})
@game_handler.keydown(const.K_i, const={'player': 2})
@game_handler.keydown(const.K_UP, const={'player': 3})
@game_handler.keydown(const.K_t, const={'player': 4})
def move_up(context, player):
context.mediator.players[player].move(Ship.MOV_FORWARDS)
@game_handler.keyup(const.K_w, const={'player': 1})
@game_handler.keyup(const.K_i, const={'player': 2})
@game_handler.keyup(const.K_UP, const={'player': 3})
@game_handler.keyup(const.K_t, const={'player': 4})
def move_up_stop(context, player):
context.mediator.players[player].move(Ship.MOV_FORWARDS, stop=True)
@game_handler.keydown(const.K_s, const={'player': 1})
@game_handler.keydown(const.K_k, const={'player': 2})
@game_handler.keydown(const.K_DOWN, const={'player': 3})
@game_handler.keydown(const.K_g, const={'player': 4})
def move_down(context, player):
context.mediator.players[player].move(Ship.MOV_BACKWARDS)
@game_handler.keyup(const.K_s, const={'player': 1})
@game_handler.keyup(const.K_k, const={'player': 2})
@game_handler.keyup(const.K_DOWN, const={'player': 3})
@game_handler.keyup(const.K_g, const={'player': 4})
def move_down_stop(context, player):
context.mediator.players[player].move(Ship.MOV_BACKWARDS, stop=True)
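# Each action above is one shared handler: the key constant picks the
# binding and const={'player': n} tells the EventManager which player index
# to pass in. A hypothetical new binding would follow the same pattern
# (fire() is an assumed Ship method, shown for illustration only):
#
#     @game_handler.keydown(const.K_e, const={'player': 1})
#     def fire(context, player):
#         context.mediator.players[player].fire()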
#################################################
# Debug only code
DEBUG = False
if DEBUG:
def locate_key(val):
for key, value in const.__dict__.iteritems():
if value == val and (key.startswith('K_') or key.startswith('KMOD')):
print key
@game_handler.event(const.KEYDOWN)
def debug_keys(context):
print '-' * 15
print '-- Key'
locate_key(context.event.key)
print '-- Mod'
locate_key(context.event.mod)
print '-' * 15
| mit | 7,717,589,673,143,250,000 | 32.638655 | 81 | 0.681989 | false |
suutari-ai/shoop | shuup/campaigns/models/campaigns.py | 2 | 18214 | # This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import random
import string
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum
from parler.models import TranslatableModel, TranslatedFields
from shuup.campaigns.consts import (
CAMPAIGNS_CACHE_NAMESPACE, CATALOG_FILTER_CACHE_NAMESPACE,
CONTEXT_CONDITION_CACHE_NAMESPACE
)
from shuup.campaigns.models.basket_conditions import (
CategoryProductsBasketCondition, ProductsInBasketCondition
)
from shuup.campaigns.utils.campaigns import get_product_ids_and_quantities
from shuup.campaigns.utils.matcher import get_matching_for_product
from shuup.core import cache
from shuup.core.fields import InternalIdentifierField
from shuup.core.models import Category, Order, Shop
from shuup.core.utils import context_cache
from shuup.utils.analog import define_log_model
from shuup.utils.properties import MoneyPropped
class CampaignType(Enum):
CATALOG = 1
BASKET = 2
class CampaignQueryset(models.QuerySet):
def available(self, shop=None):
query = Q(
Q(active=True) &
(Q(start_datetime__isnull=True) | Q(start_datetime__lte=now())) &
(Q(end_datetime__isnull=True) | Q(end_datetime__gte=now()))
)
if shop:
query &= Q(shop=shop)
return self.filter(query)
class Campaign(MoneyPropped, TranslatableModel):
admin_url_suffix = None
shop = models.ForeignKey(Shop, verbose_name=_("shop"), help_text=_("The shop where the campaign is active."))
name = models.CharField(max_length=120, verbose_name=_("name"), help_text=_("The name for this campaign."))
# translations in subclass
identifier = InternalIdentifierField(unique=True)
active = models.BooleanField(default=False, verbose_name=_("active"), help_text=_(
"Check this if the campaign is currently active. Please also set a start and end date."
))
start_datetime = models.DateTimeField(null=True, blank=True, verbose_name=_("start date and time"), help_text=_(
"The date and time the campaign starts. This is only applicable if the campaign is marked as active."
))
end_datetime = models.DateTimeField(null=True, blank=True, verbose_name=_("end date and time"), help_text=_(
"The date and time the campaign ends. This is only applicable if the campaign is marked as active."
))
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("created by"))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("modified by"))
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_("created on"))
modified_on = models.DateTimeField(auto_now=True, editable=False, verbose_name=_("modified on"))
objects = CampaignQueryset.as_manager()
class Meta:
abstract = True
verbose_name = _('Campaign')
verbose_name_plural = _('Campaigns')
def save(self, *args, **kwargs):
super(Campaign, self).save(*args, **kwargs)
cache.bump_version(CAMPAIGNS_CACHE_NAMESPACE)
cache.bump_version(CONTEXT_CONDITION_CACHE_NAMESPACE)
cache.bump_version(CATALOG_FILTER_CACHE_NAMESPACE)
def is_available(self):
if not self.active: # move to manager?
return False
if self.start_datetime and self.end_datetime:
if self.start_datetime <= now() <= self.end_datetime:
return True
return False
elif self.start_datetime and not self.end_datetime:
if self.start_datetime > now():
return False
elif not self.start_datetime and self.end_datetime:
if self.end_datetime < now():
return False
return True
@property
def type(self):
return CampaignType.BASKET if isinstance(self, BasketCampaign) else CampaignType.CATALOG
class CatalogCampaign(Campaign):
_queryset = None
admin_url_suffix = "catalog_campaign"
conditions = models.ManyToManyField('ContextCondition', blank=True, related_name='campaign')
filters = models.ManyToManyField('CatalogFilter', blank=True, related_name='campaign')
translations = TranslatedFields(public_name=models.CharField(max_length=120, blank=True, help_text=_(
"The campaign name to show in the store front."
)))
def __str__(self):
return force_text(_("Catalog Campaign: %(name)s" % dict(name=self.name)))
def save(self, *args, **kwargs):
super(CatalogCampaign, self).save(*args, **kwargs)
self.filters.update(active=self.active)
for f in self.filters.all():
for matching_product in f.get_matching_shop_products():
context_cache.bump_cache_for_shop_product(matching_product)
self.conditions.update(active=self.active)
def rules_match(self, context, shop_product, matching_catalog_filters, matching_context_conditions):
if not self.is_available():
return False
# If rule has filters, all of them has to match
for filter_pk in self.filters.values_list("pk", flat=True):
if filter_pk not in matching_catalog_filters:
return False
# All filters match so let's check that also all the conditions match
for condition_pk in self.conditions.values_list("pk", flat=True):
if condition_pk not in matching_context_conditions:
return False
return True
@classmethod
def get_for_product(cls, shop_product):
matching_filters = get_matching_for_product(shop_product, provide_category="campaign_catalog_filter")
matching_conditions = get_matching_for_product(shop_product, provide_category="campaign_context_condition")
query_filter = Q(Q(filters__in=matching_filters) | Q(conditions__in=matching_conditions))
return cls.objects.available(shop=shop_product.shop).filter(query_filter).distinct()
@classmethod
def get_matching(cls, context, shop_product):
prod_ctx_cache_elements = dict(
customer=context.customer.pk or 0,
shop=context.shop.pk,
product_id=shop_product.pk)
namespace = CAMPAIGNS_CACHE_NAMESPACE
key = "%s:%s" % (namespace, hash(frozenset(prod_ctx_cache_elements.items())))
cached_matching = cache.get(key, None)
if cached_matching is not None:
return cached_matching
from shuup.campaigns.models.matching import get_matching_context_conditions, get_matching_catalog_filters
matching_context_conditions = get_matching_context_conditions(context)
matching_catalog_filters = get_matching_catalog_filters(shop_product)
if not (matching_context_conditions or matching_catalog_filters):
return []
# Get all possible campaign id's for matching context_conditions
campaigns_based_on_conditions = set(
cls.objects.filter(
active=True,
shop=context.shop,
conditions__id__in=matching_context_conditions
).values_list("pk", flat=True)
)
campaigns_based_on_catalog_filters = set()
if hasattr(cls, "filters"):
# Get all possible campaigns for matching catalog_filters
campaigns_based_on_catalog_filters = set(
cls.objects.filter(
active=True,
shop=context.shop,
filters__id__in=matching_catalog_filters
).values_list("pk", flat=True)
)
all_possible_campaigns_ids = (campaigns_based_on_conditions | campaigns_based_on_catalog_filters)
matching = []
for campaign in cls.objects.filter(id__in=all_possible_campaigns_ids):
if campaign.rules_match(context, shop_product, matching_catalog_filters, matching_context_conditions):
matching.append(campaign)
cache.set(key, matching, timeout=None)
return matching
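    # Caching note: the key is "%s:%s" % (CAMPAIGNS_CACHE_NAMESPACE,
    # hash(frozenset(prod_ctx_cache_elements.items()))), i.e. one cached
    # match list per (customer, shop, product) combination; Campaign.save()
    # bumps the namespace version, implicitly invalidating every cached
    # entry without touching individual keys.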
class BasketCampaign(Campaign):
admin_url_suffix = "basket_campaign"
basket_line_text = models.CharField(
max_length=120, verbose_name=_("basket line text"), help_text=_("This text will be shown in basket."))
conditions = models.ManyToManyField('BasketCondition', blank=True, related_name='campaign')
coupon = models.OneToOneField('Coupon', null=True, blank=True, related_name='campaign', verbose_name=_("coupon"))
translations = TranslatedFields(
public_name=models.CharField(max_length=120, verbose_name=_("public name"), help_text=_(
"The campaign name to show in the store front."
))
)
def __str__(self):
return force_text(_("Basket Campaign: %(name)s" % dict(name=self.name)))
def save(self, *args, **kwargs):
if self.coupon:
code_count_for_shop = BasketCampaign.objects.filter(
active=True, shop_id=self.shop.id, coupon__code=self.coupon.code)
if not self.id and code_count_for_shop.exists():
raise ValidationError(_("Can not have multiple active campaigns with same code."))
if self.id and code_count_for_shop.exclude(coupon_id=self.coupon.id).exists():
raise ValidationError(_("Can not have multiple active campaigns with same code."))
super(BasketCampaign, self).save(*args, **kwargs)
self.conditions.update(active=self.active)
@classmethod
def get_for_product(cls, shop_product):
matching_conditions = get_matching_for_product(
shop_product, provide_category="campaign_basket_condition")
matching_effects = get_matching_for_product(
shop_product, provide_category="campaign_basket_discount_effect_form")
matching_line_effects = get_matching_for_product(
shop_product, provide_category="campaign_basket_line_effect_form")
effects_q = Q(Q(line_effects__id__in=matching_line_effects) | Q(discount_effects__id__in=matching_effects))
matching_q = Q(Q(conditions__in=matching_conditions) | effects_q)
return cls.objects.available(shop=shop_product.shop).filter(matching_q).distinct()
@classmethod
def get_matching(cls, basket, lines):
matching = []
exclude_condition_ids = set()
product_id_to_qty = get_product_ids_and_quantities(basket)
# Get ProductsInBasketCondition's that can't match with the basket
products_in_basket_conditions_to_check = set(
ProductsInBasketCondition.objects.filter(
products__id__in=product_id_to_qty.keys()
).values_list("id", flat=True)
)
exclude_condition_ids |= set(
ProductsInBasketCondition.objects.exclude(
id__in=products_in_basket_conditions_to_check
).values_list("id", flat=True)
)
# Get CategoryProductsBasketCondition's that can't match with the basket
categories = set(Category.objects.filter(
shop_products__product_id__in=product_id_to_qty.keys()).values_list("id", flat=True))
category_products_in_basket_to_check = set(
CategoryProductsBasketCondition.objects.filter(categories__in=categories).values_list("id", flat=True)
)
exclude_condition_ids |= set(
CategoryProductsBasketCondition.objects.exclude(
id__in=category_products_in_basket_to_check
).values_list("id", flat=True)
)
queryset = cls.objects.filter(active=True, shop=basket.shop)
if exclude_condition_ids:
queryset = queryset.exclude(conditions__id__in=exclude_condition_ids)
for campaign in queryset.prefetch_related("conditions"):
if campaign.rules_match(basket, lines):
matching.append(campaign)
return matching
def rules_match(self, basket, lines):
"""
Check if basket rules match.
They will not match if
1) The campaign is not active
2) The campaign has attached coupon
which doesn't match or is not active
3) Any of the attached rules doesn't match
"""
if not self.is_available():
return False
if self.coupon and not (self.coupon.active and self.coupon.code.upper() in [c.upper() for c in basket.codes]):
return False
for rule in self.conditions.all():
if not rule.matches(basket, lines):
return False
return True
class CouponUsage(models.Model):
coupon = models.ForeignKey('Coupon', related_name='usages')
order = models.ForeignKey(Order, related_name='coupon_usages')
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("created by"))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("modified by"))
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_("created on"))
modified_on = models.DateTimeField(auto_now=True, editable=False, verbose_name=_("modified on"))
@classmethod
def add_usage(cls, order, coupon):
return cls.objects.create(order=order, coupon=coupon)
@python_2_unicode_compatible
class Coupon(models.Model):
admin_url_suffix = "coupon"
name_field = "code" # TODO: Document me
search_fields = ["code"] # used by Select2Multiple to know which fields use to search by
code = models.CharField(max_length=12)
usage_limit_customer = models.PositiveIntegerField(
blank=True, null=True,
verbose_name=_("usage limit per customer"), help_text=_("Limit the amount of usages per a single customer."))
usage_limit = models.PositiveIntegerField(
blank=True, null=True,
verbose_name=_("usage limit"),
help_text=_("Set the absolute limit of usages for this coupon. "
"If the limit is zero (0) coupon cannot be used."))
active = models.BooleanField(default=False, verbose_name=_("is active"))
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("created by"))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("modified by"))
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_("created on"))
modified_on = models.DateTimeField(auto_now=True, editable=False, verbose_name=_("modified on"))
def save(self, **kwargs):
campaign = BasketCampaign.objects.filter(active=True, coupon_id=self.id).first()
if campaign and BasketCampaign.objects.filter(
active=True, shop_id=campaign.shop.id, coupon__code=self.code).exclude(id=campaign.id).exists():
raise ValidationError(_("Can not have multiple active campaigns with same code."))
return super(Coupon, self).save(**kwargs)
@classmethod
def generate_code(cls, length=6):
if length > 12:
length = 12
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))
@property
def exhausted(self):
val = bool(self.usage_limit and self.usages.count() >= self.usage_limit)
return val
@property
def attached(self):
return BasketCampaign.objects.filter(coupon=self).exists()
def attach_to_campaign(self, campaign):
if not self.attached:
self.campaign = campaign
@classmethod
def is_usable(cls, code, customer):
try:
code = cls.objects.get(code__iexact=code, active=True)
return code.can_use_code(customer)
except cls.DoesNotExist:
return False
def can_use_code(self, customer):
"""
        Check if the customer can use the code

        :param customer: the customer attempting to use the code
        :type customer: `Contact` or None
        :rtype: bool
"""
if not self.active:
return False
if not self.attached:
return False
if self.usage_limit_customer:
if not customer or customer.is_anonymous:
return False
if (self.usages.filter(order__customer=customer, coupon=self).count() >= self.usage_limit_customer):
return False
return not self.exhausted
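    # Illustrative walk-through (hypothetical numbers): with usage_limit=10
    # and usage_limit_customer=2, an anonymous customer is always rejected
    # once a per-customer limit is set, a signed-in customer is rejected
    # after their 2nd usage, and everyone is rejected once total usages
    # reach 10 (the coupon is then exhausted).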
def use(self, order):
return CouponUsage.add_usage(order=order, coupon=self)
def increase_customer_usage_limit_by(self, amount):
if self.usage_limit_customer:
new_limit = self.usage_limit_customer + amount
else:
new_limit = self.usages.count() + amount
self.usage_limit_customer = new_limit
def increase_usage_limit_by(self, amount):
self.usage_limit = self.usage_limit + amount if self.usage_limit else (self.usages.count() + amount)
def has_been_used(self, usage_count=1):
""" See if code is used the times given """
return CouponUsage.objects.filter(coupon=self).count() >= usage_count
def __str__(self):
return self.code
CatalogCampaignLogEntry = define_log_model(CatalogCampaign)
BasketCampaignLogEntry = define_log_model(BasketCampaign)
CouponLogEntry = define_log_model(Coupon)
CouponUsageLogEntry = define_log_model(CouponUsage)
| agpl-3.0 | 2,415,510,597,969,143,000 | 40.022523 | 118 | 0.65521 | false |
pade/sprinkler | src/channel.py | 1 | 3188 | # -*- coding: UTF-8 -*-
'''
Created on 29 August 2016
@author: dassierp
'''
import logging
from progdays import Progdays
class Channel():
'''
Control a water channel
'''
def __init__(self, pName, pChNumber, pHwInterface):
'''
Constructor
@param pName: channel name
@param pChNumber: channel number, from 0 to number
of physical channel - 1
@param pHwInterface: a class derived from BaseGpio
'''
self.__nb = pChNumber
self.__hw = pHwInterface
self.__logger = logging.getLogger('sprinkler')
self.__is_enable = False
self.__manual = "AUTO"
self.__name = pName
self.__progdays = [Progdays(), Progdays()]
# On initialisation, stop water
self.__running = False
self.__logger.debug(
"Initialisation channel {} ({})".format(self.__name, self.__nb))
def _get_nb(self):
return self.__nb
def _set_enable(self, pEnable):
'''
@param pEnable: True to enable the channel (can be used)
'''
self.__is_enable = pEnable
if pEnable:
self.__logger.info(
"Channel {} ({}) is enabled".format(self.__name, self.__nb))
else:
self.__logger.info(
"Channel {} ({}) is disabled".format(self.__name, self.__nb))
def _get_enable(self):
return self.__is_enable
def _get_name(self):
return self.__name
def _set_name(self, pName):
self.__name = pName
def _get_running(self):
return self.__running
def _set_running(self, pState):
'''
        @param pState: boolean; if True, the channel runs,
        otherwise it stops.
        If the channel is not enabled, do nothing.
'''
if self.isenable is True:
if pState is True:
self.__running = True
self.__logger.debug(
"Channel {} ({}) ON".format(self.name, self.nb))
else:
self.__running = False
self.__logger.debug(
"Channel {} ({}) OFF".format(self.name, self.nb))
self.__hw.write(self.__nb, self.__running)
def _get_prog(self):
return self.__progdays
def _set_prog(self, progs):
'''
Set a new program
@param progs: Table of Progdays class
'''
self.__progdays = progs
def _set_manual(self, action):
""" Manual command, superseds program
@param action: must be: "OFF", "ON", or "AUTO"
"""
if action == "ON":
self.__manual = "ON"
elif action == "OFF":
self.__manual = "OFF"
else:
self.__manual = "AUTO"
def _get_manual(self):
return self.__manual
nb = property(_get_nb, None, None, None)
running = property(_get_running, _set_running, None, None)
isenable = property(_get_enable, _set_enable, None, None)
name = property(_get_name, _set_name, None, None)
progs = property(_get_prog, _set_prog, None, None)
manual = property(_get_manual, _set_manual, None, None)
| gpl-3.0 | -1,962,895,549,548,024,300 | 27.711712 | 77 | 0.531534 | false |
Letractively/rdflib | rdflib/plugins/parsers/notation3.py | 1 | 78910 | #!/usr/bin/env python
u"""
notation3.py - Standalone Notation3 Parser
Derived from CWM, the Closed World Machine
Authors of the original suite:
* Dan Connolly <@@>
* Tim Berners-Lee <@@>
* Yosi Scharf <@@>
* Joseph M. Reagle Jr. <[email protected]>
* Rich Salz <[email protected]>
http://www.w3.org/2000/10/swap/notation3.py
Copyright 2000-2007, World Wide Web Consortium.
Copyright 2001, MIT.
Copyright 2001, Zolera Systems Inc.
License: W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software
Modified by Sean B. Palmer
Copyright 2007, Sean B. Palmer. \u32E1
Modified to work with rdflib by Gunnar Aastrand Grimnes
Copyright 2010, Gunnar A. Grimnes
"""
# Python standard libraries
import types
import sys
import os
import re
import StringIO
import codecs
from binascii import a2b_hex
from decimal import Decimal
from rdflib.term import URIRef, BNode, Literal, Variable, _XSD_PFX, _unique_id
from rdflib.graph import QuotedGraph, ConjunctiveGraph
from rdflib import py3compat
b = py3compat.b
__all__ = ['URISyntaxError', 'BadSyntax', 'N3Parser', "verbosity", "setVerbosity", "progress", "splitFrag", "splitFragP", "join", "refTo", "base", "canonical", "runNamespace", "uniqueURI", "Canonicalize", "stripCR", "dummyWrite", "toBool", "stringToN3", "backslashUify", "hexify"]
from rdflib.parser import Parser
# Incestuous.. would be nice to separate N3 and XML
# from sax2rdf import XMLtoDOM
def XMLtoDOM(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
# SWAP http://www.w3.org/2000/10/swap
# from diag import verbosity, setVerbosity, progress
def verbosity(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def setVerbosity(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def progress(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def splitFrag(uriref):
"""split a URI reference between the fragment and the rest.
Punctuation is thrown away.
e.g.
>>> splitFrag("abc#def")
('abc', 'def')
>>> splitFrag("abcdef")
('abcdef', None)
"""
i = uriref.rfind("#")
if i >= 0:
return uriref[:i], uriref[i+1:]
else:
return uriref, None
def splitFragP(uriref, punct=0):
"""split a URI reference before the fragment
Punctuation is kept.
e.g.
>>> splitFragP("abc#def")
('abc', '#def')
>>> splitFragP("abcdef")
('abcdef', '')
"""
i = uriref.rfind("#")
if i >= 0:
return uriref[:i], uriref[i:]
else:
return uriref, ''
@py3compat.format_doctest_out
def join(here, there):
"""join an absolute URI and URI reference
(non-ascii characters are supported/doctested;
haven't checked the details of the IRI spec though)
here is assumed to be absolute.
there is URI reference.
>>> join('http://example/x/y/z', '../abc')
'http://example/x/abc'
Raise ValueError if there uses relative path
syntax but here has no hierarchical path.
>>> join('mid:foo@example', '../foo')
Traceback (most recent call last):
raise ValueError, here
ValueError: Base <mid:foo@example> has no slash after colon - with relative '../foo'.
>>> join('http://example/x/y/z', '')
'http://example/x/y/z'
>>> join('mid:foo@example', '#foo')
'mid:foo@example#foo'
We grok IRIs
>>> len(u'Andr\\xe9')
5
>>> join('http://example.org/', u'#Andr\\xe9')
%(u)s'http://example.org/#Andr\\xe9'
"""
assert(here.find("#") < 0), "Base may not contain hash: '%s'"% here # caller must splitFrag (why?)
slashl = there.find('/')
colonl = there.find(':')
# join(base, 'foo:/') -- absolute
if colonl >= 0 and (slashl < 0 or colonl < slashl):
return there
bcolonl = here.find(':')
assert(bcolonl >= 0), "Base uri '%s' is not absolute" % here # else it's not absolute
path, frag = splitFragP(there)
if not path:
return here + frag
# join('mid:foo@example', '../foo') bzzt
if here[bcolonl+1:bcolonl+2] != '/':
raise ValueError ("Base <%s> has no slash after colon - with relative '%s'." %(here, there))
if here[bcolonl+1:bcolonl+3] == '//':
bpath = here.find('/', bcolonl+3)
else:
bpath = bcolonl+1
# join('http://xyz', 'foo')
if bpath < 0:
bpath = len(here)
here = here + '/'
# join('http://xyz/', '//abc') => 'http://abc'
if there[:2] == '//':
return here[:bcolonl+1] + there
# join('http://xyz/', '/abc') => 'http://xyz/abc'
if there[:1] == '/':
return here[:bpath] + there
slashr = here.rfind('/')
while 1:
if path[:2] == './':
path = path[2:]
if path == '.':
path = ''
elif path[:3] == '../' or path == '..':
path = path[3:]
i = here.rfind('/', bpath, slashr)
if i >= 0:
here = here[:i+1]
slashr = i
else:
break
return here[:slashr+1] + path + frag
commonHost = re.compile(r'^[-_a-zA-Z0-9.]+:(//[^/]*)?/[^/]*$')
def refTo(base, uri):
"""figure out a relative URI reference from base to uri
>>> refTo('http://example/x/y/z', 'http://example/x/abc')
'../abc'
>>> refTo('file:/ex/x/y', 'file:/ex/x/q/r#s')
'q/r#s'
>>> refTo(None, 'http://ex/x/y')
'http://ex/x/y'
>>> refTo('http://ex/x/y', 'http://ex/x/y')
''
Note the relationship between refTo and join:
join(x, refTo(x, y)) == y
which points out certain strings which cannot be URIs. e.g.
>>> x='http://ex/x/y';y='http://ex/x/q:r';join(x, refTo(x, y)) == y
0
So 'http://ex/x/q:r' is not a URI. Use 'http://ex/x/q%3ar' instead:
>>> x='http://ex/x/y';y='http://ex/x/q%3ar';join(x, refTo(x, y)) == y
1
    This one checks that it uses a root-relative one where that is
all they share. Now uses root-relative where no path is shared.
This is a matter of taste but tends to give more resilience IMHO
-- and shorter paths
Note that base may be None, meaning no base. In some situations, there
    just ain't a base. Such is life. In these cases, relTo returns the absolute value.
The axiom abs(,rel(b,x))=x still holds.
This saves people having to set the base to "bogus:".
>>> refTo('http://ex/x/y/z', 'http://ex/r')
'/r'
"""
# assert base # don't mask bugs -danc # not a bug. -tim
if not base:
return uri
if base == uri:
return ""
# Find how many path segments in common
i = 0
while i < len(uri) and i<len(base):
if uri[i] == base[i]:
i = i + 1
else:
break
# print "# relative", base, uri, " same up to ", i
# i point to end of shortest one or first difference
m = commonHost.match(base[:i])
if m:
k = uri.find("//")
if k < 0:
k = -2 # no host
l = uri.find("/", k+2)
if uri[l+1:l+2] != "/" and base[l+1:l+2] != "/" and uri[:l] == base[:l]:
return uri[l:]
if uri[i:i+1] == "#" and len(base) == i:
return uri[i:] # fragment of base
while i > 0 and uri[i-1] != '/' :
i = i-1 # scan for slash
if i < 3:
return uri # No way.
if base.find("//", i-2) > 0 or uri.find("//", i-2) > 0:
return uri # An unshared "//"
if base.find(":", i) > 0:
return uri # An unshared ":"
n = base.count("/", i)
if n == 0 and i < len(uri) and uri[i] == '#':
return "./" + uri[i:]
elif n == 0 and i == len(uri):
return "./"
else:
return ("../" * n) + uri[i:]
def base():
"""The base URI for this process - the Web equiv of cwd
    Relative or absolute unix-standard filenames parsed relative to
    this yield the URI of the file.
If we had a reliable way of getting a computer name,
we should put it in the hostname just to prevent ambiguity
"""
# return "file://" + hostname + os.getcwd() + "/"
return "file://" + _fixslash(os.getcwd()) + "/"
def _fixslash(argstr):
""" Fix windowslike filename to unixlike - (#ifdef WINDOWS)"""
s = argstr
for i in range(len(s)):
if s[i] == "\\":
s = s[:i] + "/" + s[i+1:]
if s[0] != "/" and s[1] == ":":
s = s[2:] # @@@ Hack when drive letter present
return s
URI_unreserved = b("ABCDEFGHIJJLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
@py3compat.format_doctest_out
def canonical(str_in):
"""Convert equivalent URIs (or parts) to the same string
    There are many different levels of URI canonicalization
which are possible. See http://www.ietf.org/rfc/rfc3986.txt
Done:
    - Converting unicode IRI to utf-8
- Escaping all non-ASCII
- De-escaping, if escaped, ALPHA (%%41-%%5A and %%61-%%7A), DIGIT (%%30-%%39),
hyphen (%%2D), period (%%2E), underscore (%%5F), or tilde (%%7E) (Sect 2.4)
- Making all escapes uppercase hexadecimal
Not done:
- Making URI scheme lowercase
- changing /./ or /foo/../ to / with care not to change host part
>>> canonical("foo bar")
%(b)s'foo%%20bar'
>>> canonical(u'http:')
%(b)s'http:'
>>> canonical('fran%%c3%%83%%c2%%a7ois')
%(b)s'fran%%C3%%83%%C2%%A7ois'
>>> canonical('a')
%(b)s'a'
>>> canonical('%%4e')
%(b)s'N'
>>> canonical('%%9d')
%(b)s'%%9D'
>>> canonical('%%2f')
%(b)s'%%2F'
>>> canonical('%%2F')
%(b)s'%%2F'
"""
if type(str_in) == type(u''):
s8 = str_in.encode('utf-8')
else:
s8 = str_in
s = b('')
i = 0
while i < len(s8):
if py3compat.PY3:
n = s8[i]
ch = bytes([n])
else:
ch = s8[i]
n = ord(ch)
if (n > 126) or (n < 33) : # %-encode controls, SP, DEL, and utf-8
s += b("%%%02X" % ord(ch))
elif ch == b('%') and i+2 < len(s8):
ch2 = a2b_hex(s8[i+1:i+3])
if ch2 in URI_unreserved:
s += ch2
else:
s += b("%%%02X" % ord(ch2))
i = i + 3
continue
else:
s += ch
i = i + 1
return s
CONTEXT = 0
PRED = 1
SUBJ = 2
OBJ = 3
PARTS = PRED, SUBJ, OBJ
ALL4 = CONTEXT, PRED, SUBJ, OBJ
SYMBOL = 0
FORMULA = 1
LITERAL = 2
LITERAL_DT = 21
LITERAL_LANG = 22
ANONYMOUS = 3
XMLLITERAL = 25
Logic_NS = "http://www.w3.org/2000/10/swap/log#"
NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
forSomeSym = Logic_NS + "forSome"
forAllSym = Logic_NS + "forAll"
RDF_type_URI = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
RDF_NS_URI = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
OWL_NS = "http://www.w3.org/2002/07/owl#"
DAML_sameAs_URI = OWL_NS+"sameAs"
parsesTo_URI = Logic_NS + "parsesTo"
RDF_spec = "http://www.w3.org/TR/REC-rdf-syntax/"
List_NS = RDF_NS_URI # From 20030808
_Old_Logic_NS = "http://www.w3.org/2000/10/swap/log.n3#"
N3_first = (SYMBOL, List_NS + "first")
N3_rest = (SYMBOL, List_NS + "rest")
N3_li = (SYMBOL, List_NS + "li")
N3_nil = (SYMBOL, List_NS + "nil")
N3_List = (SYMBOL, List_NS + "List")
N3_Empty = (SYMBOL, List_NS + "Empty")
runNamespaceValue = None
def runNamespace():
"Return a URI suitable as a namespace for run-local objects"
# @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue == None:
runNamespaceValue = join(base(), _unique_id()) + '#'
return runNamespaceValue
nextu = 0
def uniqueURI():
"A unique URI"
global nextu
nextu += 1
return runNamespace() + "u_" + `nextu`
class URISyntaxError(ValueError):
"""A parameter is passed to a routine that requires a URI reference"""
pass
tracking = False
chatty_flag = 50
from xml.dom import Node
try:
from xml.ns import XMLNS
except:
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
_IN_XML_NS = lambda n: n.namespaceURI == XMLNS.XML
_inclusive = lambda n: n.unsuppressedPrefixes == None
# Does a document/PI have lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1, n2):
'''_sorter(n1, n2) -> int
Sorting predicate for non-NS attributes.'''
i = cmp(n1.namespaceURI, n2.namespaceURI)
if i:
return i
return cmp(n1.localName, n2.localName)
def _sorter_ns(n1, n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."'''
if n1[0] == 'xmlns':
return -1
if n2[0] == 'xmlns':
return 1
return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
    Return true if that namespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix:
return 1
return 0
#_in_subset = lambda subset, node: not subset or node in subset
_in_subset = lambda subset, node: subset is None or node in subset # rich's tweak
class _implementation:
    '''Implementation class for C14N. This accompanies a node during its
processing and includes the parameters and processing state.'''
# Handler for each node type; populated during module instantiation.
handlers = {}
def __init__(self, node, write, **kw):
'''Create and run the implementation.'''
self.write = write
self.subset = kw.get('subset')
self.comments = kw.get('comments', 0)
self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })
# Processing state.
self.state = (nsdict, {'xml':''}, {}) #0422
if node.nodeType == Node.DOCUMENT_NODE:
self._do_document(node)
elif node.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
if not _inclusive(self):
self._do_element(node)
else:
inherited = self._inherit_context(node)
self._do_element(node, inherited)
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
elif node.nodeType == Node.TEXT_NODE:
self._do_text(node)
else:
raise TypeError, str(node)
def _inherit_context(self, node):
'''_inherit_context(self, node) -> list
Scan ancestors of attribute and namespace context. Used only
for single element node canonicalization, not for subset
canonicalization.'''
# Collect the initial list of xml:foo attributes.
xmlattrs = filter(_IN_XML_NS, _attrs(node))
# Walk up and get all xml:XXX attributes we inherit.
inherited, parent = [], node.parentNode
while parent and parent.nodeType == Node.ELEMENT_NODE:
for a in filter(_IN_XML_NS, _attrs(parent)):
n = a.localName
if n not in xmlattrs:
xmlattrs.append(n)
inherited.append(a)
parent = parent.parentNode
return inherited
def _do_document(self, node):
'''_do_document(self, node) -> None
Process a document node. documentOrder holds whether the document
element has been encountered such that PIs/comments can be written
as specified.'''
self.documentOrder = _LesserElement
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
self._do_element(child)
self.documentOrder = _GreaterElement # After document element
elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
self._do_pi(child)
elif child.nodeType == Node.COMMENT_NODE:
self._do_comment(child)
elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(child)
handlers[Node.DOCUMENT_NODE] = _do_document
def _do_text(self, node):
'''_do_text(self, node) -> None
Process a text or CDATA node. Render various special characters
as their C14N entity representations.'''
if not _in_subset(self.subset, node):
return
s = node.data.replace("&", "&")
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace("\015", "
")
if s:
self.write(s)
handlers[Node.TEXT_NODE] = _do_text
handlers[Node.CDATA_SECTION_NODE] = _do_text
def _do_pi(self, node):
'''_do_pi(self, node) -> None
Process a PI node. Render a leading or trailing #xA if the
document order of the PI is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node):
return
W = self.write
if self.documentOrder == _GreaterElement:
W('\n')
W('<?')
W(node.nodeName)
s = node.data
if s:
W(' ')
W(s)
W('?>')
if self.documentOrder == _LesserElement:
W('\n')
handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi
def _do_comment(self, node):
'''_do_comment(self, node) -> None
Process a comment node. Render a leading or trailing #xA if the
document order of the comment is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node):
return
if self.comments:
W = self.write
if self.documentOrder == _GreaterElement:
W('\n')
W('<!--')
W(node.data)
W('-->')
if self.documentOrder == _LesserElement:
W('\n')
handlers[Node.COMMENT_NODE] = _do_comment
def _do_attr(self, n, value):
        '''_do_attr(self, node) -> None
Process an attribute.'''
W = self.write
W(' ')
W(n)
W('="')
        s = value.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace('"', '&quot;')
        s = s.replace('\011', '&#x9;')
        s = s.replace('\012', '&#xA;')
        s = s.replace('\015', '&#xD;')
W(s)
W('"')
def _do_element(self, node, initial_other_attrs = []):
'''_do_element(self, node, initial_other_attrs = []) -> None
Process an element (and its children).'''
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# ns_local -- NS declarations relevant to this element
# xml_attrs -- Attributes in XML namespace from parent
# xml_attrs_local -- Local attributes in XML namespace.
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1].copy(), self.state[2].copy() #0422
ns_local = ns_parent.copy()
xml_attrs_local = {}
# progress("_do_element node.nodeName=", node.nodeName)
# progress("_do_element node.namespaceURI", node.namespaceURI)
# progress("_do_element node.tocml()", node.toxml())
# Divide attributes into NS, XML, and others.
other_attrs = initial_other_attrs[:]
in_subset = _in_subset(self.subset, node)
for a in _attrs(node):
# progress("\t_do_element a.nodeName=", a.nodeName)
if a.namespaceURI == XMLNS.BASE:
n = a.nodeName
if n == "xmlns:":
n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == XMLNS.XML:
if _inclusive(self) or in_subset:
xml_attrs_local[a.nodeName] = a #0426
else:
other_attrs.append(a)
#add local xml:foo attributes to ancestor's xml:foo attributes
xml_attrs.update(xml_attrs_local)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n, v in ns_local.items():
# If default namespace is XMLNS.BASE or empty,
# and if an ancestor was the same
if n == "xmlns" and v in [ XMLNS.BASE, '' ] \
and ns_rendered.get('xmlns') in [ XMLNS.BASE, '', None ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n in ["xmlns:xml", "xml"] \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If not previously rendered
# and it's inclusive or utilized
if (n, v) not in ns_rendered.items() \
and (_inclusive(self) or \
_utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
ns_to_render.append((n, v))
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(_sorter_ns)
for n, v in ns_to_render:
self._do_attr(n, v)
ns_rendered[n] = v #0417
# If exclusive or the parent is in the subset, add the local xml attributes
# Else, add all local and ancestor xml attributes
# Sort and render the attributes.
if not _inclusive(self) or _in_subset(self.subset, node.parentNode): #0426
other_attrs.extend(xml_attrs_local.values())
else:
other_attrs.extend(xml_attrs.values())
other_attrs.sort(_sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
for c in _children(node):
_implementation.handlers[c.nodeType](self, c)
self.state = state
if name:
W('</%s>' % name)
handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
Canonicalize a DOM document/element node and all descendents.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict -- a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments -- keep comments if non-zero (default is 0)
subset -- Canonical XML subsetting resulting from XPath (default is [])
unsuppressedPrefixes -- do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
apply(_implementation, (node, output.write), kw)
else:
s = StringIO.StringIO()
apply(_implementation, (node, s.write), kw)
return s.getvalue()
# end of xmlC14n.py
# from why import BecauseOfData, becauseSubexpression
def BecauseOfData(*args, **kargs):
# print args, kargs
pass
def becauseSubexpression(*args, **kargs):
# print args, kargs
pass
N3_forSome_URI = forSomeSym
N3_forAll_URI = forAllSym
# Magic resources we know about
ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
# This is the hash on namespace URIs
RDF_type = ( SYMBOL , RDF_type_URI )
DAML_sameAs = ( SYMBOL, DAML_sameAs_URI )
LOG_implies_URI = "http://www.w3.org/2000/10/swap/log#implies"
BOOLEAN_DATATYPE = _XSD_PFX + "boolean"
DECIMAL_DATATYPE = _XSD_PFX + "decimal"
DOUBLE_DATATYPE = _XSD_PFX + "double"
FLOAT_DATATYPE = _XSD_PFX + "float"
INTEGER_DATATYPE = _XSD_PFX + "integer"
option_noregen = 0 # If set, do not regenerate genids on output
# @@ I18n - the notname chars need extending for well known unicode non-text
# characters. The XML spec switched to assuming unknown things were name
# characters.
# _namechars = string.lowercase + string.uppercase + string.digits + '_-'
_notQNameChars = "\t\r\n !\"#$%&'()*.,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
_rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
N3CommentCharacter = "#" # For unix script #! compatibility
########################################## Parse string to sink
#
# Regular expressions:
eol = re.compile(r'[ \t]*(#[^\n]*)?\r?\n') # end of line, poss. w/comment
eof = re.compile(r'[ \t]*(#[^\n]*)?$') # end of file, poss. w/comment
ws = re.compile(r'[ \t]*') # Whitespace not including NL
signed_integer = re.compile(r'[-+]?[0-9]+') # integer
number_syntax = re.compile(r'(?P<integer>[-+]?[0-9]+)(?P<decimal>\.[0-9]+)?(?P<exponent>(?:e|E)[-+]?[0-9]+)?')
digitstring = re.compile(r'[0-9]+') # Unsigned integer
interesting = re.compile(r'[\\\r\n\"]')
langcode = re.compile(r'[a-zA-Z0-9]+(-[a-zA-Z0-9]+)?')
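# For example (sketch): number_syntax matches "42" (integer group only),
# "-1.5" (integer + decimal) and "6.02e23" (all three groups); langcode
# matches tags such as "en" or "en-US".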
#"
class SinkParser:
def __init__(self, store, openFormula=None, thisDoc="", baseURI=None,
genPrefix = "", flags="",
why=None):
""" note: namespace names should *not* end in #;
the # will get added during qname processing """
self._bindings = {}
self._flags = flags
if thisDoc != "":
assert ':' in thisDoc, "Document URI not absolute: <%s>" % thisDoc
self._bindings[""] = thisDoc + "#" # default
self._store = store
if genPrefix:
store.setGenPrefix(genPrefix) # pass it on
self._thisDoc = thisDoc
self.lines = 0 # for error handling
self.startOfLine = 0 # For calculating character number
self._genPrefix = genPrefix
self.keywords = ['a', 'this', 'bind', 'has', 'is', 'of', 'true', 'false' ]
        self.keywordsSet = 0 # Only then can others be considered qnames
self._anonymousNodes = {} # Dict of anon nodes already declared ln: Term
self._variables = {}
self._parentVariables = {}
self._reason = why # Why the parser was asked to parse this
self._reason2 = None # Why these triples
# was: diag.tracking
if tracking:
self._reason2 = BecauseOfData(
store.newSymbol(thisDoc), because=self._reason)
if baseURI:
self._baseURI = baseURI
else:
if thisDoc:
self._baseURI = thisDoc
else:
self._baseURI = None
assert not self._baseURI or ':' in self._baseURI
if not self._genPrefix:
if self._thisDoc:
self._genPrefix = self._thisDoc + "#_g"
else:
self._genPrefix = uniqueURI()
if openFormula == None:
if self._thisDoc:
self._formula = store.newFormula(thisDoc + "#_formula")
else:
self._formula = store.newFormula()
else:
self._formula = openFormula
self._context = self._formula
self._parentContext = None
def here(self, i):
"""String generated from position in file
        This is for repeatability when referring people to bnodes in a document.
This has diagnostic uses less formally, as it should point one to which
bnode the arbitrary identifier actually is. It gives the
        line and character number of the '[' character or path character
which introduced the blank node. The first blank node is boringly _L1C1.
It used to be used only for tracking, but for tests in general
it makes the canonical ordering of bnodes repeatable."""
return "%s_L%iC%i" % (self._genPrefix , self.lines,
i - self.startOfLine + 1)
def formula(self):
return self._formula
def loadStream(self, stream):
return self.loadBuf(stream.read()) # Not ideal
def loadBuf(self, buf):
"""Parses a buffer and returns its top level formula"""
self.startDoc()
self.feed(buf)
return self.endDoc() # self._formula
def feed(self, octets):
"""Feed an octet stream tothe parser
if BadSyntax is raised, the string
passed in the exception object is the
remainder after any statements have been parsed.
So if there is more data to feed to the
parser, it should be straightforward to recover."""
if not isinstance(octets, unicode):
s = octets.decode('utf-8')
# NB already decoded, so \ufeff
if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode('utf-8'):
s = s[1:]
else:
s = octets
i = 0
while i >= 0:
j = self.skipSpace(s, i)
if j < 0:
return
i = self.directiveOrStatement(s, j)
if i < 0:
print("# next char: %s" % s[j])
raise BadSyntax(self._thisDoc, self.lines, s, j,
"expected directive or statement")
def directiveOrStatement(self, argstr, h):
i = self.skipSpace(argstr, h)
if i < 0:
return i # EOF
j = self.directive(argstr, i)
if j >= 0:
return self.checkDot(argstr, j)
j = self.statement(argstr, i)
if j >= 0:
return self.checkDot(argstr, j)
return j
#@@I18N
global _notNameChars
#_namechars = string.lowercase + string.uppercase + string.digits + '_-'
def tok(self, tok, argstr, i):
"""Check for keyword. Space must have been stripped on entry and
we must not be at end of file."""
assert tok[0] not in _notNameChars # not for punctuation
if argstr[i:i+1] == "@":
i = i+1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
if (argstr[i:i+len(tok)] == tok
and (argstr[i+len(tok)] in _notQNameChars )):
i = i + len(tok)
return i
else:
return -1
def directive(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
res = []
j = self.tok('bind', argstr, i) # implied "#". Obsolete.
if j > 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"keyword bind is obsolete: use @prefix")
j = self.tok('keywords', argstr, i)
if j > 0:
i = self.commaSeparatedList(argstr, j, res, self.bareWord)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"'@keywords' needs comma separated list of words")
self.setKeywords(res[:])
# was: diag.chatty_flag
if chatty_flag > 80:
progress("Keywords ", self.keywords)
return i
j = self.tok('forAll', argstr, i)
if j > 0:
i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Bad variable list after @forAll")
for x in res:
#self._context.declareUniversal(x)
if x not in self._variables or x in self._parentVariables:
self._variables[x] = self._context.newUniversal(x)
return i
j = self.tok('forSome', argstr, i)
if j > 0:
            i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Bad variable list after @forSome")
for x in res:
self._context.declareExistential(x)
return i
j = self.tok('prefix', argstr, i) # no implied "#"
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"With no base URI, cannot use relative URI in @prefix <"+ns+">")
assert ':' in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
j = self.tok('base', argstr, i) # Added 2007/7/7
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"With no previous base URI, cannot use relative URI in @base <"+ns+">")
assert ':' in ns # must be absolute
self._baseURI = ns
return i
return -1 # Not a directive, could be something else.
def bind(self, qn, uri):
assert isinstance(uri,
types.StringType), "Any unicode must be %x-encoded already"
if qn == "":
self._store.setDefaultNamespace(uri)
else:
self._store.bind(qn, uri)
def setKeywords(self, k):
"Takes a list of strings"
if k == None:
self.keywordsSet = 0
else:
self.keywords = k
self.keywordsSet = 1
def startDoc(self):
# was: self._store.startDoc()
self._store.startDoc(self._formula)
def endDoc(self):
"""Signal end of document and stop parsing. returns formula"""
self._store.endDoc(self._formula) # don't canonicalize yet
return self._formula
def makeStatement(self, quadruple):
#$$$$$$$$$$$$$$$$$$$$$
# print "# Parser output: ", `quadruple`
self._store.makeStatement(quadruple, why=self._reason2)
def statement(self, argstr, i):
r = []
i = self.object(argstr, i, r) # Allow literal for subject - extends RDF
if i < 0:
return i
j = self.property_list(argstr, i, r[0])
if j < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "expected propertylist")
return j
def subject(self, argstr, i, res):
return self.item(argstr, i, res)
def verb(self, argstr, i, res):
""" has _prop_
is _prop_ of
a
=
_prop_
>- prop ->
<- prop -<
_operator_"""
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
r = []
j = self.tok('has', argstr, i)
if j >= 0:
i = self.prop(argstr, j, r)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, j, "expected property after 'has'")
res.append(('->', r[0]))
return i
j = self.tok('is', argstr, i)
if j >= 0:
i = self.prop(argstr, j, r)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"expected <property> after 'is'")
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"End of file found, expected property after 'is'")
i = j
j = self.tok('of', argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected 'of' after 'is' <prop>")
res.append(('<-', r[0]))
return j
j = self.tok('a', argstr, i)
if j >= 0:
res.append(('->', RDF_type))
return j
if argstr[i:i+2] == "<=":
res.append(('<-', self._store.newSymbol(Logic_NS+"implies")))
return i+2
if argstr[i:i+1] == "=":
if argstr[i+1:i+2] == ">":
res.append(('->', self._store.newSymbol(Logic_NS+"implies")))
return i+2
res.append(('->', DAML_sameAs))
return i+1
if argstr[i:i+2] == ":=":
# patch file relates two formulae, uses this @@ really?
res.append(('->', Logic_NS+"becomes"))
return i+2
j = self.prop(argstr, i, r)
if j >= 0:
res.append(('->', r[0]))
return j
if argstr[i:i+2] == ">-" or argstr[i:i+2] == "<-":
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
">- ... -> syntax is obsolete.")
return -1
def prop(self, argstr, i, res):
return self.item(argstr, i, res)
def item(self, argstr, i, res):
return self.path(argstr, i, res)
def blankNode(self, uri=None):
if "B" not in self._flags:
return self._context.newBlankNode(uri, why=self._reason2)
x = self._context.newSymbol(uri)
self._context.declareExistential(x)
return x
def path(self, argstr, i, res):
"""Parse the path production.
"""
j = self.nodeOrLiteral(argstr, i, res)
if j < 0:
return j # nope
while argstr[j:j+1] in "!^.": # no spaces, must follow exactly (?)
ch = argstr[j:j+1] # @@ Allow "." followed IMMEDIATELY by a node.
if ch == ".":
ahead = argstr[j+1:j+2]
if not ahead or (ahead in _notNameChars
and ahead not in ":?<[{("): break
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(argstr, j+1, res)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"EOF found in middle of path syntax")
pred = res.pop()
if ch == "^": # Reverse traverse
self.makeStatement((self._context, pred, obj, subj))
else:
self.makeStatement((self._context, pred, subj, obj))
res.append(obj)
return j
def anonymousNode(self, ln):
"""Remember or generate a term for one of these _: anonymous nodes"""
term = self._anonymousNodes.get(ln, None)
if term != None:
return term
term = self._store.newBlankNode(self._context, why=self._reason2)
self._anonymousNodes[ln] = term
return term
def node(self, argstr, i, res, subjectAlready=None):
"""Parse the <node> production.
Space is now skipped once at the beginning
        instead of in multiple calls to self.skipSpace().
"""
subj = subjectAlready
j = self.skipSpace(argstr, i)
if j < 0:
return j #eof
i = j
ch = argstr[i:i+1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
j = self.skipSpace(argstr, i+1)
if j < 0:
raise BadSyntax(self._thisDoc,
self.lines, argstr, i, "EOF after '['")
if argstr[j:j+1] == "=": # Hack for "is" binding name to anon node
i = j + 1
objs = []
j = self.objectList(argstr, i, objs)
if j >= 0:
subj = objs[0]
if len(objs) > 1:
for obj in objs:
self.makeStatement((self._context,
DAML_sameAs, subj, obj))
j = self.skipSpace(argstr, j)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF when objectList expected after [ = ")
if argstr[j:j+1] == ";":
j = j+1
else:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"objectList expected after [= ")
if subj is None:
subj = self.blankNode(uri= bnodeID)
i = self.property_list(argstr, j, subj)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"property_list expected")
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF when ']' expected after [ <propertyList>")
if argstr[j:j+1] != "]":
raise BadSyntax(self._thisDoc,
self.lines, argstr, j, "']' expected")
res.append(subj)
return j+1
if ch == "{":
ch2 = argstr[i+1:i+2]
if ch2 == '$':
i += 1
j = i + 1
List = []
first_run = True
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"needed '$}', found end.")
if argstr[i:i+2] == '$}':
j = i+2
break
if not first_run:
if argstr[i:i+1] == ',':
i += 1
else:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "expected: ','")
else:
first_run = False
item = []
j = self.item(argstr, i, item) #@@@@@ should be path, was object
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected item in set or '$}'")
List.append(self._store.intern(item[0]))
res.append(self._store.newSet(List, self._context))
return j
else:
j = i + 1
oldParentContext = self._parentContext
self._parentContext = self._context
parentAnonymousNodes = self._anonymousNodes
grandParentVariables = self._parentVariables
self._parentVariables = self._variables
self._anonymousNodes = {}
self._variables = self._variables.copy()
reason2 = self._reason2
self._reason2 = becauseSubexpression
if subj is None:
subj = self._store.newFormula()
self._context = subj
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "needed '}', found end.")
if argstr[i:i+1] == "}":
j = i+1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "expected statement or '}'")
self._anonymousNodes = parentAnonymousNodes
self._variables = self._parentVariables
self._parentVariables = grandParentVariables
self._context = self._parentContext
self._reason2 = reason2
self._parentContext = oldParentContext
res.append(subj.close()) # No use until closed
return j
if ch == "(":
thing_type = self._store.newList
ch2 = argstr[i+1:i+2]
if ch2 == '$':
thing_type = self._store.newSet
i += 1
j = i+1
List = []
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "needed ')', found end.")
if argstr[i:i+1] == ')':
j = i+1
break
item = []
j = self.item(argstr, i, item) #@@@@@ should be path, was object
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected item in list or ')'")
List.append(self._store.intern(item[0]))
res.append(thing_type(List, self._context))
return j
j = self.tok('this', argstr, i) # This context
if j >= 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Keyword 'this' was ancient N3. Now use @forSome and @forAll keywords.")
#booleans
j = self.tok('true', argstr, i)
if j >= 0:
res.append(True)
return j
j = self.tok('false', argstr, i)
if j >= 0:
res.append(False)
return j
if subj is None: # If this can be a named node, then check for a name.
j = self.uri_ref2(argstr, i, res)
if j >= 0:
return j
return -1
def property_list(self, argstr, i, subj):
"""Parse property list
Leaves the terminating punctuation in the buffer
"""
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF found when expected verb in property list")
if argstr[j:j+2] == ":-":
i = j + 2
res = []
j = self.node(argstr, i, res, subj)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"bad {} or () or [] node after :- ")
i = j
continue
i = j
v = []
j = self.verb(argstr, i, v)
if j <= 0:
return i # void but valid
objs = []
i = self.objectList(argstr, j, objs)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"objectList expected")
for obj in objs:
dira, sym = v[0]
if dira == '->':
self.makeStatement((self._context, sym, subj, obj))
else:
self.makeStatement((self._context, sym, obj, subj))
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"EOF found in list of objects")
if argstr[i:i+1] != ";":
return i
i = i+1 # skip semicolon and continue
def commaSeparatedList(self, argstr, j, res, what):
"""return value: -1 bad syntax; >1 new position in argstr
res has things found appended
"""
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF found expecting comma sep list")
if argstr[i] == ".":
return j # empty list is OK
i = what(argstr, i, res)
if i < 0:
return -1
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
ch = argstr[j:j+1]
if ch != ",":
if ch != ".":
return -1
return j # Found but not swallowed "."
i = what(argstr, j+1, res)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"bad list content")
return i
def objectList(self, argstr, i, res):
i = self.object(argstr, i, res)
if i < 0:
return -1
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"EOF found after object")
if argstr[j:j+1] != ",":
return j # Found something else!
i = self.object(argstr, j+1, res)
if i < 0:
return i
def checkDot(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j #eof
if argstr[j:j+1] == ".":
return j+1 # skip
if argstr[j:j+1] == "}":
return j # don't skip it
if argstr[j:j+1] == "]":
return j
raise BadSyntax(self._thisDoc, self.lines,
argstr, j, "expected '.' or '}' or ']' at end of statement")
def uri_ref2(self, argstr, i, res):
"""Generate uri from n3 representation.
Note that the RDF convention of directly concatenating
NS and local name is now used though I prefer inserting a '#'
        to make the namespaces look more like what XML folks expect.
"""
qn = []
j = self.qname(argstr, i, qn)
if j >= 0:
pfx, ln = qn[0]
if pfx is None:
assert 0, "not used?"
ns = self._baseURI + ADDED_HASH
else:
try:
ns = self._bindings[pfx]
except KeyError:
if pfx == "_": # Magic prefix 2001/05/30, can be overridden
res.append(self.anonymousNode(ln))
return j
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Prefix \"%s:\" not bound" % (pfx))
symb = self._store.newSymbol(ns + ln)
if symb in self._variables:
res.append(self._variables[symb])
else:
res.append(symb) # @@@ "#" CONVENTION
if not ns.find("#"):
progress("Warning: no # on namespace %s," % ns)
return j
i = self.skipSpace(argstr, i)
if i < 0:
return -1
if argstr[i] == "?":
v = []
j = self.variable(argstr, i, v)
            if j > 0: # Forget variables as a class, only in context.
res.append(v[0])
return j
return -1
elif argstr[i] == "<":
i = i + 1
st = i
while i < len(argstr):
if argstr[i] == ">":
                    uref = argstr[st:i] # the join should deal with "":
if self._baseURI:
uref = join(self._baseURI, uref) # was: uripath.join
else:
assert ":" in uref, \
"With no base URI, cannot deal with relative URIs"
if argstr[i-1:i] == "#" and not uref[-1:] == "#":
uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
res.append(self._variables[symb])
else:
res.append(symb)
return i+1
i = i + 1
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"unterminated URI reference")
elif self.keywordsSet:
v = []
j = self.bareWord(argstr, i, v)
if j < 0:
                return -1 # Forget variables as a class, only in context.
if v[0] in self.keywords:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
'Keyword "%s" not allowed here.' % v[0])
res.append(self._store.newSymbol(self._bindings[""]+v[0]))
return j
else:
return -1
def skipSpace(self, argstr, i):
"""Skip white space, newlines and comments.
return -1 if EOF, else position of first non-ws character"""
while 1:
m = eol.match(argstr, i)
if m == None:
break
self.lines = self.lines + 1
i = m.end() # Point to first character unmatched
self.startOfLine = i
m = ws.match(argstr, i)
if m != None:
i = m.end()
m = eof.match(argstr, i)
if m != None:
return -1
return i
def variable(self, argstr, i, res):
""" ?abc -> variable(:abc)
"""
j = self.skipSpace(argstr, i)
if j < 0:
return -1
if argstr[j:j+1] != "?":
return -1
j = j + 1
i = j
if argstr[j] in "0123456789-":
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"Varible name can't start with '%s'" % argstr[j])
while i < len(argstr) and argstr[i] not in _notNameChars:
i = i+1
if self._parentContext == None:
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._variables:
self._variables[varURI] = self._context.newUniversal(varURI
, why=self._reason2)
res.append(self._variables[varURI])
return i
# @@ was:
# raise BadSyntax(self._thisDoc, self.lines, argstr, j,
# "Can't use ?xxx syntax for variable in outermost level: %s"
# % argstr[j-1:i])
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._parentVariables:
self._parentVariables[varURI] = self._parentContext.newUniversal(varURI
, why=self._reason2)
res.append(self._parentVariables[varURI])
return i
def bareWord(self, argstr, i, res):
""" abc -> :abc
"""
j = self.skipSpace(argstr, i)
if j < 0:
return -1
if argstr[j] in "0123456789-" or argstr[j] in _notNameChars:
return -1
i = j
while i < len(argstr) and argstr[i] not in _notNameChars:
i = i+1
res.append(argstr[j:i])
return i
def qname(self, argstr, i, res):
"""
xyz:def -> ('xyz', 'def')
If not in keywords and keywordsSet: def -> ('', 'def')
:def -> ('', 'def')
"""
i = self.skipSpace(argstr, i)
if i < 0:
return -1
c = argstr[i]
if c in "0123456789-+.":
return -1
if c not in _notNameChars:
ln = c
i = i + 1
while i < len(argstr):
c = argstr[i]
if c=="." or c not in _notNameChars:
ln = ln + c
i = i + 1
else: break
if argstr[i-1]==".": # qname cannot end with "."
return -1
else: # First character is non-alpha
ln = '' # Was: None - TBL (why? useful?)
if i < len(argstr) and argstr[i] == ':':
pfx = ln
i = i + 1
ln = ''
while i < len(argstr):
c = argstr[i]
if c not in _notNameChars:
ln = ln + c
i = i + 1
else:
break
res.append((pfx, ln))
return i
else: # delimiter was not ":"
if ln and self.keywordsSet and ln not in self.keywords:
res.append(('', ln))
return i
return -1
def object(self, argstr, i, res):
j = self.subject(argstr, i, res)
if j >= 0:
return j
else:
j = self.skipSpace(argstr, i)
if j < 0:
return -1
else:
i = j
if argstr[i] == '"':
if argstr[i:i+3] == '"""':
delim = '"""'
else:
delim = '"'
i = i + len(delim)
j, s = self.strconst(argstr, i, delim)
res.append(self._store.newLiteral(s))
progress("New string const ", s, j)
return j
else:
return -1
def nodeOrLiteral(self, argstr, i, res):
j = self.node(argstr, i, res)
startline = self.lines # Remember where for error messages
if j >= 0:
return j
else:
j = self.skipSpace(argstr, i)
if j < 0:
return -1
else:
i = j
ch = argstr[i]
if ch in "-+0987654321":
m = number_syntax.match(argstr, i)
if m == None:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Bad number syntax")
j = m.end()
if m.group('exponent') != None: # includes decimal exponent
res.append(float(argstr[i:j]))
# res.append(self._store.newLiteral(argstr[i:j],
# self._store.newSymbol(FLOAT_DATATYPE)))
elif m.group('decimal') != None:
res.append(Decimal(argstr[i:j]))
else:
res.append(long(argstr[i:j]))
# res.append(self._store.newLiteral(argstr[i:j],
# self._store.newSymbol(INTEGER_DATATYPE)))
return j
if argstr[i] == '"':
if argstr[i:i+3] == '"""':
delim = '"""'
else:
delim = '"'
i = i + len(delim)
dt = None
j, s = self.strconst(argstr, i, delim)
lang = None
if argstr[j:j+1] == "@": # Language?
m = langcode.match(argstr, j+1)
if m == None:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"Bad language code syntax on string literal, after @")
i = m.end()
lang = argstr[j+1:i]
j = i
if argstr[j:j+2] == "^^":
res2 = []
j = self.uri_ref2(argstr, j+2, res2) # Read datatype URI
dt = res2[0]
# if dt.uriref() == "http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral":
if dt == "http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral":
try:
dom = XMLtoDOM('<rdf:envelope xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns">'
+ s
+ '</rdf:envelope>').firstChild
except:
raise ValueError('s="%s"' % s)
res.append(self._store.newXMLLiteral(dom))
return j
res.append(self._store.newLiteral(s, dt, lang))
return j
else:
return -1
def uriOf(self, sym):
if isinstance(sym, types.TupleType):
return sym[1] # old system for --pipe
# return sym.uriref() # cwm api
return sym
def strconst(self, argstr, i, delim):
"""parse an N3 string constant delimited by delim.
return index, val
"""
j = i
ustr = u"" # Empty unicode string
startline = self.lines # Remember where for error messages
while j < len(argstr):
if argstr[j] == '"':
if delim == '"': # done when delim is "
i = j + 1
return i, ustr
if delim == '"""': # done when delim is """ and ...
if argstr[j:j+5] == '"""""': # ... we have "" before
i = j + 5
ustr = ustr + '""'
return i, ustr
if argstr[j:j+4] == '""""': # ... we have " before
i = j + 4
ustr = ustr + '"'
return i, ustr
if argstr[j:j+3] == '"""': # ... current " is part of delim
i = j + 3
return i, ustr
# we are inside of the string and current char is "
j = j + 1
ustr = ustr + '"'
continue
m = interesting.search(argstr, j) # was argstr[j:].
# Note for pos param to work, MUST be compiled ... re bug?
assert m, "Quote expected in string at ^ in %s^%s" % (
argstr[j-20:j], argstr[j:j+20]) # we at least have to find a quote
i = m.start()
try:
ustr = ustr + argstr[j:i]
except UnicodeError:
err = ""
for c in argstr[j:i]:
err = err + (" %02x" % ord(c))
streason = sys.exc_info()[1].__str__()
raise BadSyntax(self._thisDoc, startline, argstr, j,
"Unicode error appending characters %s to string, because\n\t%s"
% (err, streason))
# print "@@@ i = ",i, " j=",j, "m.end=", m.end()
ch = argstr[i]
if ch == '"':
j = i
continue
elif ch == "\r": # Strip carriage returns
j = i+1
continue
elif ch == "\n":
if delim == '"':
raise BadSyntax(self._thisDoc, startline, argstr, i,
"newline found in string literal")
self.lines = self.lines + 1
ustr = ustr + ch
j = i + 1
self.startOfLine = j
elif ch == "\\":
j = i + 1
ch = argstr[j:j+1] # Will be empty if string ends
if not ch:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"unterminated string literal (2)")
k = 'abfrtvn\\"'.find(ch)
if k >= 0:
uch = '\a\b\f\r\t\v\n\\"'[k]
ustr = ustr + uch
j = j + 1
elif ch == "u":
j, ch = self.uEscape(argstr, j+1, startline)
ustr = ustr + ch
elif ch == "U":
j, ch = self.UEscape(argstr, j+1, startline)
ustr = ustr + ch
else:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"bad escape")
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"unterminated string literal")
def uEscape(self, argstr, i, startline):
j = i
count = 0
value = 0
while count < 4: # Get 4 more characters
ch = argstr[j:j+1].lower()
# sbp http://ilrt.org/discovery/chatlogs/rdfig/2002-07-05
j = j + 1
if ch == "":
raise BadSyntax(self._thisDoc, startline, argstr, i,
"unterminated string literal(3)")
k = "0123456789abcdef".find(ch)
if k < 0:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"bad string literal hex escape")
value = value * 16 + k
count = count + 1
uch = unichr(value)
return j, uch
def UEscape(self, argstr, i, startline):
stringType = type('')
j = i
count = 0
value = '\\U'
while count < 8: # Get 8 more characters
ch = argstr[j:j+1].lower()
# sbp http://ilrt.org/discovery/chatlogs/rdfig/2002-07-05
j = j + 1
if ch == "":
raise BadSyntax(self._thisDoc, startline, argstr, i,
"unterminated string literal(3)")
k = "0123456789abcdef".find(ch)
if k < 0:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"bad string literal hex escape")
value = value + ch
count = count + 1
uch = stringType(value).decode('unicode-escape')
return j, uch
wide_build = True
try:
unichr(0x10000)
except ValueError:
wide_build = False
# If we are going to do operators then they should generate
# [ is operator:plus of ( \1 \2 ) ]
class BadSyntax(SyntaxError):
def __init__(self, uri, lines, argstr, i, why):
self._str = argstr.encode('utf-8') # Better go back to strings for errors
self._i = i
self._why = why
self.lines = lines
self._uri = uri
def __str__(self):
argstr = self._str
i = self._i
st = 0
if i > 60:
pre = "..."
st = i - 60
else:
pre = ""
if len(argstr)-i > 60:
post = "..."
else:
post = ""
return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' \
% (self.lines +1, self._uri, self._why, pre,
argstr[st:i], argstr[i:i+60], post)
def stripCR(argstr):
res = ""
for ch in argstr:
if ch != "\r":
res = res + ch
return res
def dummyWrite(x):
pass
################################################################################
def toBool(s):
if s == 'true' or s == 'True' or s == '1':
return True
if s == 'false' or s == 'False' or s == '0':
return False
raise ValueError(s)
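# For example: toBool('true') and toBool('1') return True, toBool('False')
# returns False, and anything else (e.g. 'yes') raises ValueError.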
class Formula(object):
number = 0
def __init__(self, parent):
self.counter = 0
Formula.number += 1
self.number = Formula.number
self.existentials = {}
self.universals = {}
self.quotedgraph = QuotedGraph(
store=parent.store, identifier=self.id())
def __str__(self):
return '_:Formula%s' % self.number
def id(self):
return BNode('_:Formula%s' % self.number)
def newBlankNode(self, uri=None, why=None):
if uri is None:
self.counter += 1
bn = BNode('f%sb%s' % (id(self), self.counter))
else:
bn = BNode(uri.split('#').pop().replace('_', 'b'))
return bn
def newUniversal(self, uri, why=None):
return Variable(uri.split('#').pop())
def declareExistential(self, x):
self.existentials[x] = self.newBlankNode()
def close(self):
return self.quotedgraph
r_hibyte = re.compile(r'([\x80-\xff])')
def iri(uri):
return uri.decode('utf-8')
# return unicode(r_hibyte.sub(lambda m: '%%%02X' % ord(m.group(1)), uri))
class RDFSink(object):
def __init__(self, graph):
self.rootFormula = None
self.counter = 0
self.graph = graph
def newFormula(self):
assert self.graph.store.formula_aware
f = Formula(self.graph)
return f
def newSymbol(self, *args):
uri = args[0].encode('utf-8')
return URIRef(iri(uri))
def newBlankNode(self, arg=None, **kargs):
if isinstance(arg, Formula):
return arg.newBlankNode()
elif arg is None:
self.counter += 1
bn = BNode('n' + str(self.counter))
else:
bn = BNode(str(arg[0]).split('#').pop().replace('_', 'b'))
return bn
def newLiteral(self, s, dt, lang):
if dt:
return Literal(s, datatype=dt)
else:
return Literal(s, lang=lang)
def newList(self, n, f):
if not n:
return self.newSymbol(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'
)
a = self.newBlankNode(f)
first = self.newSymbol(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#first'
)
rest = self.newSymbol('http://www.w3.org/1999/02/22-rdf-syntax-ns#rest')
self.makeStatement((f, first, a, n[0]))
self.makeStatement((f, rest, a, self.newList(n[1:], f)))
return a
def newSet(self, *args):
return set(args)
def setDefaultNamespace(self, *args):
return ':'.join(repr(n) for n in args)
def makeStatement(self, quadruple, why=None):
f, p, s, o = quadruple
if hasattr(p, 'formula'):
raise Exception("Formula used as predicate")
s = self.normalise(f, s)
p = self.normalise(f, p)
o = self.normalise(f, o)
if f == self.rootFormula:
# print s, p, o, '.'
self.graph.add((s, p, o))
else:
f.quotedgraph.add((s, p, o))
#return str(quadruple)
def normalise(self, f, n):
if isinstance(n, tuple):
return URIRef(unicode(n[1]))
# if isinstance(n, list):
# rdflist, f = n
# name = self.newBlankNode()
# if f == self.rootFormula:
# sublist = name
# for i in xrange(0, len(rdflist) - 1):
# print sublist, 'first', rdflist[i]
# rest = self.newBlankNode()
# print sublist, 'rest', rest
# sublist = rest
# print sublist, 'first', rdflist[-1]
# print sublist, 'rest', 'nil'
# return name
if isinstance(n, bool):
s = Literal(str(n).lower(), datatype=BOOLEAN_DATATYPE)
return s
if isinstance(n, int) or isinstance(n, long):
s = Literal(unicode(n), datatype=INTEGER_DATATYPE)
return s
if isinstance(n, Decimal):
value = str(n.normalize())
if value == '-0':
value = '0'
s = Literal(value, datatype=DECIMAL_DATATYPE )
return s
if isinstance(n, float):
s = Literal(str(n), datatype=DOUBLE_DATATYPE )
return s
if f.existentials.has_key(n):
return f.existentials[n]
# if isinstance(n, Var):
# if f.universals.has_key(n):
# return f.universals[n]
# f.universals[n] = f.newBlankNode()
# return f.universals[n]
return n
def intern(self, something):
return something
def bind(self, pfx, uri):
pass # print pfx, ':', uri
def startDoc(self, formula):
self.rootFormula = formula
def endDoc(self, formula):
pass
###################################################
#
# Utilities
#
Escapes = {'a': '\a',
'b': '\b',
'f': '\f',
'r': '\r',
't': '\t',
'v': '\v',
'n': '\n',
'\\': '\\',
'"': '"'}
forbidden1 = re.compile(ur'[\\\"\a\b\f\r\v\u0080-\U0000ffff]')
forbidden2 = re.compile(ur'[\\\"\a\b\f\r\v\t\n\u0080-\U0000ffff]')
#"
def stringToN3(argstr, singleLine=0, flags=""):
res = ''
if (len(argstr) > 20 and argstr[-1] != '"' \
and not singleLine and (argstr.find("\n") >= 0 \
or argstr.find('"') >= 0)):
delim = '"""'
forbidden = forbidden1 # (allow tabs too now)
else:
delim = '"'
forbidden = forbidden2
i = 0
while i < len(argstr):
m = forbidden.search(argstr, i)
if not m:
break
j = m.start()
res = res + argstr[i:j]
ch = m.group(0)
if ch == '"' and delim == '"""' and argstr[j:j+3] != '"""': #"
res = res + ch
else:
k = '\a\b\f\r\t\v\n\\"'.find(ch)
if k >= 0:
res = res + "\\" + 'abfrtvn\\"'[k]
else:
if 'e' in flags:
# res = res + ('\\u%04x' % ord(ch))
res = res + ('\\u%04X' % ord(ch))
# http://www.w3.org/TR/rdf-testcases/#ntriples
else:
res = res + ch
i = j + 1
# The following code fixes things for really high range Unicode
newstr = ""
for ch in res + argstr[i:]:
if ord(ch)>65535:
newstr = newstr + ('\\U%08X' % ord(ch))
# http://www.w3.org/TR/rdf-testcases/#ntriples
else:
newstr = newstr + ch
return delim + newstr + delim
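# For example (sketch): stringToN3(u'simple') returns '"simple"'; a string
# longer than 20 characters that contains a newline or an embedded quote is
# emitted in the long '"""..."""' form instead.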
def backslashUify(ustr):
"""Use URL encoding to return an ASCII string corresponding
to the given unicode"""
# progress("String is "+`ustr`)
# s1=ustr.encode('utf-8')
s = ""
for ch in ustr: # .encode('utf-8'):
if ord(ch) > 65535:
ch = "\\U%08X" % ord(ch)
elif ord(ch) > 126:
ch = "\\u%04X" % ord(ch)
else:
ch = "%c" % ord(ch)
s = s + ch
return b(s)
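# For example (sketch): backslashUify(u'Andr\xe9') yields 'Andr\\u00E9';
# characters outside the BMP come out as \\U00XXXXXX escapes.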
@py3compat.format_doctest_out
def hexify(ustr):
"""Use URL encoding to return an ASCII string
corresponding to the given UTF8 string
>>> hexify("http://example/a b")
%(b)s'http://example/a%%20b'
"""
# progress("String is "+`ustr`)
# s1=ustr.encode('utf-8')
s = ""
for ch in ustr: # .encode('utf-8'):
if ord(ch) > 126 or ord(ch) < 33 :
ch = "%%%02X" % ord(ch)
else:
ch = "%c" % ord(ch)
s = s + ch
return b(s)
# # Unused, dysfunctional.
# def dummy():
# res = ""
# if len(argstr) > 20 and (argstr.find("\n") >=0 or argstr.find('"') >=0):
# delim= '"""'
# forbidden = "\\\"\a\b\f\r\v" # (allow tabs too now)
# else:
# delim = '"'
# forbidden = "\\\"\a\b\f\r\v\t\n"
# for i in range(len(argstr)):
# ch = argstr[i]
# j = forbidden.find(ch)
# if ch == '"' and delim == '"""' \
# and i+1 < len(argstr) and argstr[i+1] != '"':
# j=-1 # Single quotes don't need escaping in long format
# if j >= 0:
# ch = "\\" + '\\"abfrvtn'[j]
# elif ch not in "\n\t" and (ch < " " or ch > "}"):
# ch = "[[" + `ch` + "]]" #[2:-1] # Use python
# res = res + ch
# return delim + res + delim
class N3Parser(Parser):
def __init__(self):
pass
def parse(self, source, graph, encoding="utf-8"):
# we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware # is this implied by formula_aware
assert graph.store.formula_aware
if encoding not in [None, "utf-8"]:
raise Exception("N3 files are always utf-8 encoded, I was passed: %s"%encoding)
conj_graph = ConjunctiveGraph(store=graph.store)
conj_graph.default_context = graph # TODO: CG __init__ should have a default_context arg
# TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
sink = RDFSink(conj_graph)
baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
p = SinkParser(sink, baseURI=baseURI)
p.loadStream(source.getByteStream())
for prefix, namespace in p._bindings.items():
conj_graph.bind(prefix, namespace)
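# Usage sketch (an assumption about the calling convention, not part of the
# original module): the parser is normally reached through Graph.parse with
# format="n3" rather than instantiated directly, e.g.
#
#     from rdflib import Graph
#     g = Graph()
#     g.parse(data='<http://example/s> <http://example/p> "o" .', format='n3')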
def _test():
import doctest
doctest.testmod()
# if __name__ == '__main__':
# _test()
def main():
g = ConjunctiveGraph()
sink = RDFSink(g)
base_uri = 'file://' + os.path.join(os.getcwd(), sys.argv[1])
p = SinkParser(sink, baseURI=base_uri)
p._bindings[''] = p._baseURI + '#'
p.startDoc()
f = open(sys.argv[1], 'rb')
rdbytes = f.read()
f.close()
p.feed(rdbytes)
p.endDoc()
for t in g.quads((None, None, None)):
        print(t)
if __name__ == '__main__':
main()
#ends
| bsd-3-clause | -6,180,058,760,584,588,000 | 31.715589 | 290 | 0.48554 | false |
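As a quick illustration of the escaping rule the record's hexify function applies, here is a minimal standalone sketch; it drops the module's b() helper, which is an assumption about its py3compat layer:

def hexify_sketch(ustr):
    # Percent-encode everything outside printable ASCII 33..126, as hexify does.
    out = ""
    for ch in ustr:
        if ord(ch) > 126 or ord(ch) < 33:
            out += "%%%02X" % ord(ch)
        else:
            out += ch
    return out

print(hexify_sketch("http://example/a b"))  # -> http://example/a%20b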
ieugen/Teachingbox | usercontrib/crawler3D/python/pyui/grid.py | 1 | 8378 | # PyUI
# Copyright (C) 2001-2002 Sean C. Riley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""A scrollable grid class for PyUI. The elements in the grid are PyUI widgets.
"""
import pyui
import copy
from pyui.desktop import getDesktop, getTheme, getRenderer
class GridPanel(pyui.widgets.Panel):
"""A scrollable grid class. I have a grid of cells of which only some will
be visible at any time.
"""
def __init__(self, visibleWidth, visibleHeight, useColumnHeaders = 1, useRowHeaders = 1):
self.vWidth = visibleWidth
self.vHeight = visibleHeight
self.scrollPos = 0
pyui.widgets.Panel.__init__(self)
self.setLayout(pyui.layouts.BorderLayoutManager())
self.cheader = ColumnHeaders(visibleWidth)
self.rheader = RowHeaders(visibleHeight)
self.scrollBar = pyui.widgets.VScroll()
self.scrollBar.resize(10, 50)
self.cellPanel = CellPanel(visibleWidth, visibleHeight)
if useColumnHeaders:
self.addChild(self.cheader, pyui.layouts.BorderLayoutManager.NORTH)
if useRowHeaders:
self.addChild(self.rheader, pyui.layouts.BorderLayoutManager.WEST)
self.addChild(self.cellPanel, pyui.layouts.BorderLayoutManager.CENTER)
self.addChild(self.scrollBar, pyui.layouts.BorderLayoutManager.EAST)
self.pack()
def resize(self, w, h):
print "Resizing GridPanel", w, h
pyui.widgets.Panel.resize(self, w, h)
def setColumnName(self, columnNum, name):
self.cheader.setColumnName(columnNum, name)
def setRowName(self, rowNum, name):
self.rheader.setRowName(rowNum, name)
def getCellAt(self, x, y):
"""return a cell at the co-ordinates.
"""
return self.cellPanel.getCellAt(x, y)
def putCellAt(self, widget, x, y):
"""put a widget into the grid at the co-ordinates.
"""
return self.cellPanel.putCellAt(widget, x, y)
def removeCellAt(self, x, y):
"""remove a widget from the grid
"""
return self.cellPanel.removeCellAt(x, y)
def findCellAt(self, posX, posY):
"""Find the cell at the x,y pixel position. Pass-through to the inner grid panel.
"""
return self.cellPanel.findCellAt(posX, posY)
def findCoordinatesAt(self, posX, posY):
"""convert screen co-ordinates into grid co-ordinates.
"""
return self.cellPanel.findCoordinatesAt(posX, posY)
def clear(self):
return self.cellPanel.clear()
class CellPanel(pyui.widgets.Panel):
"""The inner cell grid of a GridPanel.
"""
def __init__(self, vWidth, vHeight):
pyui.widgets.Panel.__init__(self)
self.vWidth = float(vWidth)
self.vHeight = float(vHeight)
self.cells = {}
self.scrollPos = 0
self.cellWidth = 1
self.cellHeight = 1
self.numRows = vHeight
self.registerEvent(pyui.locals.SCROLLPOS, self.onScroll)
def resize(self, width, height):
pyui.widgets.Panel.resize(self, width, height)
self.cellWidth = self.windowRect[2] / self.vWidth
self.cellHeight = self.windowRect[3] / self.vHeight
self.setupAllCells()
def setupAllCells(self):
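        # Show only the rows inside the visible scroll window; hide everything else.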
for key in self.cells.keys():
if key[1] >= self.scrollPos and key[1] < self.scrollPos + self.vHeight:
self.setupCell( self.cells[key], key[0], key[1])
self.cells[key].setShow(1)
else:
self.cells[key].setShow(0)
def getCellAt(self, x, y):
return self.cells.get( (x,y), None)
def removeCellAt(self, x, y):
cell = self.cells.get( (x,y), None)
if cell:
cell.destroy()
self.children.remove(cell)
del self.cells[ (x,y) ]
self.setDirty(1)
def clear(self):
tmp = copy.copy(self.children)
for cell in tmp:
self.removeCellAt( cell.gridPosition[0], cell.gridPosition[1] )
def putCellAt(self, widget, x, y):
if self.cells.has_key( (x,y) ):
print "Error: already a widget at (%s,%s)" % (x,y)
return 0
self.addChild(widget)
self.cells[ (x,y) ] = widget
self.setupCell(widget, x, y)
if y > self.numRows:
self.numRows = y + 1
self.parent.scrollBar.setNumItems(y+1, self.vHeight)
return 1
def setupCell(self, widget, x, y):
"""this moves and positions the cell. it also sets "gridPosition" so the cell
knows where in the grid it lives.
"""
if y >= self.scrollPos and y < self.scrollPos + self.vHeight:
widget.setShow(1)
else:
widget.setShow(0)
#print "setup cell", x, y
widget.gridPosition = (x,y)
widget.moveto( self.cellWidth * x + 2,
self.cellHeight * (y-self.scrollPos) + 2)
widget.resize( self.cellWidth -4, self.cellHeight -4)
def onScroll(self, event):
if event.id == self.parent.scrollBar.id:
self.scrollPos = event.pos
self.setupAllCells()
self.setDirty(1)
self.window.setDirty(1)
return 1
return 0
def findCellAt(self, posX, posY):
"""find the cell at x,y
"""
x = int((posX - self.rect[0]) / self.cellWidth)
y = int((posY - self.rect[1]) / self.cellHeight) + self.scrollPos
return self.cells.get( (x,y), None)
def findCoordinatesAt(self, posX, posY):
x = int((posX - self.rect[0]) / self.cellWidth)
y = int((posY - self.rect[1]) / self.cellHeight) + self.scrollPos
return (x,y)
def draw(self, renderer):
"""only draw the visible widgets.
"""
for key in self.cells.keys():
if key[1] >= self.scrollPos and key[1] < self.scrollPos + self.vHeight:
self.cells[key].draw(renderer)
xpos = self.windowRect[0]
ypos = self.windowRect[1]
w = self.windowRect[2]
h = self.windowRect[3]
cellw = w / self.vWidth
cellh = h / self.vHeight
        # vWidth/vHeight are stored as floats; cast so range() gets integers
        for x in range(0, int(self.vWidth) + 1):
            renderer.drawLine(xpos + x * cellw, ypos, xpos + x * cellw, ypos + h, pyui.colors.white)
        for y in range(0, int(self.vHeight)):
            renderer.drawLine(xpos, ypos + y * cellh, xpos + w, ypos + y * cellh, pyui.colors.white)
class ColumnHeaders(pyui.widgets.Panel):
"""The column headers for the GridPanel.
"""
def __init__(self, numColumns):
pyui.widgets.Panel.__init__(self)
self.setLayout(pyui.layouts.TableLayoutManager(numColumns, 1))
for i in range(0, numColumns):
self.addChild( pyui.widgets.Button("---"), (i, 0, 1, 1) )
self.resize(self.rect[2], 22)
def setColumnName(self, columnNum, name):
self.children[columnNum].setText(name)
class RowHeaders(pyui.widgets.Panel):
"""The row headers for the GridPanel.
"""
def __init__(self, numRows):
pyui.widgets.Panel.__init__(self)
self.setLayout(pyui.layouts.TableLayoutManager(1, numRows) )
for i in range(0, numRows):
self.addChild( pyui.widgets.Button("%d" % i), (0, i, 1, 1) )
self.resize(22, self.rect[3])
def setRowName(self, rowNum, name):
self.children[rowNum].setText(name)
| gpl-3.0 | -7,408,309,931,645,807,000 | 34.907489 | 100 | 0.584149 | false |
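A usage sketch for the grid record above. GridPanel, setColumnName, putCellAt, and pyui.widgets.Button come from the code shown; pyui.init, pyui.widgets.Frame's signature, and pyui.run are assumptions about the library's bootstrap API:

import pyui
from pyui.grid import GridPanel  # module path taken from this record

pyui.init(800, 600)                                        # assumed bootstrap call
frame = pyui.widgets.Frame(10, 10, 420, 320, "grid demo")  # Frame signature assumed
grid = GridPanel(4, 6)
grid.setColumnName(0, "name")
grid.putCellAt(pyui.widgets.Button("cell 0,0"), 0, 0)
frame.addChild(grid)
frame.pack()
pyui.run()                                                 # assumed main loop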
mridang/django-eggnog | eggnog/management/commands/checkupdates.py | 1 | 2002 | from threading import Thread
from pkg_resources import *
from django.core.management.base import BaseCommand, CommandError
from yolk.setuptools_support import get_pkglist
from yolk.yolklib import get_highest_version, Distributions
from yolk.pypi import CheeseShop
from eggnog.models import Update
class Command(BaseCommand):
"""
Custom management command for checking for package updates.
"""
help = 'Checks for package updates from PyPi'
threads = []
dists = Distributions()
pypi = CheeseShop()
def __init__(self, *args, **kwargs):
"""
        Initializer for the management command; flushes stale Update rows before a new check.
"""
super(Command, self).__init__(*args, **kwargs)
Update.objects.all().delete()
def handle(self, *args, **options):
"""
Main management command method that starts the checking process.
"""
print "Checking for updates from PyPi"
for pkg in get_pkglist():
for (dist, active) in self.dists.get_distributions("all", pkg, self.dists.get_highest_installed(pkg)):
thread = Thread(target=self.__check_pypi, args=(dist.project_name, dist.version))
self.threads.append(thread)
thread.start()
for thread in self.threads:
thread.join()
def __check_pypi(self, name, current):
"""
Queries PyPi for updates
"""
(package, versions) = self.pypi.query_versions_pypi(name)
if versions:
newest = get_highest_version(versions)
if newest != current:
if parse_version(current) < parse_version(newest):
print " * Updates for %s are available. You have %s and the latest is %s." % (package, current, newest)
else:
print " * No updates are available for %s." % (package)
Update.objects.create(package=package, installed=current, available=newest)
| bsd-3-clause | -1,709,283,128,330,770,400 | 32.366667 | 123 | 0.608891 | false |
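The command above can also be invoked programmatically through Django's standard call_command API (equivalent to running "python manage.py checkupdates"):

from django.core.management import call_command

call_command("checkupdates")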
IdahoDataEngineers/vcardz | vcardz/data.py | 1 | 2577 | #
# Kontexa vCard data structure and processing
#
from email.utils import parseaddr
import re
from six.moves.urllib.parse import urlparse
from .atom import Atom
from .bag import Bag
from .utils import new_id
REX_BEGIN = "^BEGIN:VCARD"
REX_END = "END:VCARD$"
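# REX_PHONE_NUMBERS groups: (1) area code, (2) exchange, (3) line number,
# (4) optional extension digits after ',', 'x', or spaces.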
REX_PHONE_NUMBERS = "\+?1? *\(?([0-9]{3})\)?[-. ]?([0-9]{3})[-. ]?([0-9]{4})(?:[,x ]*)([0-9]*)" # noqa
REX_EMAIL = "[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?" # noqa
class FormattedName(Atom):
pass
class Name(Bag):
pass
class Nickname(Atom):
pass
class Photo(Atom):
pass
class Birthday(Atom):
pass
class Email(Atom):
user = ""
domain = ""
def __init__(self, data):
Atom.__init__(self, data)
try:
self.value = self.value.lower()
# temp = re.match(Parser.REX_EMAIL, self.value)
# if not temp:
# self.tag = None
# self.value = None
# return
self.value = parseaddr(self.value)[1].lower()
frags = self.value.split('@')
self.user = frags[0]
self.domain = frags[1]
except IndexError:
pass
class Phone(Atom):
def __init__(self, data):
temp = re.sub('[^0-9]', '', data)
if not temp:
raise ValueError
Atom.__init__(self, data)
        match = re.match(REX_PHONE_NUMBERS, self.value)
        if match is not None:
            phone = match.group(1) + "-" + \
                    match.group(2) + "-" + \
                    match.group(3)
            if match.group(4) != "":
                phone += " x" + match.group(4)
            self.value = phone
class Address(Bag):
pass
class Label(Bag):
pass
class Organization(Atom):
pass
class Role(Atom):
def __init__(self, data):
Atom.__init__(self, data)
if "- - -" == self.value:
self.tag = None
self.value = None
class Title(Atom):
pass
class Categories(Bag):
pass
class Note(Atom):
pass
class ProdID(Atom):
pass
class Rev(Atom):
pass
class SortString(Atom):
pass
class Url(Atom):
def __init__(self, data):
Atom.__init__(self, data)
o = urlparse(self.value)
if '' == o.scheme:
self.value = 'http://' + self.value
self.value = self.value.replace('http\://', '')
class Mailer(Atom):
pass
class Uid(Atom):
@staticmethod
def create():
return Uid("UID:kontexa;%s" % new_id())
| gpl-2.0 | 9,038,364,064,295,289,000 | 17.810219 | 155 | 0.503686 | false |
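A standalone check of the phone normalization in the record above; the regex is the one defined in the module, re-applied outside the class because Atom's constructor lives elsewhere in the package:

import re

REX_PHONE_NUMBERS = r"\+?1? *\(?([0-9]{3})\)?[-. ]?([0-9]{3})[-. ]?([0-9]{4})(?:[,x ]*)([0-9]*)"

m = re.match(REX_PHONE_NUMBERS, "+1 (208) 555-0143 x12")
if m is not None:
    phone = "%s-%s-%s" % (m.group(1), m.group(2), m.group(3))
    if m.group(4):
        phone += " x" + m.group(4)
    print(phone)  # -> 208-555-0143 x12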
aerkalov/Booktype | lib/booki/editor/management/commands/bookrename.py | 1 | 3217 | # This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from django.contrib.auth.models import User
from booki.editor import common
from booki.editor import models
from django.conf import settings
class Command(BaseCommand):
args = "<book name>"
help = "Rename book."
option_list = BaseCommand.option_list + (
make_option('--owner',
action='store',
dest='owner',
default=None,
help='Set new owner of the book.'),
make_option('--new-book-title',
action='store',
dest='new_book_title',
default=None,
help='Set new book title.'),
make_option('--new-book-url',
action='store',
dest='new_book_url',
default=None,
help='Set new book url name.'),
)
requires_model_validation = False
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must specify book name.")
try:
book = models.Book.objects.get(url_title__iexact=args[0])
except models.Book.DoesNotExist:
raise CommandError('Book "%s" does not exist.' % args[0])
if options['new_book_title']:
book.title = options['new_book_title']
if options['new_book_url']:
import os
os.rename('%s/books/%s' % (settings.DATA_ROOT, book.url_title), '%s/books/%s' % (settings.DATA_ROOT, options['new_book_url']))
book.url_title = options['new_book_url']
# TODO: test this
n = len(settings.DATA_ROOT)+len('books/')+1
for attachment in models.Attachment.objects.filter(version__book=book):
name = attachment.attachment.name
j = name[n:].find('/')
newName = '%s/books/%s%s' % (settings.DATA_ROOT, book.url_title, name[n:][j:])
attachment.attachment.name = newName
attachment.save()
if options['owner']:
try:
user = User.objects.get(username=options['owner'])
except User.DoesNotExist:
                raise CommandError('User "%s" does not exist. Cannot rename book.' % options['owner'])
book.owner = user
book.save()
| agpl-3.0 | 7,888,365,485,220,761,000 | 34.744444 | 138 | 0.588126 | false |
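Programmatic invocation of the rename command above via Django's standard call_command API; the book slug and option values are illustrative, and option names match the dest values declared in option_list:

from django.core.management import call_command

call_command("bookrename", "old-book-slug",
             new_book_title="New Title",
             new_book_url="new-book-slug",
             owner="admin")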