repo_name (string, length 5–100) | path (string, length 4–299) | copies (string, 990 classes) | size (string, length 4–7) | content (string, length 666–1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
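A minimal sketch of how a dump with these columns could be loaded and filtered. The Parquet path and the pandas route are assumptions for illustration; only the column names and the license values come from the dump itself.

```python
import pandas as pd

# Placeholder path - point this at the actual export of the dump.
df = pd.read_parquet("code_dump.parquet")

# Keep permissively licensed, human-written files with reasonably short lines.
subset = df[
    (~df["autogenerated"])
    & (df["license"].isin(["mit", "bsd-3-clause", "apache-2.0"]))
    & (df["line_max"] < 120)
]
print(subset[["repo_name", "path", "size", "alpha_frac"]].head())
```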
hkernbach/arangodb | utils/generateMimetypes.py | 7 | 5690 | import csv, sys, os.path, re
# wrap text after x characters
def wrap(string, width=80, ind1=0, ind2=0, prefix=''):
string = prefix + ind1 * " " + string
newstring = ""
string = string.replace("\n", " ")
while len(string) > width:
marker = width - 1
while not string[marker].isspace():
marker = marker - 1
newline = string[0:marker] + "\n"
newstring = newstring + newline
string = prefix + ind2 * " " + string[marker + 1:]
return newstring + string
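# Illustrative example of the wrap() helper above (hypothetical input, added
# for clarity): wrap("a fairly long comment that should be broken into short
# lines", width=30, ind2=2, prefix="/// ") returns the text re-flowed so that
# no line exceeds roughly 30 characters, every line starts with "/// ", and
# continuation lines are indented two extra spaces.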
# generate javascript file from mimetypes
def genJsFile(types):
jslint = "/*jslint indent: 2,\n"\
" nomen: true,\n"\
" maxlen: 100,\n"\
" sloppy: true,\n"\
" vars: true,\n"\
" white: true,\n"\
" plusplus: true */\n"\
"/*global exports */\n\n"
out = jslint \
+ prologue\
+ "exports.mimeTypes = {\n"
extensions = { }
# print individual mimetypes
i = 0
for t in types:
extension = t[0]
mimetype = t[1]
out = out + " \"" + extension + "\": [ \"" + mimetype + "\", " + t[2] + " ]"
if not mimetype in extensions:
extensions[mimetype] = [ ]
extensions[mimetype].append(extension)
i = i + 1
if i < len(types):
out = out + ", \n"
else:
out = out + "\n"
out = out + "};\n\n"
# print extensions
out = out + "exports.extensions = {\n"
i = 0
for e in extensions:
out = out + " \"" + e + "\": [ \"" + "\", \"".join(extensions[e]) + "\" ]"
i = i + 1
if i < len(extensions):
out = out + ", \n"
else:
out = out + "\n"
out = out + "};\n\n"
return out
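# Illustrative sketch of the JavaScript emitted by genJsFile() (shape only,
# the sample entries are hypothetical):
#   exports.mimeTypes = {
#     "json": [ "application/json; charset=utf-8", false ],
#     "gif": [ "image/gif", true ]
#   };
#   exports.extensions = {
#     "application/json; charset=utf-8": [ "json" ],
#     "image/gif": [ "gif" ]
#   };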
# generate C header file from mimetypes
def genCHeaderFile(types):
header = "\n"\
+ "#ifndef LIB_BASICS_VOC_MIMETYPES_H\n"\
+ "#define LIB_BASICS_VOC_MIMETYPES_H 1\n"\
+ "\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
+ "/// @brief initialize mimetypes\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
+ "\n"\
+ "void TRI_InitializeEntriesMimetypes();\n"\
+ "\n"\
+ "#endif\n"
return header
# generate C implementation file from mimetypes
def genCFile(types, filename):
headerfile = os.path.splitext(filename)[0] + ".h"
impl = prologue\
+ "#include \"Basics/Common.h\"\n\n"\
+ "#include \"Basics/mimetypes.h\"\n"\
+ "#include \"" + headerfile + "\"\n"\
+ "\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
+ "/// @brief initialize mimetypes\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
+ "\n"\
+ "void TRI_InitializeEntriesMimetypes() {\n"
# print individual types
for t in types:
impl = impl + " TRI_RegisterMimetype(\"" + t[0] + "\", \"" + t[1] + "\", " + t[2] + ");\n"
impl = impl\
+ "}\n"
return impl
# define some globals
prologue = "////////////////////////////////////////////////////////////////////////////////\n"\
+ "/// AUTO-GENERATED FILE GENERATED FROM mimetypes.dat\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
+ "\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
+ "/// DISCLAIMER\n"\
+ "///\n"\
+ "/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany\n"\
+ "/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany\n"\
+ "///\n"\
+ "/// Licensed under the Apache License, Version 2.0 (the \"License\");\n"\
+ "/// you may not use this file except in compliance with the License.\n"\
+ "/// You may obtain a copy of the License at\n"\
+ "///\n"\
+ "/// http://www.apache.org/licenses/LICENSE-2.0\n"\
+ "///\n"\
+ "/// Unless required by applicable law or agreed to in writing, software\n"\
+ "/// distributed under the License is distributed on an \"AS IS\" BASIS,\n"\
+ "/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n"\
+ "/// See the License for the specific language governing permissions and\n"\
+ "/// limitations under the License.\n"\
+ "///\n"\
+ "/// Copyright holder is ArangoDB GmbH, Cologne, Germany\n"\
+ "////////////////////////////////////////////////////////////////////////////////\n"\
if len(sys.argv) < 3:
print >> sys.stderr, "usage: %s <sourcefile> <outfile>" % sys.argv[0]
sys.exit()
source = sys.argv[1]
# read input file
mimetypes = csv.reader(open(source, "rb"))
types = []
r1 = re.compile(r'^#.*')
for t in mimetypes:
if len(t) == 0:
continue
if r1.match(t[0]):
continue
t[2] = t[2].strip()
if t[0] == "" or t[1] == "" or not (t[2] == "true" or t[2] == "false"):
print >> sys.stderr, "invalid mimetypes declaration file: %s" % (source)
sys.exit()
types.append(t)
outfile = sys.argv[2]
extension = os.path.splitext(outfile)[1]
filename = outfile
if extension == ".tmp":
filename = os.path.splitext(outfile)[0]
extension = os.path.splitext(filename)[1]
if extension == ".js":
out = genJsFile(types)
elif extension == ".h":
out = genCHeaderFile(types)
elif extension == ".cpp":
out = genCFile(types, filename)
else:
print >> sys.stderr, "usage: %s <sourcefile> <outfile>" % sys.argv[0]
sys.exit()
outFile = open(outfile, "wb")
outFile.write(out);
outFile.close()
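# --- illustrative notes, added for clarity (not part of the original script) ---
# The expected mimetypes.dat layout is inferred from the parsing code above:
# comma-separated rows of "<extension>,<mime type>,<true|false>", where the
# third column is passed through unchanged into the generated JS/C code and
# lines starting with '#' are treated as comments.
# A hypothetical invocation (output file names are assumptions) would be:
#   python generateMimetypes.py mimetypes.dat voc-mimetypes.js
#   python generateMimetypes.py mimetypes.dat voc-mimetypes.cpp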
| apache-2.0 | 6,517,194,593,476,660,000 | 28.790576 | 98 | 0.462566 | false |
CiscoSystems/nova | nova/tests/console/test_rpcapi.py | 32 | 2839 | # Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.console.rpcapi
"""
import contextlib
import mock
from oslo.config import cfg
from nova.console import rpcapi as console_rpcapi
from nova import context
from nova import test
CONF = cfg.CONF
class ConsoleRpcAPITestCase(test.NoDBTestCase):
def _test_console_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = console_rpcapi.ConsoleAPI()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(rpcapi.client.target.topic, CONF.console_topic)
orig_prepare = rpcapi.client.prepare
expected_version = kwargs.pop('version', rpcapi.client.target.version)
with contextlib.nested(
mock.patch.object(rpcapi.client, rpc_method),
mock.patch.object(rpcapi.client, 'prepare'),
mock.patch.object(rpcapi.client, 'can_send_version'),
) as (
rpc_mock, prepare_mock, csv_mock
):
prepare_mock.return_value = rpcapi.client
rpc_mock.return_value = 'foo' if rpc_method == 'call' else None
csv_mock.side_effect = (
lambda v: orig_prepare(version=v).can_send_version())
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, rpc_mock.return_value)
prepare_mock.assert_called_once_with(version=expected_version)
rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
def test_add_console(self):
self._test_console_api('add_console', instance_id='i',
rpc_method='cast')
# NOTE(russellb) Havana compat
self.flags(console='havana', group='upgrade_levels')
self._test_console_api('add_console', instance_id='i',
rpc_method='cast', version='1.0')
def test_remove_console(self):
self._test_console_api('remove_console', console_id='i',
rpc_method='cast')
# NOTE(russellb) Havana compat
self.flags(console='havana', group='upgrade_levels')
self._test_console_api('remove_console', console_id='i',
rpc_method='cast', version='1.0')
| apache-2.0 | 4,160,911,469,931,397,000 | 36.355263 | 78 | 0.634731 | false |
pongad/api-client-staging | generated/python/proto-google-cloud-logging-v2/google/cloud/proto/logging/v2/logging_config_pb2_grpc.py | 7 | 6663 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.proto.logging.v2.logging_config_pb2 as google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2
import google.protobuf.empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ConfigServiceV2Stub(object):
"""Service for configuring sinks used to export log entries outside of
Stackdriver Logging.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListSinks = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/ListSinks',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.ListSinksRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.ListSinksResponse.FromString,
)
self.GetSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/GetSink',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.GetSinkRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.LogSink.FromString,
)
self.CreateSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/CreateSink',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.CreateSinkRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.LogSink.FromString,
)
self.UpdateSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/UpdateSink',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.UpdateSinkRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.LogSink.FromString,
)
self.DeleteSink = channel.unary_unary(
'/google.logging.v2.ConfigServiceV2/DeleteSink',
request_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.DeleteSinkRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class ConfigServiceV2Servicer(object):
"""Service for configuring sinks used to export log entries outside of
Stackdriver Logging.
"""
def ListSinks(self, request, context):
"""Lists sinks.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSink(self, request, context):
"""Gets a sink.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateSink(self, request, context):
"""Creates a sink that exports specified log entries to a destination. The
export of newly-ingested log entries begins immediately, unless the current
time is outside the sink's start and end times or the sink's
`writer_identity` is not permitted to write to the destination. A sink can
export log entries only from the resource owning the sink.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateSink(self, request, context):
"""Updates a sink. If the named sink doesn't exist, then this method is
identical to
[sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create).
If the named sink does exist, then this method replaces the following
fields in the existing sink with values from the new sink: `destination`,
`filter`, `output_version_format`, `start_time`, and `end_time`.
The updated filter might also have a new `writer_identity`; see the
`unique_writer_identity` field.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteSink(self, request, context):
"""Deletes a sink. If the sink has a unique `writer_identity`, then that
service account is also deleted.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ConfigServiceV2Servicer_to_server(servicer, server):
rpc_method_handlers = {
'ListSinks': grpc.unary_unary_rpc_method_handler(
servicer.ListSinks,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.ListSinksRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.ListSinksResponse.SerializeToString,
),
'GetSink': grpc.unary_unary_rpc_method_handler(
servicer.GetSink,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.GetSinkRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.LogSink.SerializeToString,
),
'CreateSink': grpc.unary_unary_rpc_method_handler(
servicer.CreateSink,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.CreateSinkRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.LogSink.SerializeToString,
),
'UpdateSink': grpc.unary_unary_rpc_method_handler(
servicer.UpdateSink,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.UpdateSinkRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.LogSink.SerializeToString,
),
'DeleteSink': grpc.unary_unary_rpc_method_handler(
servicer.DeleteSink,
request_deserializer=google_dot_cloud_dot_proto_dot_logging_dot_v2_dot_logging__config__pb2.DeleteSinkRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.logging.v2.ConfigServiceV2', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| bsd-3-clause | -5,180,625,488,711,537,000 | 49.862595 | 137 | 0.731652 | false |
total-impact/total-impact-core | extras/db_housekeeping/postgres_mirror.py | 2 | 12478 | import couchdb, os, logging, sys, collections
from pprint import pprint
import time, datetime, json
import requests
from couch_paginator import CouchPaginator
from totalimpact import dao
import psycopg2
# run in heroku by a) committing, b) pushing to heroku, and c) running
# heroku run python extras/db_housekeeping/postgres_mirror.py
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='[%(process)d] %(levelname)8s %(threadName)30s %(name)s - %(message)s'
)
logger = logging.getLogger("postgres_mirror")
def action_on_a_page_single_doc(page):
docs = [row.doc for row in page]
for doc in docs:
doc["tiid"] = doc["_id"]
try:
doc["last_update_run"]
except KeyError:
doc["last_update_run"] = None
print "try"
try:
print doc["tiid"]
cur.execute("""INSERT INTO items(tiid, created, last_modified, last_update_run)
VALUES (%(tiid)s, %(created)s, %(last_modified)s, %(last_update_run)s)""", doc)
#conn.commit()
except psycopg2.IntegrityError:
print "row already exists"
mypostgresdao.conn.rollback()
except:
mypostgresdao.conn.rollback()
finally:
pass
def build_items_save_list(items):
items_save_list = []
for item in items:
item["tiid"] = item["_id"]
try:
item["last_update_run"]
except KeyError:
item["last_update_run"] = None
items_save_list += [item]
return items_save_list
def build_metrics_save_list(items):
metrics_save_list = []
for item in items:
if "metrics" in item:
for full_metric_name in item["metrics"]:
for timestamp in item["metrics"][full_metric_name]["values"]["raw_history"]:
(provider, bare_metric_name) = full_metric_name.split(":")
metrics_save_list += [{"tiid":item["_id"],
"provider":provider,
"metric_name":bare_metric_name,
"collected_date":timestamp,
"drilldown_url":item["metrics"][full_metric_name]["provenance_url"],
"raw_value":item["metrics"][full_metric_name]["values"]["raw_history"][timestamp]
}]
return metrics_save_list
def build_aliases_save_list(items):
aliases_save_list = []
for item in items:
if "aliases" in item:
for namespace in item["aliases"]:
for nid in item["aliases"][namespace]:
aliases_save_list += [{"tiid":item["_id"],
"provider":"unknown",
"namespace":namespace,
"nid":nid,
"collected_date":now
}]
return aliases_save_list
class NoneDict(dict):
# returns None if key not defined instead of throwing KeyError
def __getitem__(self, key):
return dict.get(self, key)
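# Illustrative example (added for clarity): NoneDict lets the biblio-building
# code below read optional fields without try/except:
#   d = NoneDict({"title": "x"})
#   d["title"]    -> "x"
#   d["journal"]  -> None   (a plain dict would raise KeyError here)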
def build_biblio_save_list(items):
biblio_save_list = []
for item in items:
if "biblio" in item:
biblio_save = NoneDict()
biblio_save.update(item["biblio"])
biblio_save["tiid"] = item["_id"]
biblio_save["collected_date"] = now
biblio_save["authors_lnames"] = None
if "owner" in biblio_save:
biblio_save["provider"] = "github"
biblio_save["host"] = "github"
else:
biblio_save["provider"] = "unknown"
if "year" in biblio_save:
biblio_save["year_published"] = int(biblio_save["year"])
if "owner" in biblio_save:
biblio_save["authors_raw"] = biblio_save["owner"]
if "create_date" in biblio_save:
biblio_save["date_published"] = biblio_save["create_date"]
if "journal" in biblio_save:
biblio_save["host"] = biblio_save["journal"]
biblio_save_list += [biblio_save]
return biblio_save_list
"""
CREATE TABLE items (
tiid text NOT NULL,
created timestamptz,
last_modified timestamptz,
last_update_run timestamptz,
PRIMARY KEY (tiid)
);
CREATE TABLE metrics (
tiid text NOT NULL,
provider text NOT NULL,
metric_name text NOT NULL,
collected_date timestamptz NOT NULL,
drilldown_url text,
raw_value text,
PRIMARY KEY (tiid,provider,metric_name,collected_date)
);
CREATE TABLE aliases (
tiid text NOT NULL,
"namespace" text NOT NULL,
nid text NOT NULL,
last_modified timestamptz,
PRIMARY KEY (tiid, "namespace", nid))
CREATE TABLE biblio (
tiid text NOT NULL,
provider text NOT NULL,
last_modified timestamptz,
title text,
year_published numeric(25),
date_published timestamptz,
authors_lnames text,
authors_raw text,
"host" text,
url text,
description text,
PRIMARY KEY (tiid, provider))
CREATE TABLE email (
id text NOT NULL,
created timestamptz,
payload text NOT NULL,
PRIMARY KEY (id))
"""
def insert_unless_error(select_statement, save_list):
print "try to insert"
#cur.executemany(select_statement, save_list)
for l in save_list:
print cur.mogrify(select_statement, l)
try:
#cur.execute(select_statement, l)
pass
except psycopg2.IntegrityError:
print "insert already exists"
def item_action_on_a_page(page):
items = [row.doc for row in page]
print "ITEMS"
print datetime.datetime.now().isoformat()
items_save_list = build_items_save_list(items)
print datetime.datetime.now().isoformat()
insert_unless_error("""INSERT INTO items(tiid, created, last_modified, last_update_run)
VALUES (%(tiid)s, %(created)s, %(last_modified)s, %(last_update_run)s);""",
items_save_list)
print "BIBLIO"
print datetime.datetime.now().isoformat()
biblio_save_list = build_biblio_save_list(items)
print datetime.datetime.now().isoformat()
insert_unless_error("""INSERT INTO biblio(tiid, provider, collected_date, title, year_published, date_published, authors_lnames, authors_raw, host, url, description)
VALUES (%(tiid)s, %(provider)s, %(collected_date)s, %(title)s, %(year_published)s, %(date_published)s, %(authors_lnames)s, %(authors_raw)s, %(host)s, %(url)s, %(description)s)""",
biblio_save_list)
print "ALIASES"
print datetime.datetime.now().isoformat()
aliases_save_list = build_aliases_save_list(items)
print datetime.datetime.now().isoformat()
insert_unless_error("""INSERT INTO aliases(tiid, namespace, nid, collected_date)
VALUES (%(tiid)s, %(namespace)s, %(nid)s, %(collected_date)s)""",
aliases_save_list)
print "METRICS"
print datetime.datetime.now().isoformat()
metrics_save_list = build_metrics_save_list(items)
print datetime.datetime.now().isoformat()
insert_unless_error("""INSERT INTO metrics(tiid, provider, metric_name, collected_date, drilldown_url, raw_value)
VALUES (%(tiid)s, %(provider)s, %(metric_name)s, %(collected_date)s, %(drilldown_url)s, %(raw_value)s)""",
metrics_save_list)
print "done"
def build_email_save_list(emails):
email_save_list = []
for email in emails:
print email["_id"]
email_save = {}
email_save["id"] = email["_id"]
email_save["created"] = email["created"]
email_save["payload"] = json.dumps(email["payload"])
email_save_list += [email_save]
return email_save_list
def email_action_on_a_page(page):
emails = [row.doc for row in page]
print "EMAILS"
print datetime.datetime.now().isoformat()
emails_save_list = build_email_save_list(emails)
print datetime.datetime.now().isoformat()
insert_unless_error("""INSERT INTO email(id, created, payload)
VALUES (%(id)s, %(created)s, %(payload)s);""",
emails_save_list)
print datetime.datetime.now().isoformat()
print "done"
def build_api_users_save_list(docs):
api_users_save_list = []
registered_items_save_list = []
for doc in docs:
print doc["_id"]
api_users_save = {}
api_users_save["api_key"] = doc["current_key"]
api_users_save["max_registered_items"] = doc["max_registered_items"]
api_users_save["created"] = doc["created"]
for key in doc["meta"]:
api_users_save[key] = doc["meta"][key]
api_users_save_list += [api_users_save]
for alias in doc["registered_items"]:
registered_items_save_list += [{
"api_key":api_users_save["api_key"],
"registered_date":doc["registered_items"][alias]["registered_date"],
"alias":alias}]
return (api_users_save_list, registered_items_save_list)
def insert_string(tablename, colnames):
colnames_string = ", ".join(colnames)
percent_colnames_string = ", ".join(["%("+col+")s" for col in colnames])
insert = "INSERT INTO {tablename} ({colnames_string}) VALUES ({percent_colnames_string});\t".format(
tablename=tablename,
colnames_string=colnames_string,
percent_colnames_string=percent_colnames_string)
return insert
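# Illustrative example (added for clarity):
#   insert_string("aliases", ["tiid", "namespace", "nid"])
# returns the psycopg2-style named-parameter statement
#   'INSERT INTO aliases (tiid, namespace, nid) VALUES (%(tiid)s, %(namespace)s, %(nid)s);\t'
# (note the trailing tab appended by the format string above).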
def api_users_action_on_a_page(page):
docs = [row.doc for row in page]
print "API USERS"
print datetime.datetime.now().isoformat()
    (api_users_save_list, registered_items_save_list) = build_api_users_save_list(docs)
print datetime.datetime.now().isoformat()
colnames = "api_key, max_registered_items, created, planned_use, example_url, api_key_owner, notes, email, organization".split(", ")
insert_unless_error(insert_string("api_users", colnames), api_users_save_list)
colnames = "api_key, alias, registered_date".split(", ")
    insert_unless_error(insert_string("registered_items", colnames), registered_items_save_list)
print datetime.datetime.now().isoformat()
print "done"
def run_on_documents(func_page, view_name, start_key, end_key, row_count=0, page_size=500):
couch_page = CouchPaginator(db, view_name, page_size, start_key=start_key, end_key=end_key, include_docs=True)
while couch_page:
func_page(couch_page)
row_count += page_size
logger.info("%i. getting new page" %(row_count))
if couch_page.has_next:
couch_page = CouchPaginator(db, view_name, page_size, start_key=couch_page.next, end_key=end_key, include_docs=True)
else:
couch_page = None
print "number items = ", row_count
#run
# set up postgres
mypostgresdao = dao.PostgresDao(os.environ["POSTGRESQL_URL"])
cur = mypostgresdao.get_cursor()
logger.info("connected to postgres")
# set up couchdb
cloudant_db = os.getenv("CLOUDANT_DB")
cloudant_url = os.getenv("CLOUDANT_URL")
couch = couchdb.Server(url=cloudant_url)
db = couch[cloudant_db]
logger.info("connected to couch at " + cloudant_url + " / " + cloudant_db)
# do a few preventative checks
if (cloudant_db == "ti"):
print "\n\nTHIS MAY BE THE PRODUCTION DATABASE!!!"
else:
print "\n\nThis doesn't appear to be the production database\n\n"
confirm = None
#confirm = raw_input("\nType YES if you are sure you want to run this test:")
confirm = "YES"
if not confirm=="YES":
print "nevermind, then."
exit()
# set up the action code
#myview_name = "queues/by_alias"
#mystart_key = ["url", "https://github.0000000"]
#myend_key = ["url", "https://github.zzzzzzzz"]
myview_name = "by_type/by_type"
mystart_key = ["api_user"]
myend_key = ["api_user"]
now = datetime.datetime.now().isoformat()
run_on_documents(api_users_action_on_a_page,
view_name=myview_name,
start_key=mystart_key,
end_key=myend_key,
page_size=500)
cur.close()
mypostgresdao.close()
# try:
# cur.execute("CREATE TABLE phonebook(phone VARCHAR(32), firstname VARCHAR(32), lastname VARCHAR(32), address VARCHAR(64));")
# except psycopg2.ProgrammingError:
# print "table already exists"
# cur.execute("SELECT * FROM phonebook ORDER BY lastname;")
# print cur.fetchone()
| mit | 7,727,598,140,778,121,000 | 33.75766 | 204 | 0.603302 | false |
kambysese/mne-python | tutorials/time-freq/plot_ssvep.py | 10 | 27044 | """
.. _tut-ssvep:
==========================================================
Frequency-tagging: Basic analysis of an SSVEP/vSSR dataset
==========================================================
In this tutorial we compute the frequency spectrum and quantify signal-to-noise
ratio (SNR) at a target frequency in EEG data recorded during fast periodic
visual stimulation (FPVS) at 12 Hz and 15 Hz in different trials.
Extracting SNR at stimulation frequency is a simple way to quantify frequency
tagged responses in MEEG (a.k.a. steady state visually evoked potentials,
SSVEP, or visual steady-state responses, vSSR in the visual domain,
or auditory steady-state responses, ASSR in the auditory domain).
For a general introduction to the method see
`Norcia et al. (2015) <https://doi.org/10.1167/15.6.4>`_ for the visual domain,
and `Picton et al. (2003) <https://doi.org/10.3109/14992020309101316>`_ for
the auditory domain.
**Data and outline:**
We use a simple example dataset with frequency tagged visual stimulation:
N=2 participants observed checkerboard patterns inverting with a constant
frequency of either 12.0 Hz or 15.0 Hz.
32-channel wet EEG was recorded.
(see :ref:`ssvep-dataset` for more information).
We will visualize both the power-spectral density (PSD) and the SNR
spectrum of the epoched data,
extract SNR at stimulation frequency,
plot the topography of the response,
and statistically separate 12 Hz and 15 Hz responses in the different trials.
Since the evoked response is mainly generated in early visual areas of the
brain, the statistical analysis will be carried out on an occipital
ROI.
.. contents:: Outline
:depth: 2
""" # noqa: E501
# Authors: Dominik Welke <[email protected]>
# Evgenii Kalenkovich <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
import numpy as np
from scipy.stats import ttest_rel
###############################################################################
# Data preprocessing
# ------------------
# Due to a generally high SNR in SSVEP/vSSR, typical preprocessing steps
# are considered optional. This doesn't mean, that a proper cleaning would not
# increase your signal quality!
#
# * Raw data have FCz reference, so we will apply common-average rereferencing.
#
# * We will apply a 0.1 highpass filter.
#
# * Lastly, we will cut the data in 20 s epochs corresponding to the trials.
#
#
# Load raw data
data_path = mne.datasets.ssvep.data_path()
bids_fname = data_path + '/sub-02/ses-01/eeg/sub-02_ses-01_task-ssvep_eeg.vhdr'
raw = mne.io.read_raw_brainvision(bids_fname, preload=True, verbose=False)
raw.info['line_freq'] = 50.
# Set montage
montage = mne.channels.make_standard_montage('easycap-M1')
raw.set_montage(montage, verbose=False)
# Set common average reference
raw.set_eeg_reference('average', projection=False, verbose=False)
# Apply bandpass filter
raw.filter(l_freq=0.1, h_freq=None, fir_design='firwin', verbose=False)
# Construct epochs
event_id = {
'12hz': 255,
'15hz': 155
}
events, _ = mne.events_from_annotations(raw, verbose=False)
raw.info["events"] = events
tmin, tmax = -1., 20. # in s
baseline = None
epochs = mne.Epochs(
raw, events=events,
event_id=[event_id['12hz'], event_id['15hz']], tmin=tmin,
tmax=tmax, baseline=baseline, verbose=False)
###############################################################################
# Frequency analysis
# ------------------
# Now we compute the frequency spectrum of the EEG data.
# You will already see the peaks at the stimulation frequencies and some of
# their harmonics, without any further processing.
#
# The 'classical' PSD plot will be compared to a plot of the SNR spectrum.
# SNR will be computed as a ratio of the power in a given frequency bin
# to the average power in its neighboring bins.
# This procedure has two advantages over using the raw PSD:
#
# * it normalizes the spectrum and accounts for 1/f power decay.
#
# * power modulations which are not very narrow band will disappear.
#
# Calculate power spectral density (PSD)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The frequency spectrum will be computed using Fast Fourier transform (FFT).
# This seems to be common practice in the steady-state literature and is
# based on the exact knowledge of the stimulus and the assumed response -
# especially in terms of its stability over time.
# For a discussion see e.g.
# `Bach & Meigen (1999) <https://doi.org/10.1023/A:1002648202420>`_
#
# We will exclude the first second of each trial from the analysis:
#
# * steady-state responses often take a while to stabilize, and the
# transient phase in the beginning can distort the signal estimate.
#
# * this section of data is expected to be dominated by responses related to
# the stimulus onset, and we are not interested in this.
#
# In MNE we call plain FFT as a special case of Welch's method, with only a
# single Welch window spanning the entire trial and no specific windowing
# function (i.e. applying a boxcar window).
#
tmin = 1.
tmax = 20.
fmin = 1.
fmax = 90.
sfreq = epochs.info['sfreq']
psds, freqs = mne.time_frequency.psd_welch(
epochs,
n_fft=int(sfreq * (tmax - tmin)),
n_overlap=0, n_per_seg=None,
tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax,
window='boxcar',
verbose=False)
###############################################################################
# Calculate signal to noise ratio (SNR)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# SNR - as we define it here - is a measure of relative power:
# it's the ratio of power in a given frequency bin - the 'signal' -
# to a 'noise' baseline - the average power in the surrounding frequency bins.
# This approach was initially proposed by
# `Meigen & Bach (1999) <https://doi.org/10.1023/A:1002097208337>`_
#
# Hence, we need to set some parameters for this baseline - how many
# neighboring bins should be taken for this computation, and do we want to skip
# the direct neighbors (this can make sense if the stimulation frequency is not
# super constant, or frequency bands are very narrow).
#
# The function below does what we want.
#
def snr_spectrum(psd, noise_n_neighbor_freqs=1, noise_skip_neighbor_freqs=1):
"""Compute SNR spectrum from PSD spectrum using convolution.
Parameters
----------
psd : ndarray, shape ([n_trials, n_channels,] n_frequency_bins)
Data object containing PSD values. Works with arrays as produced by
MNE's PSD functions or channel/trial subsets.
noise_n_neighbor_freqs : int
Number of neighboring frequencies used to compute noise level.
increment by one to add one frequency bin ON BOTH SIDES
noise_skip_neighbor_freqs : int
set this >=1 if you want to exclude the immediately neighboring
frequency bins in noise level calculation
Returns
-------
snr : ndarray, shape ([n_trials, n_channels,] n_frequency_bins)
Array containing SNR for all epochs, channels, frequency bins.
NaN for frequencies on the edges, that do not have enough neighbors on
one side to calculate SNR.
"""
# Construct a kernel that calculates the mean of the neighboring
# frequencies
averaging_kernel = np.concatenate((
np.ones(noise_n_neighbor_freqs),
np.zeros(2 * noise_skip_neighbor_freqs + 1),
np.ones(noise_n_neighbor_freqs)))
averaging_kernel /= averaging_kernel.sum()
# Calculate the mean of the neighboring frequencies by convolving with the
# averaging kernel.
mean_noise = np.apply_along_axis(
lambda psd_: np.convolve(psd_, averaging_kernel, mode='valid'),
axis=-1, arr=psd
)
# The mean is not defined on the edges so we will pad it with nas. The
# padding needs to be done for the last dimension only so we set it to
# (0, 0) for the other ones.
edge_width = noise_n_neighbor_freqs + noise_skip_neighbor_freqs
pad_width = [(0, 0)] * (mean_noise.ndim - 1) + [(edge_width, edge_width)]
mean_noise = np.pad(
mean_noise, pad_width=pad_width, constant_values=np.nan
)
return psd / mean_noise
###############################################################################
# Now we call the function to compute our SNR spectrum.
#
# As described above, we have to define two parameters.
#
# * how many noise bins do we want?
#
# * do we want to skip the n bins directly next to the target bin?
#
#
# Tweaking these parameters *can* drastically impact the resulting spectrum,
# but mainly if you choose extremes.
# E.g. if you'd skip very many neighboring bins, broad band power modulations
# (such as the alpha peak) should reappear in the SNR spectrum.
# On the other hand, if you skip none you might miss or smear peaks if the
# induced power is distributed over two or more frequency bins (e.g. if the
# stimulation frequency isn't perfectly constant, or you have very narrow
# bins).
#
# Here, we want to compare power at each bin with average power of the
# **three neighboring bins** (on each side) and **skip one bin** directly next
# to it.
#
snrs = snr_spectrum(psds, noise_n_neighbor_freqs=3,
noise_skip_neighbor_freqs=1)
##############################################################################
# Plot PSD and SNR spectra
# ^^^^^^^^^^^^^^^^^^^^^^^^
# Now we will plot grand average PSD (in blue) and SNR (in red) ± sd
# for every frequency bin.
# PSD is plotted on a log scale.
#
fig, axes = plt.subplots(2, 1, sharex='all', sharey='none', figsize=(8, 5))
freq_range = range(np.where(np.floor(freqs) == 1.)[0][0],
np.where(np.ceil(freqs) == fmax - 1)[0][0])
psds_plot = 10 * np.log10(psds)
psds_mean = psds_plot.mean(axis=(0, 1))[freq_range]
psds_std = psds_plot.std(axis=(0, 1))[freq_range]
axes[0].plot(freqs[freq_range], psds_mean, color='b')
axes[0].fill_between(
freqs[freq_range], psds_mean - psds_std, psds_mean + psds_std,
color='b', alpha=.2)
axes[0].set(title="PSD spectrum", ylabel='Power Spectral Density [dB]')
# SNR spectrum
snr_mean = snrs.mean(axis=(0, 1))[freq_range]
snr_std = snrs.std(axis=(0, 1))[freq_range]
axes[1].plot(freqs[freq_range], snr_mean, color='r')
axes[1].fill_between(
freqs[freq_range], snr_mean - snr_std, snr_mean + snr_std,
color='r', alpha=.2)
axes[1].set(
title="SNR spectrum", xlabel='Frequency [Hz]',
ylabel='SNR', ylim=[-2, 30], xlim=[fmin, fmax])
fig.show()
###############################################################################
# You can see that the peaks at the stimulation frequencies (12 Hz, 15 Hz)
# and their harmonics are visible in both plots (just as the line noise at
# 50 Hz).
# Yet, the SNR spectrum shows them more prominently as peaks from a
# noisy but more or less constant baseline of SNR = 1.
# You can further see that the SNR processing removes any broad-band power
# differences (such as the increased power in alpha band around 10 Hz),
# and also removes the 1/f decay in the PSD.
#
# Note that while the SNR plot implies the possibility of values below 0
# (mean minus sd) such values do not make sense.
# Each SNR value is a ratio of positive PSD values, and the lowest possible PSD
# value is 0 (negative Y-axis values in the upper panel only result from
# plotting PSD on a log scale).
# Hence SNR values must be positive and can minimally go towards 0.
#
# Extract SNR values at the stimulation frequency
# -----------------------------------------------
#
# Our processing yielded a large array of many SNR values for each trial x
# channel x frequency-bin of the PSD array.
#
# For statistical analysis we obviously need to define specific subsets of this
# array. First of all, we are only interested in SNR at the stimulation
# frequency, but we also want to restrict the analysis to a spatial ROI.
# Lastly, answering your interesting research questions will probably rely on
# comparing SNR in different trials.
#
# Therefore we will have to find the indices of trials, channels, etc.
# Alternatively, one could subselect the trials already at the epoching step,
# using MNE's event information, and process different epoch structures
# separately.
#
# Let's only have a look at the trials with 12 Hz stimulation, for now.
#
# define stimulation frequency
stim_freq = 12.
###############################################################################
# Get index for the stimulation frequency (12 Hz)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Ideally, there would be a bin with the stimulation frequency exactly in its
# center. However, depending on your spectral decomposition this is not
# always the case. We will find the bin closest to it - this one should contain
# our frequency tagged response.
#
# find index of frequency bin closest to stimulation frequency
i_bin_12hz = np.argmin(abs(freqs - stim_freq))
# could be updated to support multiple frequencies
# for later, we will already find the 15 Hz bin and the 1st and 2nd harmonic
# for both.
i_bin_24hz = np.argmin(abs(freqs - 24))
i_bin_36hz = np.argmin(abs(freqs - 36))
i_bin_15hz = np.argmin(abs(freqs - 15))
i_bin_30hz = np.argmin(abs(freqs - 30))
i_bin_45hz = np.argmin(abs(freqs - 45))
###############################################################################
# Get indices for the different trial types
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
i_trial_12hz = np.where(epochs.events[:, 2] == event_id['12hz'])[0]
i_trial_15hz = np.where(epochs.events[:, 2] == event_id['15hz'])[0]
###############################################################################
# Get indices of EEG channels forming the ROI
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Define different ROIs
roi_vis = ['POz', 'Oz', 'O1', 'O2', 'PO3', 'PO4', 'PO7',
'PO8', 'PO9', 'PO10', 'O9', 'O10'] # visual roi
# Find corresponding indices using mne.pick_types()
picks_roi_vis = mne.pick_types(epochs.info, eeg=True, stim=False,
exclude='bads', selection=roi_vis)
###############################################################################
# Apply the subset, and check the result
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Now we simply need to apply our selection and yield a result. Therefore,
# we typically report grand average SNR over the subselection.
#
# In this tutorial we don't verify the presence of a neural response.
# This is commonly done in the ASSR literature where SNR is
# often lower. An F-test or Hotelling T² would be
# appropriate for this purpose.
snrs_target = snrs[i_trial_12hz, :, i_bin_12hz][:, picks_roi_vis]
print("sub 2, 12 Hz trials, SNR at 12 Hz")
print(f'average SNR (occipital ROI): {snrs_target.mean()}')
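###############################################################################
# As a crude sanity check - much simpler than the F-test or Hotelling T²
# mentioned above, and only a rough approximation of a proper
# response-detection test - we can ask whether the trial-averaged ROI SNR at
# 12 Hz differs from the no-response baseline of SNR = 1, using a two-sided
# one-sample t-test.

from scipy.stats import ttest_1samp

t_noise, p_noise = ttest_1samp(snrs_target.mean(axis=1), popmean=1)
print("one-sample t-test of ROI SNR at 12 Hz against SNR = 1: "
      "t = %.3f, p = %f" % (t_noise, p_noise))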
##############################################################################
# Topography of the vSSR
# ----------------------
# But wait...
# As described in the intro, we have decided *a priori* to work with average
# SNR over a subset of occipital channels - a visual region of interest (ROI)
# - because we expect SNR to be higher on these channels than in other
# channels.
#
# Let's check out, whether this was a good decision!
#
# Here we will plot average SNR for each channel location as a topoplot.
# Then we will do a simple paired T-test to check, whether average SNRs over
# the two sets of channels are significantly different.
#
# get average SNR at 12 Hz for ALL channels
snrs_12hz = snrs[i_trial_12hz, :, i_bin_12hz]
snrs_12hz_chaverage = snrs_12hz.mean(axis=0)
# plot SNR topography
fig, ax = plt.subplots(1)
mne.viz.plot_topomap(snrs_12hz_chaverage, epochs.info, vmin=1., axes=ax)
print("sub 2, 12 Hz trials, SNR at 12 Hz")
print("average SNR (all channels): %f" % snrs_12hz_chaverage.mean())
print("average SNR (occipital ROI): %f" % snrs_target.mean())
tstat_roi_vs_scalp = \
ttest_rel(snrs_target.mean(axis=1), snrs_12hz.mean(axis=1))
print("12 Hz SNR in occipital ROI is significantly larger than 12 Hz SNR over "
"all channels: t = %.3f, p = %f" % tstat_roi_vs_scalp)
##############################################################################
# We can see that 1) this participant indeed exhibits a cluster of channels
# with high SNR in the occipital region and 2) that the average SNR over all
# channels is smaller than the average of the visual ROI computed above.
# The difference is statistically significant. Great!
#
# Such a topography plot can be a nice tool to explore and play with your data
# - e.g. you could try how changing the reference will affect the spatial
# distribution of SNR values.
#
# However, we also wanted to show this plot to point at a potential
# problem with frequency-tagged (or any other brain imaging) data:
# there are many channels and somewhere you will likely find some
# statistically significant effect.
# It is very easy - even unintentionally - to end up double-dipping or p-hacking.
# So if you want to work with an ROI or individual channels, ideally select
# them *a priori* - before collecting or looking at the data - and preregister
# this decision so people will believe you.
# If you end up selecting an ROI or individual channel for reporting *because
# this channel or ROI shows an effect*, e.g. in an explorative analysis, this
# is also fine but make it transparently and correct for multiple comparison.
#
# Statistical separation of 12 Hz and 15 Hz vSSR
# ----------------------------------------------
# After this little detour into open science, let's move on and
# do the analyses we actually wanted to do:
#
# We will show that we can easily detect and discriminate the brains responses
# in the trials with different stimulation frequencies.
#
# In the frequency and SNR spectrum plot above, we had all trials mixed up.
# Now we will extract 12 and 15 Hz SNR in both types of trials individually,
# and compare the values with a simple t-test.
# We will also extract SNR of the 1st and 2nd harmonic for both stimulation
# frequencies. These are often reported as well and can show interesting
# interactions.
#
snrs_roi = snrs[:, picks_roi_vis, :].mean(axis=1)
freq_plot = [12, 15, 24, 30, 36, 45]
color_plot = [
'darkblue', 'darkgreen', 'mediumblue', 'green', 'blue', 'seagreen'
]
xpos_plot = [-5. / 12, -3. / 12, -1. / 12, 1. / 12, 3. / 12, 5. / 12]
fig, ax = plt.subplots()
labels = ['12 Hz trials', '15 Hz trials']
x = np.arange(len(labels)) # the label locations
width = 0.6 # the width of the bars
res = dict()
# loop to plot SNRs at stimulation frequencies and harmonics
for i, f in enumerate(freq_plot):
# extract snrs
stim_12hz_tmp = \
snrs_roi[i_trial_12hz, np.argmin(abs(freqs - f))]
stim_15hz_tmp = \
snrs_roi[i_trial_15hz, np.argmin(abs(freqs - f))]
SNR_tmp = [stim_12hz_tmp.mean(), stim_15hz_tmp.mean()]
# plot (with std)
ax.bar(
x + width * xpos_plot[i], SNR_tmp, width / len(freq_plot),
yerr=np.std(SNR_tmp),
label='%i Hz SNR' % f, color=color_plot[i])
# store results for statistical comparison
res['stim_12hz_snrs_%ihz' % f] = stim_12hz_tmp
res['stim_15hz_snrs_%ihz' % f] = stim_15hz_tmp
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('SNR')
ax.set_title('Average SNR at target frequencies')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(['%i Hz' % f for f in freq_plot], title='SNR at:')
ax.set_ylim([0, 70])
ax.axhline(1, ls='--', c='r')
fig.show()
###############################################################################
# As you can easily see there are striking differences between the trials.
# Let's verify this using a series of two-tailed paired T-Tests.
#
# Compare 12 Hz and 15 Hz SNR in trials after averaging over channels
tstat_12hz_trial_stim = \
ttest_rel(res['stim_12hz_snrs_12hz'], res['stim_12hz_snrs_15hz'])
print("12 Hz Trials: 12 Hz SNR is significantly higher than 15 Hz SNR"
": t = %.3f, p = %f" % tstat_12hz_trial_stim)
tstat_12hz_trial_1st_harmonic = \
ttest_rel(res['stim_12hz_snrs_24hz'], res['stim_12hz_snrs_30hz'])
print("12 Hz Trials: 24 Hz SNR is significantly higher than 30 Hz SNR"
": t = %.3f, p = %f" % tstat_12hz_trial_1st_harmonic)
tstat_12hz_trial_2nd_harmonic = \
ttest_rel(res['stim_12hz_snrs_36hz'], res['stim_12hz_snrs_45hz'])
print("12 Hz Trials: 36 Hz SNR is significantly higher than 45 Hz SNR"
": t = %.3f, p = %f" % tstat_12hz_trial_2nd_harmonic)
print()
tstat_15hz_trial_stim = \
ttest_rel(res['stim_15hz_snrs_12hz'], res['stim_15hz_snrs_15hz'])
print("15 Hz trials: 12 Hz SNR is significantly lower than 15 Hz SNR"
": t = %.3f, p = %f" % tstat_15hz_trial_stim)
tstat_15hz_trial_1st_harmonic = \
ttest_rel(res['stim_15hz_snrs_24hz'], res['stim_15hz_snrs_30hz'])
print("15 Hz trials: 24 Hz SNR is significantly lower than 30 Hz SNR"
": t = %.3f, p = %f" % tstat_15hz_trial_1st_harmonic)
tstat_15hz_trial_2nd_harmonic = \
ttest_rel(res['stim_15hz_snrs_36hz'], res['stim_15hz_snrs_45hz'])
print("15 Hz trials: 36 Hz SNR is significantly lower than 45 Hz SNR"
": t = %.3f, p = %f" % tstat_15hz_trial_2nd_harmonic)
##############################################################################
# Debriefing
# ----------
# So that's it, we hope you enjoyed our little tour through this example
# dataset.
#
# As you could see, frequency-tagging is a very powerful tool that can yield
# very high signal to noise ratios and effect sizes that enable you to detect
# brain responses even within a single participant and single trials of only
# a few seconds duration.
#
# Bonus exercises
# ---------------
# For the overly motivated amongst you, let's see what else we can show with
# these data.
#
# Using the PSD function as implemented in MNE makes it very easy to change
# the amount of data that is actually used in the spectrum
# estimation.
#
# Here we employ this to show you some features of frequency
# tagging data that you might or might not have already intuitively expected:
#
# Effect of trial duration on SNR
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# First we will simulate shorter trials by taking only the first x s of our 20s
# trials (2, 4, 6, 8, ..., 20 s), and compute the SNR using a FFT window
# that covers the entire epoch:
#
stim_bandwidth = .5
# shorten data and welch window
window_lengths = [i for i in range(2, 21, 2)]
window_snrs = [[]] * len(window_lengths)
for i_win, win in enumerate(window_lengths):
# compute spectrogram
windowed_psd, windowed_freqs = mne.time_frequency.psd_welch(
epochs[str(event_id['12hz'])],
n_fft=int(sfreq * win),
n_overlap=0, n_per_seg=None,
tmin=0, tmax=win,
window='boxcar',
fmin=fmin, fmax=fmax, verbose=False)
# define a bandwidth of 1 Hz around stimfreq for SNR computation
bin_width = windowed_freqs[1] - windowed_freqs[0]
skip_neighbor_freqs = \
round((stim_bandwidth / 2) / bin_width - bin_width / 2. - .5) if (
bin_width < stim_bandwidth) else 0
n_neighbor_freqs = \
int((sum((windowed_freqs <= 13) & (windowed_freqs >= 11)
) - 1 - 2 * skip_neighbor_freqs) / 2)
# compute snr
windowed_snrs = \
snr_spectrum(
windowed_psd,
noise_n_neighbor_freqs=n_neighbor_freqs if (
n_neighbor_freqs > 0
) else 1,
noise_skip_neighbor_freqs=skip_neighbor_freqs)
window_snrs[i_win] = \
windowed_snrs[
:, picks_roi_vis,
np.argmin(
abs(windowed_freqs - 12.))].mean(axis=1)
fig, ax = plt.subplots(1)
ax.boxplot(window_snrs, labels=window_lengths, vert=True)
ax.set(title='Effect of trial duration on 12 Hz SNR',
ylabel='Average SNR', xlabel='Trial duration [s]')
ax.axhline(1, ls='--', c='r')
fig.show()
##############################################################################
# You can see that the signal estimate / our SNR measure increases with the
# trial duration.
#
# This should be easy to understand: in longer recordings there is simply
# more signal (one second of additional stimulation adds, in our case, 12
# cycles of signal) while the noise is (hopefully) stochastic and not locked
# to the stimulation frequency.
# In other words: with more data the signal term grows faster than the noise
# term.
#
# We can further see that the very short trials with FFT windows < 2-3s are not
# great - here we've either hit the noise floor and/or the transient response
# at the trial onset covers too much of the trial.
#
# Again, this tutorial doesn't statistically test for the presence of a neural
# response, but an F-test or Hotelling T² would be appropriate for this
# purpose.
#
# Time resolved SNR
# ^^^^^^^^^^^^^^^^^
# ..and finally we can trick MNE's PSD implementation to make it a
# sliding window analysis and come up with a time resolved SNR measure.
# This will reveal whether a participant blinked or scratched their head..
#
# Each of the ten trials is coded with a different color in the plot below.
#
# 3s sliding window
window_length = 4
window_starts = [i for i in range(20 - window_length)]
window_snrs = [[]] * len(window_starts)
for i_win, win in enumerate(window_starts):
# compute spectrogram
windowed_psd, windowed_freqs = mne.time_frequency.psd_welch(
epochs[str(event_id['12hz'])],
n_fft=int(sfreq * window_length) - 1,
n_overlap=0, n_per_seg=None,
window='boxcar',
tmin=win, tmax=win + window_length,
fmin=fmin, fmax=fmax,
verbose=False)
# define a bandwidth of 1 Hz around stimfreq for SNR computation
bin_width = windowed_freqs[1] - windowed_freqs[0]
skip_neighbor_freqs = \
round((stim_bandwidth / 2) / bin_width - bin_width / 2. - .5) if (
bin_width < stim_bandwidth) else 0
n_neighbor_freqs = \
int((sum((windowed_freqs <= 13) & (windowed_freqs >= 11)
) - 1 - 2 * skip_neighbor_freqs) / 2)
# compute snr
windowed_snrs = snr_spectrum(
windowed_psd,
noise_n_neighbor_freqs=n_neighbor_freqs if (
n_neighbor_freqs > 0) else 1,
noise_skip_neighbor_freqs=skip_neighbor_freqs)
window_snrs[i_win] = \
windowed_snrs[:, picks_roi_vis, np.argmin(
abs(windowed_freqs - 12.))].mean(axis=1)
fig, ax = plt.subplots(1)
colors = plt.get_cmap('Greys')(np.linspace(0, 1, 10))
for i in range(10):
ax.plot(window_starts, np.array(window_snrs)[:, i], color=colors[i])
ax.set(title='Time resolved 12 Hz SNR - %is sliding window' % window_length,
ylabel='Average SNR', xlabel='t0 of analysis window [s]')
ax.axhline(1, ls='--', c='r')
ax.legend(['individual trials in greyscale'])
fig.show()
##############################################################################
# Well.. turns out this was a bit too optimistic ;)
#
# But seriously: this was a nice idea, but we've reached the limit of
# what's possible with this single-subject example dataset.
# However, there might be data, applications, or research questions
# where such an analysis makes sense.
#
| bsd-3-clause | -6,765,559,744,100,278,000 | 39.060741 | 79 | 0.650864 | false |
eviljeff/zamboni | mkt/api/tests/__init__.py | 13 | 1031 | import json
from mkt.site.tests import TestCase
class BaseAPI(TestCase):
"""
A base test case useful for API testing.
"""
def _allowed_verbs(self, url, allowed):
"""
Will run through all the verbs except the ones specified in allowed
        and ensure that hitting those produces a 401, 403 or 405. Otherwise
        the test will fail.
"""
verbs = ['get', 'post', 'put', 'patch', 'delete']
for verb in verbs:
if verb in allowed:
continue
try:
res = getattr(self.client, verb)(url)
except AttributeError:
# Not all clients have patch.
if verb != 'patch':
raise
msg = 'Expected 40{1,3,5} for %s, got %s' % (verb.upper(),
res.status_code)
assert res.status_code in (401, 403, 405), msg
def get_error(self, response):
return json.loads(response.content)['error_message']
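# Illustrative usage sketch (added; the subclass and URL below are
# hypothetical, not part of this module):
#
#   class TestWidgetAPI(BaseAPI):
#       def test_verbs(self):
#           # anything other than GET should yield a 401, 403 or 405
#           self._allowed_verbs('/api/v1/widgets/', ['get'])
#
# get_error() simply pulls the 'error_message' field out of a JSON error
# response body.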
| bsd-3-clause | 790,494,003,812,005,000 | 31.21875 | 77 | 0.516974 | false |
puttarajubr/commcare-hq | corehq/apps/analytics/signals.py | 1 | 2227 | from corehq.apps.accounting.utils import ensure_domain_instance
from .tasks import identify
from django.dispatch import receiver
from corehq.apps.users.models import WebUser
from corehq.apps.accounting.models import (
ProBonoStatus,
SoftwarePlanEdition,
Subscription,
)
from corehq.apps.accounting.signals import subscription_upgrade_or_downgrade
from corehq.apps.domain.signals import commcare_domain_post_save
from corehq.apps.users.signals import couch_user_post_save
@receiver(couch_user_post_save)
def user_save_callback(sender, **kwargs):
couch_user = kwargs.get("couch_user", None)
if couch_user and couch_user.is_web_user():
update_subscription_properties_by_user(couch_user)
@receiver(commcare_domain_post_save)
@receiver(subscription_upgrade_or_downgrade)
def domain_save_callback(sender, **kwargs):
domain = kwargs.get("domain", None)
domain = ensure_domain_instance(domain)
if domain:
update_subscription_properties_by_domain(domain)
def update_subscription_properties_by_user(couch_user):
# Note: using "yes" and "no" instead of True and False because spec calls
# for using these values. (True is just converted to "True" in KISSmetrics)
properties = {
SoftwarePlanEdition.COMMUNITY: "no",
SoftwarePlanEdition.STANDARD: "no",
SoftwarePlanEdition.PRO: "no",
SoftwarePlanEdition.ADVANCED: "no",
SoftwarePlanEdition.ENTERPRISE: "no",
"Pro Bono": "no",
}
for domain_name in couch_user.domains:
plan_version, subscription = Subscription.get_subscribed_plan_by_domain(domain_name)
if subscription is not None:
if subscription.pro_bono_status == ProBonoStatus.YES:
properties["Pro Bono"] = "yes"
edition = plan_version.plan.edition
if edition in properties:
properties[edition] = "yes"
identify.delay(couch_user.username, properties)
def update_subscription_properties_by_domain(domain):
affected_users = WebUser.view(
'users/web_users_by_domain', reduce=False, key=domain.name, include_docs=True
).all()
for web_user in affected_users:
update_subscription_properties_by_user(web_user)
| bsd-3-clause | 4,323,780,360,677,824,000 | 33.796875 | 92 | 0.710822 | false |
noba3/KoTos | addons/plugin.video.phstreams/resources/lib/resolvers/watch1080.py | 2 | 1918 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,base64
from resources.lib.libraries import client
from resources.lib.resolvers import openload
def resolve(url):
try:
try: quality = urlparse.parse_qs(urlparse.urlparse(url).query)['quality'][0]
except: quality = '1080P'
url = url.rsplit('?', 1)[0]
result = client.request(url, close=False)
url = client.parseDOM(result, 'div', attrs = {'class': 'player'})[0]
url = client.parseDOM(url, 'iframe', ret='src')[0]
result = client.request(url)
url = client.parseDOM(result, 'iframe', ret='src')
if len(url) > 0: return openload.resolve(url[0])
result = re.compile("\('(.+?)'\)").findall(result)[0]
result = base64.b64decode(result)
url = client.parseDOM(result, 'source', ret='src', attrs = {'data-res': quality})
url += client.parseDOM(result, 'source', ret='src', attrs = {'data-res': '.+?'})
url = url[0]
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
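# Illustrative note (added): resolve() expects a watch1080-style page URL,
# optionally carrying a "?quality=..." query string, e.g. (hypothetical URL)
#   resolve('http://example-host/some-movie?quality=1080P')
# and returns a direct stream URL on success, or None if resolution fails.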
| gpl-2.0 | 5,616,269,246,902,563,000 | 31.508475 | 89 | 0.632951 | false |
suqinhuang/tp-qemu | generic/tests/mac_change.py | 7 | 7673 | import re
import logging
from autotest.client.shared import error
from virttest import utils_misc
from virttest import utils_net
from virttest import utils_test
def check_guest_mac(mac, vm, device_id=None):
error.context("Check mac address via monitor", logging.info)
network_info = vm.monitor.info("network")
if not device_id:
device_id = vm.virtnet[0].device_id
if device_id not in str(network_info):
        err = "Could not find device '%s' from query-network monitor command. " % device_id
        err += "query-network command output: %s" % str(network_info)
raise error.TestFail(err)
for info in str(network_info).splitlines():
if re.match(device_id, info.strip(), re.I) and mac not in info:
            err = "Could not get correct mac from qmp command! "
err += "query-network command output: %s" % str(network_info)
raise error.TestFail(err)
@error.context_aware
def run(test, params, env):
"""
Change MAC address of guest.
1) Get a new mac from pool, and the old mac addr of guest.
2) Check guest mac by qmp command.
3) Set new mac in guest and regain new IP.
4) Check guest new mac by qmp command.
5) Re-log into guest with new MAC. (nettype != macvtap)
    6) Reboot guest and check the mac address by monitor (optional).
    7) File transfer between host and guest (optional).
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
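    The MAC-change commands are supplied by the test configuration; the
    parameter names below match those read by this test, while the values
    are only plausible Linux-guest examples, not the actual configuration::
        change_cmd = "ip link set dev %s address %s"
        int_shutdown_cmd = "ifconfig %s down"
        int_activate_cmd = "ifconfig %s up"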
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session_serial = vm.wait_for_serial_login(timeout=timeout)
# This session will be used to assess whether the IP change worked
if params.get("nettype") != "macvtap":
session = vm.wait_for_login(timeout=timeout)
old_mac = vm.get_mac_address(0)
while True:
vm.virtnet.free_mac_address(0)
new_mac = vm.virtnet.generate_mac_address(0)
if old_mac != new_mac:
break
os_type = params.get("os_type")
os_variant = params.get("os_variant")
change_cmd_pattern = params.get("change_cmd")
logging.info("The initial MAC address is %s", old_mac)
check_guest_mac(old_mac, vm)
if os_type == "linux":
interface = utils_net.get_linux_ifname(session_serial, old_mac)
if params.get("shutdown_int", "yes") == "yes":
int_shutdown_cmd = params.get("int_shutdown_cmd",
"ifconfig %s down")
session_serial.cmd_output_safe(int_shutdown_cmd % interface)
else:
connection_id = utils_net.get_windows_nic_attribute(session_serial,
"macaddress",
old_mac,
"netconnectionid")
nic_index = utils_net.get_windows_nic_attribute(session_serial,
"netconnectionid",
connection_id,
"index")
if os_variant == "winxp":
pnpdevice_id = utils_net.get_windows_nic_attribute(session,
"netconnectionid",
connection_id,
"pnpdeviceid")
cd_drive = utils_misc.get_winutils_vol(session)
copy_cmd = r"xcopy %s:\devcon\wxp_x86\devcon.exe c:\ " % cd_drive
session.cmd(copy_cmd)
# Start change MAC address
error.context("Changing MAC address to %s" % new_mac, logging.info)
if os_type == "linux":
change_cmd = change_cmd_pattern % (interface, new_mac)
else:
change_cmd = change_cmd_pattern % (int(nic_index),
"".join(new_mac.split(":")))
try:
session_serial.cmd_output_safe(change_cmd)
# Verify whether MAC address was changed to the new one
error.context("Verify the new mac address, and restart the network",
logging.info)
if os_type == "linux":
if params.get("shutdown_int", "yes") == "yes":
int_activate_cmd = params.get("int_activate_cmd",
"ifconfig %s up")
session_serial.cmd_output_safe(int_activate_cmd % interface)
session_serial.cmd_output_safe("ifconfig | grep -i %s" % new_mac)
            logging.info("MAC address changed successfully, restarting the network...")
dhclient_cmd = "dhclient -r && dhclient %s" % interface
session_serial.sendline(dhclient_cmd)
else:
mode = "netsh"
if os_variant == "winxp":
connection_id = pnpdevice_id.split("&")[-1]
mode = "devcon"
utils_net.restart_windows_guest_network(session_serial,
connection_id,
mode=mode)
o = session_serial.cmd_output_safe("ipconfig /all")
if not re.findall("%s" % "-".join(new_mac.split(":")), o, re.I):
raise error.TestFail("Guest mac change failed")
        logging.info("Guest MAC has been modified successfully")
if params.get("nettype") != "macvtap":
# Re-log into the guest after changing mac address
if utils_misc.wait_for(session.is_responsive, 120, 20, 3):
# Just warning when failed to see the session become dead,
# because there is a little chance the ip does not change.
msg = "The session is still responsive, settings may fail."
logging.warn(msg)
session.close()
# Re-log into guest and check if session is responsive
error.context("Re-log into the guest", logging.info)
session = vm.wait_for_login(timeout=timeout)
if not session.is_responsive():
raise error.TestFail("The new session is not responsive.")
if params.get("reboot_vm_after_mac_changed") == "yes":
                error.context("Reboot guest and check the mac address by "
"monitor", logging.info)
mac_check = new_mac
if os_type == "linux":
nic = vm.virtnet[0]
nic.mac = old_mac
vm.virtnet.update_db()
mac_check = old_mac
session_serial = vm.reboot(session_serial, serial=True)
check_guest_mac(mac_check, vm)
if params.get("file_transfer", "no") == "yes":
error.context("File transfer between host and guest.",
logging.info)
utils_test.run_file_transfer(test, params, env)
else:
check_guest_mac(new_mac, vm)
finally:
if os_type == "windows":
clean_cmd_pattern = params.get("clean_cmd")
clean_cmd = clean_cmd_pattern % int(nic_index)
session_serial.cmd_output_safe(clean_cmd)
utils_net.restart_windows_guest_network(session_serial,
connection_id,
mode=mode)
nic = vm.virtnet[0]
nic.mac = old_mac
vm.virtnet.update_db()
| gpl-2.0 | 2,176,835,660,123,757,800 | 44.946108 | 81 | 0.531865 | false |
wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/sqlalchemy/orm/events.py | 19 | 84902 | # orm/events.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
from .. import event, exc, util
from .base import _mapper_or_none
import inspect
import weakref
from . import interfaces
from . import mapperlib, instrumentation
from .session import Session, sessionmaker
from .scoping import scoped_session
from .attributes import QueryableAttribute
from .query import Query
from sqlalchemy.util.compat import inspect_getargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
.. versionchanged:: 0.8 - events here will emit based
on comparing the incoming class to the type of class
passed to :func:`.event.listen`. Previously, the
event would fire for any class unconditionally regardless
of what class was sent for listening, despite
documentation which stated the contrary.
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
def remove(ref):
key = event.registry._EventKey(
None, identifier, listen,
instrumentation._instrumentation_factory)
getattr(instrumentation._instrumentation_factory.dispatch,
identifier).remove(key)
target = weakref.ref(target.class_, remove)
event_key.\
with_dispatch_target(instrumentation._instrumentation_factory).\
with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print "on load!"
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 instance events can be associated with
unmapped superclasses of mapped classes.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if not raw:
def wrap(state, *arg, **kw):
return fn(state.obj(), *arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(
propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
    def first_init(self, manager, cls):
        """Called the first time an instance of a particular mapping is constructed.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
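        For example, a rough sketch of a listener that fills in a default
        keyword argument; ``SomeClass`` stands in for any mapped class and
        the ``name`` argument is purely illustrative::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'init')
            def set_default_name(target, args, kwargs):
                # supply a default only when the caller did not pass one
                kwargs.setdefault('name', 'unnamed')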
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`.Query`,
such as during :meth:`.Session.merge`.
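        As an illustrative sketch, a listener that initializes ad-hoc,
        non-mapped state on every loaded object; ``SomeClass`` stands in
        for any mapped class and ``_cached_total`` is an arbitrary name::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'load')
            def receive_load(target, context):
                # plain Python attribute, not managed by the mapper
                target._cached_total = None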
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed within the persistence of the object.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and the values
here typically come from the process of handling an INSERT or
UPDATE, such as via the RETURNING clause or from Python-side default
values.
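        A brief sketch; ``SomeClass`` stands in for any mapped class and the
        message printed is purely illustrative::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'refresh_flush')
            def receive_refresh_flush(target, flush_context, attrs):
                # 'attrs' names the attributes just refreshed, e.g. a
                # primary key populated via RETURNING or a server default
                print("refreshed during flush: %s" % (attrs,))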
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
        ``attrs`` is a sequence of attribute names.  If None, the entire
        state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False, **kw)
def remove(self, event_key):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 mapper events can be associated with
unmapped superclasses of mapped classes.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if identifier in ("before_configured", "after_configured") and \
target is not mapperlib.Mapper:
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target.")
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = \
inspect_getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
r"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def mapper_configured(self, mapper, class_):
r"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`.orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
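        A sketch of such a hook established against an unmapped base class;
        ``SomeBaseClass`` is a stand-in name::
            from sqlalchemy import event
            @event.listens_for(SomeBaseClass, 'mapper_configured',
                               propagate=True)
            def receive_mapper_configured(mapper, class_):
                # runs once per mapper, as each completes its own
                # configuration step
                print("configured mapper for %s" % class_.__name__)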
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
        Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
on a per-mapper basis as each one is configured to the extent possible.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
        Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
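        As a rough sketch, a listener that reports the newly generated
        primary key; ``SomeClass`` and its ``id`` column are illustrative
        assumptions::
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'after_insert')
            def receive_after_insert(mapper, connection, target):
                # the surrogate key is populated by the time this fires
                print("inserted row with primary key %s" % target.id)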
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
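        A sketch of a common use, maintaining an "updated at" timestamp;
        ``SomeClass`` and the ``updated_at`` column are illustrative
        assumptions::
            import datetime
            from sqlalchemy import event
            @event.listens_for(SomeClass, 'before_update')
            def timestamp_before_update(mapper, connection, target):
                # local, column-mapped attribute change only
                target.updated_at = datetime.datetime.utcnow()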
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
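        For example, a hedged sketch of bookkeeping emitted on the same
        :class:`.Connection`; the ``audit_log`` table and the ``id``
        attribute are purely hypothetical::
            from sqlalchemy import event, text
            @event.listens_for(SomeClass, 'before_delete')
            def audit_delete(mapper, connection, target):
                connection.execute(
                    text("INSERT INTO audit_log (description) "
                         "VALUES (:descr)"),
                    descr="deleting %s" % target.id)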
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print "before commit!"
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and \
(
not isinstance(target, type) or
not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class.")
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
            @event.listens_for(session, "after_transaction_end")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
            @event.listens_for(session, "after_transaction_end")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
.. versionadded:: 0.7.3
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
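        For example, a rough sketch that inspects pending changes before they
        are written; the print statements are only illustrative::
            from sqlalchemy import event
            from sqlalchemy.orm import Session
            @event.listens_for(Session, 'before_flush')
            def receive_before_flush(session, flush_context, instances):
                for obj in session.new:
                    print("about to insert: %r" % (obj,))
                for obj in session.dirty:
                    print("about to update: %r" % (obj,))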
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.Connection` object
which will be used for SQL statements.
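        e.g., a sketch that configures each new transaction; the SQL shown
        is backend-specific (PostgreSQL) and purely illustrative::
            from sqlalchemy import event
            from sqlalchemy.orm import Session
            @event.listens_for(Session, 'after_begin')
            def receive_after_begin(session, transaction, connection):
                # runs on the connection participating in the new transaction
                connection.execute("SET LOCAL statement_timeout = 5000")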
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. versionadded:: 0.8. Note that :meth:`~.SessionEvents.after_attach`
now fires off after the item is part of the session.
:meth:`.before_attach` is provided for those cases where
the item should not yet be part of the session state.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result))
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` -the :class:`.Query` object that this update operation
was called upon.
* ``context`` The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` the :class:`.ResultProxy` returned as a result of the
bulk UPDATE operation.
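        As an illustrative sketch, a listener that logs how many rows were
        matched; ``log`` is assumed to be an already-configured logger::
            from sqlalchemy import event
            from sqlalchemy.orm import Session
            @event.listens_for(Session, 'after_bulk_update')
            def receive_after_bulk_update(update_context):
                log.info("bulk UPDATE matched %d rows",
                         update_context.result.rowcount)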
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result))
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`.Query.delete` method.
:param delete_context: a "delete context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` -the :class:`.Query` object that this update operation
was called upon.
* ``context`` The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` the :class:`.ResultProxy` returned as a result of the
bulk DELETE operation.
"""
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
        This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
        This less common transition occurs when a pending object that
        has been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
    def pending_to_persistent(self, session, instance):
        """Intercept the "pending to persistent" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
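        As a minimal sketch, a counter of objects made persistent by a flush;
        the ``stats`` dictionary is hypothetical application state::
            from sqlalchemy import event
            from sqlalchemy.orm import Session
            @event.listens_for(Session, 'pending_to_persistent')
            def receive_pending_to_persistent(session, instance):
                stats['persisted'] = stats.get('persisted', 0) + 1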
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
check the ``deleted`` flag sent to the
            :meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
        is deleted from the database within a flush; however, the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
every object that will be deleted until the flush has proceeded.
        The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
events are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
* Calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
to :func:`~.event.listen`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'\D', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
        event.listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(cls, event_key, active_history=False,
raw=False, retval=False,
propagate=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, value, *arg):
if not raw:
target = target.obj()
if not retval:
fn(target, value, *arg)
return value
else:
return fn(target, value, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(
mgr[target.key]).base_listen(propagate=True)
def append(self, target, value, initiator):
"""Receive a collection append event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def init_scalar(self, target, value, dict_):
"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed. A value of ``None`` is typically returned
in this case; no changes are made to the object's state.
The event handler can alter this behavior in two ways.
One is that a value other than ``None`` may be returned. The other
is that the value may be established as part of the object's state,
which will also have the effect that it is persisted.
Typical use is to establish a specific default value of an attribute
upon access::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that the value is to be persisted to the database.
**The given value is only persisted to the database if we
explicitly associate it with the object**. The ``dict_`` given
is the ``__dict__`` element of the mapped object, assuming the
default attribute instrumentation system is in place.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
When we establish the value in the given dictionary, the value will
be used in the INSERT statement established by the unit of work.
Normally, the default returned value of ``None`` is not established as
part of the object, to avoid the issue of mutations occurring to the
object in response to a normally passive "get" operation, and also
sidesteps the issue of whether or not the :meth:`.AttributeEvents.set`
event should be awkwardly fired off during an attribute access
operation. This does not impact the INSERT operation since the
``None`` value matches the value of ``NULL`` that goes into the
database in any case; note that ``None`` is skipped during the INSERT
to ensure that column and SQL-level default functions can fire off.
The attribute set event :meth:`.AttributeEvents.set` as well as the
related validation feature provided by :obj:`.orm.validates` is
**not** invoked when we apply our value to the given ``dict_``. To
        have these events invoke in response to our newly generated
value, apply the value to the given object as a normal attribute
set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
            def _init_some_attribute(target, value, dict_):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
The :meth:`.AttributeEvents.init_scalar` event may be used to
extract values from the default values and/or callables established on
mapped :class:`.Column` objects. See the "active column defaults"
example in :ref:`examples_instrumentation` for an example of this.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value that is to be returned before this event
         listener is invoked.  This value begins as the value ``None``,
         however it will be the return value of the previous event handler
function if multiple listeners are present.
:param dict_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`.RelationshipProperty.collection_class`, and will always
be empty.
        :param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
    def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The mechanics of the event will typically include that the given
collection is empty, even if it stored objects while being replaced.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`.Query` object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`.Query`. Some very basic operations
    are possible now; however, the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`.Query` object before it is composed into a
core :class:`.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
        The event should normally be listened to with the ``retval=True``
parameter set, so that the modified query may be returned.
"""
@classmethod
def _listen(
cls, event_key, retval=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
| gpl-3.0 | 3,308,734,277,579,368,000 | 37.821216 | 84 | 0.63346 | false |
manuzhang/beam | sdks/python/apache_beam/io/gcp/tests/bigquery_matcher.py | 9 | 3665 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bigquery data verifier for end-to-end test."""
import logging
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.testing.test_utils import compute_hash
from apache_beam.utils import retry
__all__ = ['BigqueryMatcher']
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud import bigquery
from google.cloud.exceptions import GoogleCloudError
except ImportError:
bigquery = None
# pylint: enable=wrong-import-order, wrong-import-position
MAX_RETRIES = 4
def retry_on_http_and_value_error(exception):
"""Filter allowing retries on Bigquery errors and value error."""
return isinstance(exception, (GoogleCloudError, ValueError))
class BigqueryMatcher(BaseMatcher):
"""Matcher that verifies Bigquery data with given query.
Fetch Bigquery data with given query, compute a hash string and compare
with expected checksum.
"""
def __init__(self, project, query, checksum):
if bigquery is None:
raise ImportError(
'Bigquery dependencies are not installed.')
if not query or not isinstance(query, str):
raise ValueError(
'Invalid argument: query. Please use non-empty string')
if not checksum or not isinstance(checksum, str):
raise ValueError(
'Invalid argument: checksum. Please use non-empty string')
self.project = project
self.query = query
self.expected_checksum = checksum
def _matches(self, _):
    logging.info('Start verifying Bigquery data.')
# Run query
bigquery_client = bigquery.Client(project=self.project)
response = self._query_with_retry(bigquery_client)
logging.info('Read from given query (%s), total rows %d',
self.query, len(response))
# Compute checksum
self.checksum = compute_hash(response)
logging.info('Generate checksum: %s', self.checksum)
# Verify result
return self.checksum == self.expected_checksum
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry_on_http_and_value_error)
def _query_with_retry(self, bigquery_client):
"""Run Bigquery query with retry if got error http response"""
query = bigquery_client.run_sync_query(self.query)
query.run()
# Fetch query data one page at a time.
page_token = None
results = []
while True:
rows, _, page_token = query.fetch_data(page_token=page_token)
results.extend(rows)
if not page_token:
break
return results
def describe_to(self, description):
description \
.append_text("Expected checksum is ") \
.append_text(self.expected_checksum)
def describe_mismatch(self, pipeline_result, mismatch_description):
mismatch_description \
.append_text("Actual checksum is ") \
.append_text(self.checksum)
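# --- Editorial usage sketch (not part of the Beam sources) ---
# A hedged example of wiring the matcher into hamcrest's assert_that; the
# project id, query and checksum values below are placeholders.
def _example_verify(pipeline_result):
  from hamcrest import assert_that
  verifier = BigqueryMatcher(
      project='my-gcp-project',
      query='SELECT fruit FROM [my_dataset.my_table]',
      checksum='0123456789abcdef0123456789abcdef01234567')
  assert_that(pipeline_result, verifier)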
| apache-2.0 | 8,325,236,582,272,551,000 | 32.318182 | 74 | 0.717599 | false |
valentin-krasontovitsch/ansible | lib/ansible/modules/network/fortios/fortios_webfilter_ips_urlfilter_setting6.py | 7 | 7676 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The fortiosapi library uses Python logging; its output can be enabled
# through your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_ips_urlfilter_setting6
short_description: Configure IPS URL filter settings for IPv6.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure webfilter feature and ips_urlfilter_setting6 category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
webfilter_ips_urlfilter_setting6:
description:
- Configure IPS URL filter settings for IPv6.
default: null
suboptions:
device:
description:
- Interface for this route. Source system.interface.name.
distance:
description:
- Administrative distance (1 - 255) for this route.
gateway6:
description:
- Gateway IPv6 address for this route.
geo-filter:
description:
- Filter based on geographical location. Route will NOT be installed if the resolved IPv6 address belongs to the country in the filter.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPS URL filter settings for IPv6.
fortios_webfilter_ips_urlfilter_setting6:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
webfilter_ips_urlfilter_setting6:
device: "<your_own_value> (source system.interface.name)"
distance: "4"
gateway6: "<your_own_value>"
geo-filter: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_webfilter_ips_urlfilter_setting6_data(json):
option_list = ['device', 'distance', 'gateway6',
'geo-filter']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
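# Editorial note: given module arguments such as
#   {'device': 'port1', 'distance': 4, 'gateway6': None, 'geo-filter': None}
# the helper above keeps only the keys that were actually provided, e.g.
#   {'device': 'port1', 'distance': 4}
# (the values shown are illustrative placeholders).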
def webfilter_ips_urlfilter_setting6(data, fos):
vdom = data['vdom']
webfilter_ips_urlfilter_setting6_data = data['webfilter_ips_urlfilter_setting6']
filtered_data = filter_webfilter_ips_urlfilter_setting6_data(
webfilter_ips_urlfilter_setting6_data)
return fos.set('webfilter',
'ips-urlfilter-setting6',
data=filtered_data,
vdom=vdom)
def fortios_webfilter(data, fos):
login(data)
methodlist = ['webfilter_ips_urlfilter_setting6']
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"webfilter_ips_urlfilter_setting6": {
"required": False, "type": "dict",
"options": {
"device": {"required": False, "type": "str"},
"distance": {"required": False, "type": "int"},
"gateway6": {"required": False, "type": "str"},
"geo-filter": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_webfilter(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,758,649,768,380,794,000 | 28.29771 | 155 | 0.62741 | false |
Lamisator/rublogin | rublogin.py | 1 | 3930 | #!/usr/bin/python3
import requests, getpass, socket, platform, subprocess, time, os, sys, getopt
verb = 0
loginid = ""
pw = ""
def get_ip_address():
dbgmsg("Trying to obtain our current IP-Address...")
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("134.147.64.8", 80))
dbgmsg("Success!")
return s.getsockname()[0]
except Exception:
dbgmsg("Failure! Network unreachable.")
return 0
def ping(host):
dbgmsg("Trying to determine: OS...")
if platform.system() == "Windows":
dbgmsg("Windows detected.")
res = subprocess.call(["ping", host, "-n 1"])
else:
dbgmsg("UNIX(-like) OS detected.")
res = subprocess.call(["ping", "-c 1", host], stdin = subprocess.PIPE, stdout = subprocess.DEVNULL, stderr = subprocess.DEVNULL)
dbgmsg("Pinging to see if our connection is up...")
if res == 0:
dbgmsg("We're connected!")
return 1
else:
dbgmsg("Nope, no connection.")
return 0
def establish_connection(user, pw):
dbgmsg("Trying to establish connection...")
ip = get_ip_address()
if ip == 0:
print("Network is unreachable.\n")
return 0
payload = {'code': '1', 'loginid': loginid, 'password': pw, 'ipaddr': ip, 'action': 'Login'}
text = ""
try:
dbgmsg("Trying to log in via HTTP-POST...")
r = requests.post("https://login.rz.ruhr-uni-bochum.de/cgi-bin/laklogin", data=payload)
text = r.text
except Exception:
print("An error occured while trying to login.\n")
if "Authentisierung gelungen" in text:
print("O.K.")
return 1
else:
print("Fail.")
return 0
def logout():
dbgmsg("Trying to log out...")
ip = get_ip_address()
if ip == 0:
print("Network is unreachable.\n")
return 0
payload = {'code': '1', 'loginid': '', 'password': '', 'ipaddr': ip, 'action': 'Logout'}
text = ""
try:
dbgmsg("Trying to log out via HTTP-POST...")
r = requests.post("https://login.rz.ruhr-uni-bochum.de/cgi-bin/laklogin", data=payload)
text = r.text
except Exception:
print("An error occured while trying to logout.\n")
if "erfolgreich" in text:
print("O.K., successfully logged out.")
return 1
else:
print("Fail.")
return 0
def login(watchdog, interval):
global loginid, pw
loginid = input("Login ID: ")
pw = getpass.getpass()
if establish_connection(loginid, pw) == 0:
exit()
if watchdog:
wd_enabled = 1
try:
pid = os.fork()
except OSError:
sys.exit(1)
if pid > 0:
sys.exit(0)
print("Watchdog-PID: " + str(os.getpid()) + "\n")
while wd_enabled:
time.sleep(interval)
if(ping("8.8.8.8")):
#print("O.K.")
continue
else:
print("\nRUBLOGIN-Watchdog: Connection lost. Trying to re-establish...")
establish_connection(loginid, pw)
def dbgmsg(msg):
if verb: print("Dbg: " + msg)
return
def print_help():
print("Usage:\n -l / --logout : Terminates the current HIRN-Port-Session\n -w / --watchdog [interval]: Enables the watchdog. Checks the status of the connection every [interval] seconds and reconnects, if neccessary\n -v / --verbose : Prints debug messages throughout the login/logout process\n -h / --help : Displays this help")
def main(argv):
global verb
logout_v = 0
watchdog = 0
interval = 0
try:
		opts, _ = getopt.getopt(argv, "hvlw:", ["help","verbose","logout","watchdog="])
except Exception:
verb = 0
print("Invalid arguments. Use the -h parameter if you are lost.")
sys.exit()
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help()
sys.exit()
elif opt in("-v", "--verbose"):
verb = 1
print("Debugging messages enabled...")
elif opt in ("-l", "--logout"):
logout_v = 1
elif opt in ("-w", "--watchdog"):
watchdog = 1
try:
interval = int(arg)
except ValueError:
print("Only integers, please. Aborting.")
sys.exit()
if interval < 5:
print("Interval must be 5 seconds or longer. Aborting.")
sys.exit()
print("Watchdog enabled.")
if logout_v:
logout()
else:
login(watchdog, interval)
main(sys.argv[1:])
| gpl-2.0 | 2,143,439,340,035,754,200 | 26.676056 | 330 | 0.647583 | false |
maheshp/novatest | nova/api/openstack/compute/contrib/security_groups.py | 1 | 24133 | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import json
import webob
from webob import exc
from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.network.security_group import openstack_driver
from nova.network.security_group import quantum_driver
from nova.openstack.common import log as logging
from nova.virt import netutils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
elem.set('id')
elem.set('parent_group_id')
proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
proto.text = 'ip_protocol'
from_port = xmlutil.SubTemplateElement(elem, 'from_port')
from_port.text = 'from_port'
to_port = xmlutil.SubTemplateElement(elem, 'to_port')
to_port.text = 'to_port'
group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
name = xmlutil.SubTemplateElement(group, 'name')
name.text = 'name'
tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
tenant_id.text = 'tenant_id'
ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
selector='ip_range')
cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
cidr.text = 'cidr'
def make_sg(elem):
elem.set('id')
elem.set('tenant_id')
elem.set('name')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
rules = xmlutil.SubTemplateElement(elem, 'rules')
rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
make_rule(rule)
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group_rule',
selector='security_group_rule')
make_rule(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group',
selector='security_group')
make_sg(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_groups')
elem = xmlutil.SubTemplateElement(root, 'security_group',
selector='security_groups')
make_sg(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = xmlutil.safe_minidom_parse_string(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
if sg_node is not None:
if sg_node.hasAttribute('name'):
security_group['name'] = sg_node.getAttribute('name')
desc_node = self.find_first_child_named(sg_node,
"description")
if desc_node:
security_group['description'] = self.extract_text(desc_node)
return {'body': {'security_group': security_group}}
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = xmlutil.safe_minidom_parse_string(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
"""Marshal the security group rule attribute of a parsed request."""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
if sg_rule_node is not None:
ip_protocol_node = self.find_first_child_named(sg_rule_node,
"ip_protocol")
if ip_protocol_node is not None:
sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
from_port_node = self.find_first_child_named(sg_rule_node,
"from_port")
if from_port_node is not None:
sg_rule['from_port'] = self.extract_text(from_port_node)
to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
if to_port_node is not None:
sg_rule['to_port'] = self.extract_text(to_port_node)
parent_group_id_node = self.find_first_child_named(sg_rule_node,
"parent_group_id")
if parent_group_id_node is not None:
sg_rule['parent_group_id'] = self.extract_text(
parent_group_id_node)
group_id_node = self.find_first_child_named(sg_rule_node,
"group_id")
if group_id_node is not None:
sg_rule['group_id'] = self.extract_text(group_id_node)
cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
if cidr_node is not None:
sg_rule['cidr'] = self.extract_text(cidr_node)
return sg_rule
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
source_group = self.security_group_api.get(context,
id=rule['group_id'])
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
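    # Editorial note: the dictionary built above has roughly this shape
    # (values are illustrative placeholders):
    #   {'id': 1, 'name': 'default', 'tenant_id': 'project1',
    #    'description': 'default', 'rules': [
    #        {'id': 2, 'parent_group_id': 1, 'ip_protocol': 'tcp',
    #         'from_port': 22, 'to_port': 22, 'group': {},
    #         'ip_range': {'cidr': '0.0.0.0/0'}}]}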
def _authorize_context(self, req):
context = req.environ['nova.context']
authorize(context)
return context
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
value = body.get(key, None)
if value is None:
raise exc.HTTPUnprocessableEntity()
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@wsgi.serializers(xml=SecurityGroupTemplate)
def show(self, req, id):
"""Return data about the given security group."""
context = self._authorize_context(req)
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = self._authorize_context(req)
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
"""Returns a list of security groups."""
context = self._authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
raw_groups = self.security_group_api.list(context,
project=context.project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@wsgi.serializers(xml=SecurityGroupTemplate)
@wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
def create(self, req, body):
"""Creates a new security group."""
context = self._authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupRuleTemplate)
@wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
def create(self, req, body):
context = self._authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id, map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net != '0.0.0.0' and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
return {"security_group_rule": self._format_security_group_rule(
context,
security_group_rule)}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = self._authorize_context(req)
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None, group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = self._authorize_context(req)
self.security_group_api.ensure_default(context)
try:
instance = self.compute_api.get(context, server_id)
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
groups = db.security_group_get_by_instance(context, instance['id'])
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
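    # Editorial note: the request bodies parsed above look like
    #   {"addSecurityGroup": {"name": "my-group"}} or
    #   {"removeSecurityGroup": {"name": "my-group"}}
    # where the group name shown is a placeholder.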
def _invoke(self, method, context, id, group_name):
try:
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
except exception.SecurityGroupNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.InstanceNotFound as exp:
raise exc.HTTPNotFound(explanation=unicode(exp))
except exception.Invalid as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
key = "security_groups"
if not openstack_driver.is_quantum_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
else:
            # If the method is a POST we get the security groups intended for
            # an instance from the request. The reason for this is that, when
            # using quantum security groups, the requested security groups for
            # the instance are not in the db and have not been sent to quantum
            # yet.
instance_sgs = []
if req.method != 'POST':
for server in servers:
instance_sgs = (
self.security_group_api.get_instance_security_groups(
req, server['id']))
else:
try:
# try converting to json
req_obj = json.loads(req.body)
                    # Add the security groups to the server; if none were in
                    # the request, add 'default' since that is the group the
                    # instance will be part of.
instance_sgs = req_obj['server'].get(
key, [{'name': 'default'}])
except ValueError:
root = minidom.parseString(req.body)
sg_root = root.getElementsByTagName(key)
if sg_root:
security_groups = sg_root[0].getElementsByTagName(
'security_group')
for security_group in security_groups:
instance_sgs.append(
{'name': security_group.getAttribute('name')})
if not instance_sgs:
instance_sgs = [{'name': 'default'}]
if instance_sgs:
for server in servers:
server[key] = instance_sgs
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
resp_obj.attach(xml=SecurityGroupServerTemplate())
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
resp_obj.attach(xml=SecurityGroupServersTemplate())
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return "security_groups" in datum
def make_server(elem):
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector="security_groups")
secgrp.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1)
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2011-07-21T00:00:00+00:00"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
class NativeSecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_group_already_exists(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_group(msg):
raise exc.HTTPBadRequest(explanation=msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exc.HTTPNotFound(explanation=msg)
class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class NativeQuantumSecurityGroupAPI(NativeSecurityGroupExceptions,
quantum_driver.SecurityGroupAPI):
pass
| apache-2.0 | -1,266,732,917,822,452,700 | 37.245642 | 79 | 0.590395 | false |
Knio/miru | examples/cube00.py | 1 | 1080 | """
Demo loading a Cube and assigning a texture.
"""
from pyglet.graphics import Batch
from pyglet import clock
from pyglet.image import load as load_image
from pyglet import app
from miru.core import Object
from miru.context import context
from miru.ext.geom import Cube, get_vlist
from miru.ui import Window
from miru.graphics import TextureBindGroup
from miru.camera import LightGroup, DirectionalLight
from miru.input import SimpleMouseControl
window = Window(800, 450)
context.window = window
context.osd.add_object(clock.ClockDisplay())
context.control = SimpleMouseControl()
context.camera.lights = LightGroup([
DirectionalLight([1.0, 0.0, 1.0])])
tex = load_image('docs/demo/test.png').get_texture()
batch = Batch()
texture_group = TextureBindGroup(tex)
cube_geom = Cube()
get_vlist(cube_geom, batch, texture_group)
cube = Object(batch)
cube.angle.z = 15
context.add_object(cube)
v = 35
def update(dt):
dy = dt * v
cube.angle.x += dy
clock.schedule_interval(update, 1/60.)
@window.event
def on_draw():
window.clear()
context.render()
app.run()
| mit | 1,801,096,251,414,991,400 | 21.978723 | 52 | 0.746296 | false |
kateBy/df_rus_linux | rebuild_cache.py | 1 | 1058 | #!/usr/bin/python3.4
if __name__ != '__main__':
exit()
from extract_strings import *
import find_xref
from time import time
print("Ищем строки в исходном файле")
words = extract_strings('Dwarf_Fortress')
all_data = open("Dwarf_Fortress", 'rb').read()
# Limit for the string search
MAX_TO_FIND = len(all_data) # FIXME: the length does not match the end of the section
print("Загружаются строки перевода")
trans = load_trans_po('trans.po')
print("Поиск строк-близнецов")
start = time()
gemini = find_gemini(words, trans)
chk = check_founded_gemini(gemini, all_data)
print("Поиск занял", time() - start, "c")
words.update(chk)
print("Поиск перекрестных ссылок")
#Ищем указатели на используемые строки, в несколько потоков
start = time()
xref = find_xref.find(words, MAX_TO_FIND, all_data, load_from_cache=False)
print("Поиск занял", time() - start, "c")
| mit | 5,157,827,789,537,581,000 | 19.214286 | 74 | 0.705536 | false |
geobricks/magic_web | magic_web/__init__.py | 1 | 1186 | from flask import Flask
from flask.ext.cors import CORS
from importlib import import_module
from pgeo.error.custom_exceptions import PGeoException
from flask import jsonify
from flask import render_template
from config.settings import modules
# Initialize the Flask app
app = Flask(__name__)
# Initialize CORS filters
cors = CORS(app, resources={r'/*': {'origins': '*'}})
# Custom error handling
@app.errorhandler(PGeoException)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# Custom 404 page
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
# Root REST
@app.route('/')
def root():
return 'Welcome to MagicWeb!'
# Dynamic import of modules specified in config.settings.py
for module in modules:
# Load module
mod = import_module(module['module_name'])
# Load Blueprint
rest = getattr(mod, module['rest_name'])
# Register Blueprint
app.register_blueprint(rest, url_prefix=module['url_prefix'])
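# Editorial note: each entry of config.settings.modules is assumed to look
# roughly like
#   {'module_name': 'some_package.rest_module', 'rest_name': 'some_blueprint',
#    'url_prefix': '/some_prefix'}
# where the values shown here are illustrative placeholders.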
# Start Flask server
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5050, debug=True, threaded=True) | gpl-2.0 | -5,573,446,203,206,300,000 | 21.396226 | 67 | 0.710793 | false |
weka511/bioinformatics | dij.py | 1 | 2107 | # Copyright (C) 2019 Greenweaves Software Limited
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>
# DIJ Dijkstra's Algorithm: compute single-source shortest distances
# in a directed graph with positive edge weights.
import argparse
import os
import time
from helpers import read_strings
from graphs import dij
from helpers import create_list
if __name__=='__main__':
start = time.time()
parser = argparse.ArgumentParser("DIJ Dijkstra's Algorithm")
parser.add_argument('--sample', default=False, action='store_true', help='process sample dataset')
parser.add_argument('--rosalind', default=False, action='store_true', help='process Rosalind dataset')
args = parser.parse_args()
if args.sample:
        print (dij([[6, 10],
[1, 2, 4],
[1, 3, 2],
[2, 3, 3],
[6, 3, 2],
[3, 5, 5],
[5, 4, 1],
[3, 2, 1],
[2, 4, 2],
[2, 5, 3]]))
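        # Editorial note: the list above follows the Rosalind edge-list layout
        # that dij() is assumed to expect -- a leading [vertex_count, edge_count]
        # pair followed by weighted directed edges [from, to, weight].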
if args.rosalind:
with open(f'{os.path.basename(__file__).split(".")[0]}.txt','w') as f:
Solution = ' '.join([str(i) for i in dij(create_list(path='./data'))])
print (Solution)
f.writelines(f'{Solution}\n')
elapsed = time.time()-start
minutes = int(elapsed/60)
seconds = elapsed-60*minutes
print (f'Elapsed Time {minutes} m {seconds:.2f} s')
| gpl-3.0 | -8,501,222,803,260,093,000 | 38.037037 | 106 | 0.588514 | false |
ucb-sts/sts | tests/unit/log_parsing/superlog_parser_test.py | 2 | 1898 | #!/usr/bin/env python
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
sys.path.append(os.path.dirname(__file__) + "/../../..")
import sts.input_traces.log_parser as log_parser
from sts.replay_event import LinkFailure, LinkRecovery
class superlog_parser_test(unittest.TestCase):
tmpfile = '/tmp/superlog.tmp'
def open_simple_superlog(self):
''' Returns the file. Make sure to close afterwards! '''
superlog = open(self.tmpfile, 'w')
e1 = str('''{"dependent_labels": ["e2"], "start_dpid": 1, "class": "LinkFailure",'''
''' "start_port_no": 1, "end_dpid": 2, "end_port_no": 1, "label": "e1", "time": [0,0], "round": 0}''')
superlog.write(e1 + '\n')
e2 = str('''{"dependent_labels": [], "start_dpid": 1, "class": "LinkRecovery",'''
''' "start_port_no": 1, "end_dpid": 2, "end_port_no": 1, "label": "e2", "time": [0,0], "round": 0}''')
superlog.write(e2 + '\n')
superlog.close()
def test_basic(self):
name = None
try:
self.open_simple_superlog()
events = log_parser.parse_path(self.tmpfile)
self.assertEqual(2, len(events))
self.assertEqual(LinkFailure,type(events[0]))
self.assertEqual(LinkRecovery,type(events[1]))
finally:
if name is not None:
os.unlink(name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 7,488,244,927,765,582,000 | 34.811321 | 115 | 0.649104 | false |
jathanism/trigger | trigger/utils/importlib.py | 13 | 2346 | # -*- coding: utf-8 -*-
"""
Utils to import modules.
Taken verbatim from ``django.utils.importlib`` in Django 1.4.
"""
import os
import sys
# Exports
__all__ = ('import_module', 'import_module_from_path')
# Functions
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""
Import a module and return the module object.
The ``package`` argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
def import_module_from_path(full_path, global_name):
"""
Import a module from a file path and return the module object.
Allows one to import from anywhere, something ``__import__()`` does not do.
The module is added to ``sys.modules`` as ``global_name``.
:param full_path:
The absolute path to the module .py file
:param global_name:
The name assigned to the module in sys.modules. To avoid
confusion, the global_name should be the same as the variable to which
you're assigning the returned module.
"""
path, filename = os.path.split(full_path)
module, ext = os.path.splitext(filename)
sys.path.append(path)
try:
mymodule = __import__(module)
sys.modules[global_name] = mymodule
except ImportError:
raise ImportError('Module could not be imported from %s.' % full_path)
finally:
del sys.path[-1]
return mymodule
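# Illustrative usage only (the path and module name below are hypothetical):
#
#     mymod = import_module_from_path('/tmp/scratch/mymod.py', 'mymod')
#     mymod.do_something()
#
# Afterwards ``sys.modules['mymod']`` refers to the freshly loaded module.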
| bsd-3-clause | 4,422,137,457,110,509,000 | 28.696203 | 79 | 0.620205 | false |
tobegit3hub/keystone_docker | keystone/tests/unit/test_token_bind.py | 14 | 7942 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from keystone.common import wsgi
from keystone import exception
from keystone.models import token_model
from keystone.tests import unit
from keystone.tests.unit import test_token_provider
KERBEROS_BIND = 'USER@REALM'
ANY = 'any'
class BindTest(unit.TestCase):
"""Test binding tokens to a Principal.
Even though everything in this file references kerberos the same concepts
will apply to all future binding mechanisms.
"""
def setUp(self):
super(BindTest, self).setUp()
self.TOKEN_BIND_KERB = copy.deepcopy(
test_token_provider.SAMPLE_V3_TOKEN)
self.TOKEN_BIND_KERB['token']['bind'] = {'kerberos': KERBEROS_BIND}
self.TOKEN_BIND_UNKNOWN = copy.deepcopy(
test_token_provider.SAMPLE_V3_TOKEN)
self.TOKEN_BIND_UNKNOWN['token']['bind'] = {'FOO': 'BAR'}
self.TOKEN_BIND_NONE = copy.deepcopy(
test_token_provider.SAMPLE_V3_TOKEN)
self.ALL_TOKENS = [self.TOKEN_BIND_KERB, self.TOKEN_BIND_UNKNOWN,
self.TOKEN_BIND_NONE]
def assert_kerberos_bind(self, tokens, bind_level,
use_kerberos=True, success=True):
if not isinstance(tokens, dict):
for token in tokens:
self.assert_kerberos_bind(token, bind_level,
use_kerberos=use_kerberos,
success=success)
elif use_kerberos == ANY:
for val in (True, False):
self.assert_kerberos_bind(tokens, bind_level,
use_kerberos=val, success=success)
else:
context = {'environment': {}}
self.config_fixture.config(group='token',
enforce_token_bind=bind_level)
if use_kerberos:
context['environment']['REMOTE_USER'] = KERBEROS_BIND
context['environment']['AUTH_TYPE'] = 'Negotiate'
# NOTE(morganfainberg): This assumes a V3 token.
token_ref = token_model.KeystoneToken(
token_id=uuid.uuid4().hex,
token_data=tokens)
if not success:
self.assertRaises(exception.Unauthorized,
wsgi.validate_token_bind,
context, token_ref)
else:
wsgi.validate_token_bind(context, token_ref)
# DISABLED
def test_bind_disabled_with_kerb_user(self):
self.assert_kerberos_bind(self.ALL_TOKENS,
bind_level='disabled',
use_kerberos=ANY,
success=True)
# PERMISSIVE
def test_bind_permissive_with_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='permissive',
use_kerberos=True,
success=True)
def test_bind_permissive_with_regular_token(self):
self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
bind_level='permissive',
use_kerberos=ANY,
success=True)
def test_bind_permissive_without_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='permissive',
use_kerberos=False,
success=False)
def test_bind_permissive_with_unknown_bind(self):
self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
bind_level='permissive',
use_kerberos=ANY,
success=True)
# STRICT
def test_bind_strict_with_regular_token(self):
self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
bind_level='strict',
use_kerberos=ANY,
success=True)
def test_bind_strict_with_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='strict',
use_kerberos=True,
success=True)
def test_bind_strict_without_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='strict',
use_kerberos=False,
success=False)
def test_bind_strict_with_unknown_bind(self):
self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
bind_level='strict',
use_kerberos=ANY,
success=False)
# REQUIRED
def test_bind_required_with_regular_token(self):
self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
bind_level='required',
use_kerberos=ANY,
success=False)
def test_bind_required_with_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='required',
use_kerberos=True,
success=True)
def test_bind_required_without_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='required',
use_kerberos=False,
success=False)
def test_bind_required_with_unknown_bind(self):
self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
bind_level='required',
use_kerberos=ANY,
success=False)
# NAMED
def test_bind_named_with_regular_token(self):
self.assert_kerberos_bind(self.TOKEN_BIND_NONE,
bind_level='kerberos',
use_kerberos=ANY,
success=False)
def test_bind_named_with_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='kerberos',
use_kerberos=True,
success=True)
def test_bind_named_without_kerb_user(self):
self.assert_kerberos_bind(self.TOKEN_BIND_KERB,
bind_level='kerberos',
use_kerberos=False,
success=False)
def test_bind_named_with_unknown_bind(self):
self.assert_kerberos_bind(self.TOKEN_BIND_UNKNOWN,
bind_level='kerberos',
use_kerberos=ANY,
success=False)
def test_bind_named_with_unknown_scheme(self):
self.assert_kerberos_bind(self.ALL_TOKENS,
bind_level='unknown',
use_kerberos=ANY,
success=False)
| apache-2.0 | 4,816,819,909,145,048,000 | 39.111111 | 77 | 0.506799 | false |
rjeschmi/easybuild-easyblocks | easybuild/easyblocks/generic/cmakemake.py | 3 | 4277 | ##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for software that is configured with CMake, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import os
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.environment import setvar
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import ROOT_ENV_VAR_NAME_PREFIX
class CMakeMake(ConfigureMake):
"""Support for configuring build with CMake instead of traditional configure script"""
@staticmethod
def extra_options(extra_vars=None):
"""Define extra easyconfig parameters specific to CMakeMake."""
extra_vars = dict(ConfigureMake.extra_options(extra_vars))
extra_vars.update({
'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
})
return ConfigureMake.extra_options(extra_vars)
def configure_step(self, srcdir=None, builddir=None):
"""Configure build using cmake"""
# Set the search paths for CMake
include_paths = os.pathsep.join(self.toolchain.get_variable("CPPFLAGS", list))
library_paths = os.pathsep.join(self.toolchain.get_variable("LDFLAGS", list))
setvar("CMAKE_INCLUDE_PATH", include_paths)
setvar("CMAKE_LIBRARY_PATH", library_paths)
default_srcdir = '.'
if self.cfg.get('separate_build_dir', False):
objdir = 'easybuild_obj'
try:
os.mkdir(objdir)
os.chdir(objdir)
except OSError, err:
self.log.error("Failed to create separate build dir %s in %s: %s" % (objdir, os.getcwd(), err))
default_srcdir = '..'
if srcdir is None:
if self.cfg.get('srcdir', None) is not None:
srcdir = self.cfg['srcdir']
elif builddir is not None:
self.log.deprecated("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")
srcdir = builddir
else:
srcdir = default_srcdir
options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]
env_to_options = {
'CC': 'CMAKE_C_COMPILER',
'CFLAGS': 'CMAKE_C_FLAGS',
'CXX': 'CMAKE_CXX_COMPILER',
'CXXFLAGS': 'CMAKE_CXX_FLAGS',
'F90': 'CMAKE_Fortran_COMPILER',
'FFLAGS': 'CMAKE_Fortran_FLAGS',
}
for env_name, option in env_to_options.items():
value = os.getenv(env_name)
if value is not None:
options.append("-D%s='%s'" % (option, value))
options_string = " ".join(options)
command = "%s cmake %s %s %s" % (self.cfg['preconfigopts'], srcdir, options_string, self.cfg['configopts'])
(out, _) = run_cmd(command, log_all=True, simple=False)
return out
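# For illustration only: with a hypothetical toolchain exporting CC=gcc and
# CFLAGS="-O2", an easyconfig setting configopts="-DBUILD_SHARED_LIBS=ON" and
# no separate build dir, the assembled command would look roughly like
#
#     cmake . -DCMAKE_INSTALL_PREFIX=<installdir> -DCMAKE_C_COMPILER='gcc' \
#           -DCMAKE_C_FLAGS='-O2' -DBUILD_SHARED_LIBS=ON
#
# (preconfigopts, if set, is prepended in front of the cmake invocation, and
# the order of the -D options derived from the environment is not guaranteed.)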
| gpl-2.0 | 7,201,343,697,509,725,000 | 40.125 | 118 | 0.656301 | false |
gfyoung/pandas | pandas/tests/series/methods/test_argsort.py | 3 | 2248 | import numpy as np
import pytest
from pandas import Series, Timestamp, isna
import pandas._testing as tm
class TestSeriesArgsort:
def _check_accum_op(self, name, ser, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(ser).values, func(np.array(ser)), check_dtype=check_dtype
)
# with missing values
ts = ser.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
def test_argsort(self, datetime_series):
self._check_accum_op("argsort", datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH#2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp(f"201301{i:02d}") for i in range(1, 6)])
assert s.dtype == "datetime64[ns]"
shifted = s.shift(-1)
assert shifted.dtype == "datetime64[ns]"
assert isna(shifted[4])
result = s.argsort()
expected = Series(range(5), dtype="int64")
tm.assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(list(range(4)) + [-1], dtype="int64")
tm.assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind="mergesort")
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind="mergesort")
qexpected = np.argsort(s.values, kind="quicksort")
tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>, "
r"found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
| bsd-3-clause | 2,934,937,973,536,457,000 | 34.68254 | 79 | 0.621886 | false |
ogoodman/icegrid-starter | doc/source/generate_sources.py | 1 | 2454 | """Provides ``generateSources`` which generates a simple tree of contents pages."""
import os
INDEX_RST = """\
.. %s documentation master file
Welcome to %s's documentation
=================================
Contents:
.. toctree::
:maxdepth: 2
%s
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
"""
PACKAGE_RST = """\
.. %s documentation
%s
Contents:
.. toctree::
:maxdepth: 2
%s"""
MODULE_RST = """\
.. %s documentation
%s
.. automodule:: %s
:members:
:show-inheritance:
:inherited-members:
"""
def generateSources(project, contents):
"""Generates ``index.rst`` and a collection of contents pages.
The input is a dictionary whose keys are the packages and
modules to be documentend and whose values are the titles they
should have in the generated contents pages.
For each package or module ``a.b`` a corresponding file ``a.b.rst``
will be generated in the current directory.
:param project: project name
:param contents: dictionary of module titles
"""
mods = {}
for mod in contents:
if mod not in mods:
mods[mod] = set()
while '.' in mod:
cmod = mod
mod = mod.rsplit('.', 1)[0]
if '.' not in mod:
mod = 'index'
if mod not in mods:
mods[mod] = set()
mods[mod].add(cmod)
files = set()
for mod, cmods in mods.iteritems():
if cmods:
cmod_lines = ''.join([' %s\n' % cmod for cmod in sorted(cmods)])
if mod != 'index':
title = mod + ' - ' + contents[mod]
title_lines = '%s\n%s' % (title, '=' * len(title))
if mod == 'index':
out = INDEX_RST % (project, project, cmod_lines)
elif cmods:
out = PACKAGE_RST % (project, title_lines, cmod_lines)
else:
out = MODULE_RST % (project, title_lines, mod)
outfile = mod + '.rst'
files.add(outfile)
if os.path.exists(outfile):
original = open(outfile).read()
else:
original = ''
if original != out:
with open(outfile, 'w') as fh:
fh.write(out)
print 'Added' if original == '' else 'Updated', outfile
for file in os.listdir('.'):
if file.endswith('.rst') and file not in files:
os.unlink(file)
print 'Removed', file
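# Illustrative example only (project name and module titles are hypothetical):
#
#     generateSources('MyProject', {
#         'mypkg': 'Top-level package',
#         'mypkg.core': 'Core helpers',
#         'mypkg.util': 'Utility functions',
#     })
#
# This would write index.rst, mypkg.rst, mypkg.core.rst and mypkg.util.rst in
# the current directory and remove any other stale .rst files found there.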
| mit | 609,997,641,139,304,600 | 22.825243 | 83 | 0.53749 | false |
soybean217/lora-python | GServer/frequency_plan/__init__.py | 1 | 1534 | from abc import ABC, abstractmethod, abstractproperty, abstractclassmethod, abstractstaticmethod
from enum import Enum
class FrequencyPlan(Enum):
EU863_870 = 'EU863_870'
EU433 = 'EU433'
CN470_510 = 'CN470_510'
MT433 = 'MT433'
CP500 = 'CP500'
CNICG470 = 'CNICG470'
@staticmethod
def assert_isinstanceof(value):
assert isinstance(value, FrequencyPlan), '%r is not a valid Frequency' % value
class Frequency(ABC):
@abstractclassmethod
def rx1_freq(cls, freq_up):
pass
@abstractclassmethod
def rx1_datr(cls, dr_up, dr_offset):
pass
@abstractproperty
class DataRate(Enum):
pass
@abstractproperty
class Channel:
pass
@abstractproperty
class TXPower(Enum):
pass
@abstractstaticmethod
def adr_schema(rssi, recent_datr):
pass
@staticmethod
def get_freq_plan(freq_plan):
return frequency_plan[freq_plan]
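# Illustrative usage (sketch): given a device's frequency plan enum value, the
# concrete plan class defined below can be looked up either way:
#
#     plan = Frequency.get_freq_plan(FrequencyPlan.EU863_870)   # -> EU863_870
#     plan = frequency_plan[FrequencyPlan.CN470_510]            # -> CN470_510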
from .CN470_510 import CN470_510
from .EU863_870 import EU863_870
from .EU433 import EU433
from .MT433 import MT433
from .CP500 import CP500
from .CNICG470 import CNICG470
Frequency.register(CN470_510)
Frequency.register(EU863_870)
Frequency.register(EU433)
Frequency.register(MT433)
Frequency.register(CP500)
Frequency.register(CNICG470)
frequency_plan = {
FrequencyPlan.EU863_870: EU863_870,
FrequencyPlan.EU433: EU433,
FrequencyPlan.CN470_510: CN470_510,
FrequencyPlan.MT433: MT433,
FrequencyPlan.CP500: CP500,
FrequencyPlan.CNICG470: CNICG470,
} | mit | -7,143,824,432,248,667,000 | 21.246377 | 96 | 0.698827 | false |
Adman/pynameday | pynameday/austria.py | 1 | 7172 | # -*- coding: utf-8 -*-
from pynameday.core import NamedayMixin
class Austria(NamedayMixin):
"""Austrian namedays"""
NAMEDAYS = (
# january
('Neujahr', 'Basilius', 'Genoveva', 'Angela', 'Emilie',
'Heilige 3 Könige', 'Raimund v. P.', 'Severin, Erhard',
'Julian, Eberhard', 'Gregor X.', 'Taufe Jesu', 'Ernst',
'Jutta, Gottfried', 'Felix v. N.', 'Gabriel v. F.', 'Theobald',
'Antonius', 'Regina', 'Marius, Mario', 'Fabian, Sebastian', 'Agnes',
'Vinzenz, Walter', 'Heinrich S.', 'Franz v. S.', 'Pauli Bekehrung',
'Timotheus, Titus', 'Angela M.', 'Thomas v. A.', 'Valerius',
'Martina', 'Johannes Bosco'),
# february
('Brigitta v. K.', 'Mariä Lichtmess', 'Blasius', 'Andreas C.',
'Agatha', 'Dorothea', 'Richard K.', 'Hieronymus', 'Apollonia, Erich',
'Wilhelm d. Gr.', 'Theodor', 'Reginald', 'Gerlinde', 'Valentin',
'Siegfried', 'Philippa', 'Alexius', 'Konstantia', 'Bonifatius',
'Leo d. W.', 'Petrus Dam.', 'Petri Stuhlfeier', 'Romana', 'Mathias',
'Walpurga', 'Alexander', 'Leander, Gabriel', 'Roman', None),
# march
('Albin, Rüdiger', 'Karl, Agnes', 'Friedrich', 'Kasimir',
'Gerda, Diemar', 'Coletta, Fridolin', 'Reinhard', 'Johannes v. G.',
'Franziska v. R.', 'Emil, Gustav', 'Theresia R.', 'Maximilian',
'Gerald, Paulina', 'Mathilde', 'Klemens M. H.', 'Hilarius v. A.',
'Getrud', 'Eduard', 'Josev, Nährv. Jesu', 'Claudia, Irmgard',
'Alexandra', 'Lea, Elmar', 'Otto v. A.', 'Katharina v. Schw.',
'Verkündung des Herrn', 'Emmanuel', 'Frowin, Haimo', 'Johanna v. M.',
'Berthold v. K.', 'Amadeus v. S.', 'Cornelia'),
# april
('Hugo, Irene', 'Sandra', 'Richard', 'Isidor', 'Vinzenz Ferr.',
'Sixtus I. Cölestin', 'Johann Bapt. de la Salle', 'Walter, Beate',
'Waltraud', 'Engelbert v. A.', 'Stanislaus', 'Julius, Hertha',
'Martin I., Ida v B.', 'Tiburtius', 'Waltmann', 'Bernadette',
'Eberhard', 'Aja Apollonius', 'Leo IX., Gerold', 'Simon v. T.',
'Konrad v. P., Anselm', 'Wolfhelm', 'Georg, Gerhard', 'Wilfried',
'Markus, Erwin, Ev.', 'Trudpert', 'Petrus Can.', 'Ludwig v. M.',
'Roswitha', 'Hildegard'),
# may
(None, 'Athanasius, Boris', 'Philipp, Jakob', 'Florian', 'Gotthard',
'Valerian', 'Gisela', 'Ida, Désiré', 'Volkmar', 'Antonin', 'Gangolf',
'Pankratius', 'Servatius', 'Bonifatius', 'Rupert, Sophie', 'Johannes',
'Walter, Pascal', 'Erich', 'Cölestin V., Ivo', 'Elfriede',
'Hermann, Josef', 'Rita, Julia', 'Renate', 'Vinzenz, Dagmar',
'Gregor VII.', 'Philip N.', 'Augustin v. C.', 'Wilhelm', 'Maximin',
'Ferdinand', 'Petronilla, Aldo'),
# june
('Konrad, Silke', 'Armin, Eugen', 'Karl L., Silvia',
'Franz C., Klothilde', 'Bonifaz', 'Norbert', 'Robert',
'Medardus, Ilga', 'Ephräm, Gratia', 'Heinrich v. B., Diana',
'Barnabas, Alice', 'Leo III., Johann v. S. F.', 'Antonius v. P.',
'Burkhard, Gottschalk', 'Vitus, Veit', 'Benno v. M.',
'Rainer, Adolf v. M.', 'Markus, Marcellianus', 'Juliana v. F.',
'Adalbert, Florentina', 'Aloisius v. G.', 'Thomas M.', 'Edeltraud',
'Johannes der Täufer', 'Dorothea, Eleonore', 'Johann, Paul',
'Cyrill v. A., Harald', 'Irenäus, Diethild', 'Peter, Paul Judith',
'Otto, Ernst v. P.'),
# july
('Theobald', 'Mariä, Heimsuchung', 'Thomas, Raimund',
'Elisabeth v. P.', 'Anton M. Zacc.', 'Maria Goretti', 'Willibald',
'Eugen III., Edgar K.', 'Gottfried, Veronika', 'Knud, Engelbert',
'Benedikt v. N., Oliver', 'Nabor, Felix', 'Heinrich II.', 'Roland',
'Egon, Balduin', 'Maria v. B. K., Carmen', 'Alexius',
'Friedrich, Arnold', 'Justa, Bernulf', 'Margareta', 'Daniel',
'Maria Magdalena', 'Brigitta', 'Christophorus', 'Jakob, Thea, Ap.',
'Anna, Joachim', 'Rudolf A.', 'Samuel, Viktor', 'Martha, Lucilla',
'Ingeborg', 'Ignatius v. L.'),
# august
('Alfons v. L.', 'Eusebius v. V., Stefan', 'Lydia, Benno v. E.',
'Johannes M. V., Rainer', 'Oswald, Dominika', 'Christi Verklärung',
'Albert', 'Dominikus, Gustav', 'Edith', 'Laurentius, Astrid',
'Klara, Susanna', 'Hilaria', 'Gertrud v. A., Marco', 'Maximilian',
'Mariä Himmelfahrt', 'Stefan v. U., Theodor', 'Hyazinth',
'Helene, Claudia', 'Emilia B.', 'Bernhard v. Cl.', 'Pius X.',
'Regina, Siegfried A.', 'Rosa v. L., Philipp B.', 'Isolde, Michaela',
'Ludwig IX., Patricia', 'Margareta v. F.', 'Monika', 'Augustin',
'Sabine, Beatrix v. A.', 'Herbert v. K., Felix', 'Raimund N.'),
# september
('Verena, Ruth', 'René, Ingrid', 'Gregor d. Gr.', 'Rosalia, Ida',
'Laurentius, Albert', 'Magnus, Beata', 'Regina, Ralph',
'Mariä Geburt', 'Grogonius', 'Nikolaus v. T., Diethard', 'Helga',
'Eberhard, Guido', 'Tobias', 'Kreuz-Erhöhung', 'Dolores, Melitta',
'Ludmilla, Edith', 'Robert B., Lambert', 'Josef v. C.',
'Wilma, Arnulf', 'Fausta, Candida', 'Matthäus', 'Moritz',
'Helene D., Thekla', 'Rupert v. S.', 'Nikolaus v. Fl.', 'Eugenia',
'Vinzenz v. P.', 'Wenzel v. B., Dietmar', 'Michael, Gabriel',
'Hieronymus'),
# october
('Theresia v. K. J.', 'Schutzengelfest', 'Ewald, Udo',
'Franz v. Assisi, Edwin', 'Attila, Placidus', 'Bruno d. Karth.',
'Markus I.', 'Simeon', 'Dionysius', 'Viktor v. X.', 'Bruno v. K.',
'Maximilian, Horst', 'Eduard', 'Burkhard', 'Theresia v. A.',
'Hedwig', 'Ignatuis v. A., Rudolf', 'Lukas', 'Paul v. Kr., Frieda',
'Wendelin', 'Ursula', 'Kordula', 'Oda', 'Anton M. Cl.', 'Krispin',
'Nationalfeiertag', 'Wolfhard', 'Simon, Judas, Thadd.', 'Hermelindis',
'Alfons Rodr.', 'Wolfgang, Christoph'),
# november
('Allerheiligen', 'Allerseelen', 'Hubert, Silvia', 'Karl Borr.',
'Emmerich', 'Christina d. K.', 'Engelbert', 'Gottfried', 'Theodor',
'Leo d. Gr., Andreas Av.', 'Martin v. T.', 'Emil, Christian', 'Eugen',
'Alberich', 'Leopold von Österreich', 'Othmar, Edmund',
'Gertrud, Hilda', 'Odo, Roman', 'Elisabeth von Th.', 'Edmund K.',
'Gelasius I.', 'Cäcilia', 'Klemens I., Felicitas',
'Chrysogonus, Flora', 'Katharina v. A.', 'Konrad', 'Oda, Modestus',
'Gunther, Stephan d. J.', 'Friedrich v. R.', 'Andreas'),
# december
('Blanka, Natalie', 'Bibiana', 'Franz, Xaver', 'Barbara', 'Gerald',
'Nikolaus v. M.', 'Ambrosius', 'Mariä Empfängnis', 'Valerie',
'Diethard', 'David, Daniel', 'Johanna F. v. Ch.', 'Lucia, Ottilie',
'Franziska', 'Christiana, Nina', 'Adelheid', 'Lazarus, Jolanda',
'Gatian', 'Urban V.', 'Eugen v. A.', 'Ingomar', 'Jutta, Marian',
'Victoria', 'Heiliger Abend', 'Christtag', 'Stefanitag', 'Johannes',
'Unschuldige, Kinder', 'Thomas B., Tamara', 'Hermine', 'Silvester'),
)
| mit | -1,165,950,710,893,416,200 | 54.851563 | 79 | 0.549028 | false |
melund/python-prompt-toolkit | prompt_toolkit/renderer.py | 13 | 19226 | """
Renders the command line on the console.
(Redraws parts of the input line that were changed.)
"""
from __future__ import unicode_literals
from prompt_toolkit.filters import to_cli_filter
from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.layout.screen import Point, Screen, WritePosition
from prompt_toolkit.output import Output
from prompt_toolkit.styles import Style
from prompt_toolkit.token import Token
from prompt_toolkit.utils import is_windows
from six.moves import range
__all__ = (
'Renderer',
'print_tokens',
)
def _output_screen_diff(output, screen, current_pos, previous_screen=None, last_token=None,
is_done=False, attrs_for_token=None, size=None, previous_width=0): # XXX: drop is_done
"""
Render the diff between this screen and the previous screen.
This takes two `Screen` instances. The one that represents the output like
it was during the last rendering and one that represents the current
output raster. Looking at these two `Screen` instances, this function will
render the difference by calling the appropriate methods of the `Output`
object that only paint the changes to the terminal.
This is some performance-critical code which is heavily optimized.
Don't change things without profiling first.
:param current_pos: Current cursor position.
:param last_token: `Token` instance that represents the output attributes of
the last drawn character. (Color/attributes.)
:param attrs_for_token: :class:`._TokenToAttrsCache` instance.
    :param size: The size of the terminal.
    :param previous_width: The width of the terminal during the last rendering.
"""
width, height = size.columns, size.rows
#: Remember the last printed character.
last_token = [last_token] # nonlocal
#: Variable for capturing the output.
write = output.write
write_raw = output.write_raw
# Create locals for the most used output methods.
# (Save expensive attribute lookups.)
_output_set_attributes = output.set_attributes
_output_reset_attributes = output.reset_attributes
_output_cursor_forward = output.cursor_forward
_output_cursor_up = output.cursor_up
_output_cursor_backward = output.cursor_backward
# Hide cursor before rendering. (Avoid flickering.)
output.hide_cursor()
def reset_attributes():
" Wrapper around Output.reset_attributes. "
_output_reset_attributes()
last_token[0] = None # Forget last char after resetting attributes.
def move_cursor(new):
" Move cursor to this `new` point. Returns the given Point. "
current_x, current_y = current_pos.x, current_pos.y
if new.y > current_y:
            # Use newlines instead of CURSOR_DOWN, because this might add new lines.
# CURSOR_DOWN will never create new lines at the bottom.
# Also reset attributes, otherwise the newline could draw a
# background color.
reset_attributes()
write('\r\n' * (new.y - current_y))
current_x = 0
_output_cursor_forward(new.x)
return new
elif new.y < current_y:
_output_cursor_up(current_y - new.y)
if current_x >= width - 1:
write('\r')
_output_cursor_forward(new.x)
elif new.x < current_x or current_x >= width - 1:
_output_cursor_backward(current_x - new.x)
elif new.x > current_x:
_output_cursor_forward(new.x - current_x)
return new
def output_char(char):
"""
Write the output of this character.
"""
# If the last printed character has the same token, it also has the
# same style, so we don't output it.
the_last_token = last_token[0]
if the_last_token and the_last_token == char.token:
write(char.char)
else:
_output_set_attributes(attrs_for_token[char.token])
write(char.char)
last_token[0] = char.token
# Disable autowrap
if not previous_screen:
output.disable_autowrap()
reset_attributes()
# When the previous screen has a different size, redraw everything anyway.
    # Also when we are done. (We might take up fewer rows, so clearing is important.)
if is_done or not previous_screen or previous_width != width: # XXX: also consider height??
current_pos = move_cursor(Point(0, 0))
reset_attributes()
output.erase_down()
previous_screen = Screen()
# Get height of the screen.
# (height changes as we loop over data_buffer, so remember the current value.)
# (Also make sure to clip the height to the size of the output.)
current_height = min(screen.height, height)
# Loop over the rows.
row_count = min(max(screen.height, previous_screen.height), height)
c = 0 # Column counter.
for y in range(row_count):
new_row = screen.data_buffer[y]
previous_row = previous_screen.data_buffer[y]
zero_width_escapes_row = screen.zero_width_escapes[y]
new_max_line_len = min(width - 1, max(new_row.keys()) if new_row else 0)
previous_max_line_len = min(width - 1, max(previous_row.keys()) if previous_row else 0)
# Loop over the columns.
c = 0
while c < new_max_line_len + 1:
new_char = new_row[c]
old_char = previous_row[c]
char_width = (new_char.width or 1)
# When the old and new character at this position are different,
            # draw the output. (For performance reasons, we don't call
# `Char.__ne__`, but inline the same expression.)
if new_char.char != old_char.char or new_char.token != old_char.token:
current_pos = move_cursor(Point(y=y, x=c))
# Send injected escape sequences to output.
if c in zero_width_escapes_row:
write_raw(zero_width_escapes_row[c])
output_char(new_char)
current_pos = current_pos._replace(x=current_pos.x + char_width)
c += char_width
# If the new line is shorter, trim it.
if previous_screen and new_max_line_len < previous_max_line_len:
current_pos = move_cursor(Point(y=y, x=new_max_line_len+1))
reset_attributes()
output.erase_end_of_line()
# Correctly reserve vertical space as required by the layout.
    # When this is a new screen (drawn for the first time), or when it is for
    # some reason higher than the previous one, move the cursor once to the
    # bottom of the output. That way, we're sure that the terminal scrolls up,
    # even when the lower lines of the canvas just contain whitespace.
    # The most obvious reason that we actually want this behaviour is to avoid
    # the artifact of the input scrolling when the completion menu is shown.
    # (If the scrolling is actually wanted, the layout can still be built in a
    # way to behave that way by setting a dynamic height.)
if current_height > previous_screen.height:
current_pos = move_cursor(Point(y=current_height - 1, x=0))
# Move cursor:
if is_done:
current_pos = move_cursor(Point(y=current_height, x=0))
output.erase_down()
else:
current_pos = move_cursor(screen.cursor_position)
if is_done:
output.enable_autowrap()
# Always reset the color attributes. This is important because a background
# thread could print data to stdout and we want that to be displayed in the
# default colors. (Also, if a background color has been set, many terminals
    # give weird artifacts on resize events.)
reset_attributes()
if screen.show_cursor or is_done:
output.show_cursor()
return current_pos, last_token[0]
class HeightIsUnknownError(Exception):
" Information unavailable. Did not yet receive the CPR response. "
class _TokenToAttrsCache(dict):
"""
A cache structure that maps Pygments Tokens to :class:`.Attr`.
(This is an important speed up.)
"""
def __init__(self, get_style_for_token):
self.get_style_for_token = get_style_for_token
def __missing__(self, token):
try:
result = self.get_style_for_token(token)
except KeyError:
result = None
self[token] = result
return result
class Renderer(object):
"""
Typical usage:
::
output = Vt100_Output.from_pty(sys.stdout)
r = Renderer(style, output)
r.render(cli, layout=...)
"""
def __init__(self, style, output, use_alternate_screen=False, mouse_support=False):
assert isinstance(style, Style)
assert isinstance(output, Output)
self.style = style
self.output = output
self.use_alternate_screen = use_alternate_screen
self.mouse_support = to_cli_filter(mouse_support)
self._in_alternate_screen = False
self._mouse_support_enabled = False
self._bracketed_paste_enabled = False
# Waiting for CPR flag. True when we send the request, but didn't got a
# response.
self.waiting_for_cpr = False
self.reset(_scroll=True)
def reset(self, _scroll=False, leave_alternate_screen=True):
# Reset position
self._cursor_pos = Point(x=0, y=0)
# Remember the last screen instance between renderers. This way,
# we can create a `diff` between two screens and only output the
# difference. It's also to remember the last height. (To show for
# instance a toolbar at the bottom position.)
self._last_screen = None
self._last_size = None
self._last_token = None
# When the style hash changes, we have to do a full redraw as well as
# clear the `_attrs_for_token` dictionary.
self._last_style_hash = None
self._attrs_for_token = None
# Default MouseHandlers. (Just empty.)
self.mouse_handlers = MouseHandlers()
# Remember the last title. Only set the title when it changes.
self._last_title = None
#: Space from the top of the layout, until the bottom of the terminal.
#: We don't know this until a `report_absolute_cursor_row` call.
self._min_available_height = 0
        # In case of Windows, also make sure to scroll to the current cursor
# position. (Only when rendering the first time.)
if is_windows() and _scroll:
self.output.scroll_buffer_to_prompt()
# Quit alternate screen.
if self._in_alternate_screen and leave_alternate_screen:
self.output.quit_alternate_screen()
self._in_alternate_screen = False
# Disable mouse support.
if self._mouse_support_enabled:
self.output.disable_mouse_support()
self._mouse_support_enabled = False
# Disable bracketed paste.
if self._bracketed_paste_enabled:
self.output.disable_bracketed_paste()
self._bracketed_paste_enabled = False
# Flush output. `disable_mouse_support` needs to write to stdout.
self.output.flush()
@property
def height_is_known(self):
"""
True when the height from the cursor until the bottom of the terminal
is known. (It's often nicer to draw bottom toolbars only if the height
is known, in order to avoid flickering when the CPR response arrives.)
"""
return self.use_alternate_screen or self._min_available_height > 0 or \
is_windows() # On Windows, we don't have to wait for a CPR.
@property
def rows_above_layout(self):
"""
Return the number of rows visible in the terminal above the layout.
"""
if self._in_alternate_screen:
return 0
elif self._min_available_height > 0:
total_rows = self.output.get_size().rows
last_screen_height = self._last_screen.height if self._last_screen else 0
return total_rows - max(self._min_available_height, last_screen_height)
else:
raise HeightIsUnknownError('Rows above layout is unknown.')
def request_absolute_cursor_position(self):
"""
Get current cursor position.
For vt100: Do CPR request. (answer will arrive later.)
For win32: Do API call. (Answer comes immediately.)
"""
        # Only do this request when the cursor is at the top row (after a
# clear or reset). We will rely on that in `report_absolute_cursor_row`.
assert self._cursor_pos.y == 0
# For Win32, we have an API call to get the number of rows below the
# cursor.
if is_windows():
self._min_available_height = self.output.get_rows_below_cursor_position()
else:
if self.use_alternate_screen:
self._min_available_height = self.output.get_size().rows
else:
# Asks for a cursor position report (CPR).
self.waiting_for_cpr = True
self.output.ask_for_cpr()
def report_absolute_cursor_row(self, row):
"""
To be called when we know the absolute cursor position.
(As an answer of a "Cursor Position Request" response.)
"""
        # Calculate the number of rows from the cursor position until the
# bottom of the terminal.
total_rows = self.output.get_size().rows
rows_below_cursor = total_rows - row + 1
        # Set the minimum available height to the number of rows below the cursor.
self._min_available_height = rows_below_cursor
self.waiting_for_cpr = False
def render(self, cli, layout, is_done=False):
"""
Render the current interface to the output.
:param is_done: When True, put the cursor at the end of the interface. We
won't print any changes to this part.
"""
output = self.output
# Enter alternate screen.
if self.use_alternate_screen and not self._in_alternate_screen:
self._in_alternate_screen = True
output.enter_alternate_screen()
# Enable bracketed paste.
if not self._bracketed_paste_enabled:
self.output.enable_bracketed_paste()
self._bracketed_paste_enabled = True
# Enable/disable mouse support.
needs_mouse_support = self.mouse_support(cli)
if needs_mouse_support and not self._mouse_support_enabled:
output.enable_mouse_support()
self._mouse_support_enabled = True
elif not needs_mouse_support and self._mouse_support_enabled:
output.disable_mouse_support()
self._mouse_support_enabled = False
# Create screen and write layout to it.
size = output.get_size()
screen = Screen()
screen.show_cursor = False # Hide cursor by default, unless one of the
# containers decides to display it.
mouse_handlers = MouseHandlers()
if is_done:
            height = 0 # When we are done, we don't necessarily want to fill up until the bottom.
else:
height = self._last_screen.height if self._last_screen else 0
height = max(self._min_available_height, height)
        # When the size changes, don't consider the previous screen.
if self._last_size != size:
self._last_screen = None
# When we render using another style, do a full repaint. (Forget about
# the previous rendered screen.)
# (But note that we still use _last_screen to calculate the height.)
if self.style.invalidation_hash() != self._last_style_hash:
self._last_screen = None
self._attrs_for_token = None
if self._attrs_for_token is None:
self._attrs_for_token = _TokenToAttrsCache(self.style.get_attrs_for_token)
self._last_style_hash = self.style.invalidation_hash()
layout.write_to_screen(cli, screen, mouse_handlers, WritePosition(
xpos=0,
ypos=0,
width=size.columns,
height=(size.rows if self.use_alternate_screen else height),
extended_height=size.rows,
))
        # When aborting or exiting, gray out the new screen by replacing all tokens.
if cli.is_aborting or cli.is_exiting:
screen.replace_all_tokens(Token.Aborted)
# Process diff and write to output.
self._cursor_pos, self._last_token = _output_screen_diff(
output, screen, self._cursor_pos,
self._last_screen, self._last_token, is_done,
attrs_for_token=self._attrs_for_token,
size=size,
previous_width=(self._last_size.columns if self._last_size else 0))
self._last_screen = screen
self._last_size = size
self.mouse_handlers = mouse_handlers
# Write title if it changed.
new_title = cli.terminal_title
if new_title != self._last_title:
if new_title is None:
self.output.clear_title()
else:
self.output.set_title(new_title)
self._last_title = new_title
output.flush()
def erase(self, leave_alternate_screen=True, erase_title=True):
"""
Hide all output and put the cursor back at the first line. This is for
instance used for running a system command (while hiding the CLI) and
later resuming the same CLI.)
:param leave_alternate_screen: When True, and when inside an alternate
screen buffer, quit the alternate screen.
:param erase_title: When True, clear the title from the title bar.
"""
output = self.output
output.cursor_backward(self._cursor_pos.x)
output.cursor_up(self._cursor_pos.y)
output.erase_down()
output.reset_attributes()
output.flush()
# Erase title.
if self._last_title and erase_title:
output.clear_title()
self.reset(leave_alternate_screen=leave_alternate_screen)
def clear(self):
"""
Clear screen and go to 0,0
"""
# Erase current output first.
self.erase()
# Send "Erase Screen" command and go to (0, 0).
output = self.output
output.erase_screen()
output.cursor_goto(0, 0)
output.flush()
self.request_absolute_cursor_position()
def print_tokens(output, tokens, style):
"""
Print a list of (Token, text) tuples in the given style to the output.
"""
assert isinstance(output, Output)
assert isinstance(style, Style)
# Reset first.
output.reset_attributes()
output.enable_autowrap()
# Print all (token, text) tuples.
attrs_for_token = _TokenToAttrsCache(style.get_attrs_for_token)
for token, text in tokens:
attrs = attrs_for_token[token]
if attrs:
output.set_attributes(attrs)
else:
output.reset_attributes()
output.write(text)
# Reset again.
output.reset_attributes()
output.flush()
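# Illustrative usage only (assumes an Output implementation and a Style
# instance are already available; the token list below is made up):
#
#     print_tokens(output, [(Token.Keyword, 'hello '), (Token.Name, 'world\n')],
#                  style)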
| bsd-3-clause | -6,831,280,797,240,471,000 | 35.551331 | 111 | 0.621918 | false |
dneg/cortex | test/IECoreRI/PTCParticleReader.py | 12 | 3742 | ##########################################################################
#
# Copyright (c) 2007-2009, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os, os.path
import IECore
import IECoreRI
if hasattr( IECoreRI, "PTCParticleReader" ):
class PTCParticleReaderTester( unittest.TestCase ) :
testfile = os.path.join( os.path.dirname( os.path.abspath( __file__ ) ), "data/test.3Dbake" )
def testConstruction(self):
r = IECore.Reader.create(self.testfile)
self.assertEqual(type(r), IECoreRI.PTCParticleReader)
def testReadFromFactory(self):
r = IECore.Reader.create(self.testfile)
pointCloud = r.read()
self.assertEqual( type(pointCloud), IECore.PointsPrimitive )
def testRead(self):
r = IECoreRI.PTCParticleReader()
r['fileName'] = self.testfile
pointCloud = r.read()
self.assertEqual( type(pointCloud), IECore.PointsPrimitive )
self.assertEqual( pointCloud.numPoints, 2975 )
self.assertEqual( len( pointCloud["P"].data ), 2975 )
self.assertEqual( set( pointCloud.blindData()['PTCParticleIO'].keys() ), set( [ "boundingBox", "worldToEye", "worldToNdc", "variableTypes" ] ) )
r['percentage'] = 50
pointCloud2 = r.read()
self.assertEqual( len( pointCloud2["P"].data ), 1502 )
def testColorLoading( self ) :
r = IECoreRI.PTCParticleReader( "test/IECoreRI/data/cube.ptc" )
n = r.numParticles()
p = r.read()
self.assertEqual( p.variableSize( IECore.PrimitiveVariable.Interpolation.Vertex ), n )
c = p["Ci"].data
self.assertEqual( len( c ), n )
self.assert_( c.isInstanceOf( IECore.Color3fVectorData.staticTypeId() ) )
def testCanRead(self) :
self.assertEqual( IECoreRI.PTCParticleReader.canRead( "test/IECoreRI/data/test.3Dbake" ), True )
self.assertEqual( IECoreRI.PTCParticleReader.canRead( "test/IECoreRI/data/sphere.cob" ), False )
self.assertEqual( IECoreRI.PTCParticleReader.canRead( "test/IECoreRI/data" ), False )
self.assertEqual( IECoreRI.PTCParticleReader.canRead( "test/IECoreRI/thisFileDoesNotExist" ), False )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 5,119,297,769,649,362,000 | 41.044944 | 147 | 0.699893 | false |
NickRuiz/wikitrans-pootle | local_apps/wt_translation/forms.py | 1 | 1913 | from django import forms
from django.forms.formsets import formset_factory
from django.db.models import Q
from wt_translation.models import MachineTranslator, LanguagePair, ServerlandHost, TranslationRequest
from pootle_language.models import Language
#class MachineTranslatorSelectorForm(forms.Form):
# """
# A custom form used to select a machine translator to translate a language pair.
# """
# def __init__(self, source_language, target_language, *args, **kwargs):
# self.source_language = source_language
# self.target_language = target_language
#
# # Call the parent constructor
# super(MachineTranslatorSelectorForm, self).__init__(*args, **kwargs)
# self.fields['translators'].queryset = MachineTranslator.objects.filter(
# supported_languages__source_language = source_language,
# supported_languages__target_language = target_language
# )
#
# translators = forms.ModelMultipleChoiceField(_("Translators"))
class TranslationRequestForm(forms.ModelForm):
def __init__(self, translation_project=None, *args, **kwargs):
super(TranslationRequestForm, self).__init__(*args, **kwargs)
if translation_project != None:
self.translation_project = translation_project
self.fields['translator'].queryset = MachineTranslator.objects.filter(supported_languages__in =
LanguagePair.objects.filter(
Q(source_language=translation_project.project.source_language),
Q(target_language=translation_project.language)
))
class Meta:
model = TranslationRequest
exclude = ('status', 'external_id', 'timestamp',) | gpl-2.0 | -5,019,332,645,198,896,000 | 46.85 | 134 | 0.612128 | false |
idkwim/pysmt | pysmt/shortcuts.py | 1 | 19187 | #
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides the most used functions in a nicely wrapped API.
This module defines a global environment, so that most methods can be
called without the need to specify an environment or a FormulaManager.
Functions trying to access the global environment should use the
method get_global_env(). Keep in mind that the global state of the
environment might lead to inconsistency and unexpected bugs. This is
particularly true for tests. For tests it is recommended to perform an
environment reset in the setUp phase, to be guaranteed that a fresh
environment is used.
"""
# Enable default deprecation warnings!
import warnings
warnings.simplefilter('default')
import pysmt.typing as types
import pysmt.configuration as config
import pysmt.environment
def get_env():
"""Returns the global environment."""
return pysmt.environment.get_env()
def reset_env():
"""Resets the global environment, and returns the new one."""
return pysmt.environment.reset_env()
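# Typical usage of the shortcuts defined below (illustrative sketch; every
# function referenced here is defined later in this module and acts on the
# global environment):
#
#     x, y = Symbol("x"), Symbol("y")
#     f = Implies(x, y)
#     is_sat(f)    # True, e.g. x=False satisfies it
#     is_valid(f)  # False, x=True, y=False falsifies it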
##### Shortcuts for FormulaManager #####
def get_type(formula):
"""Returns the type of the formula."""
return get_env().stc.get_type(formula)
def simplify(formula):
"""Returns the simplified version of the formula."""
return get_env().simplifier.simplify(formula)
def substitute(formula, subs):
"""Applies the substitutions defined in the dictionary to the formula."""
return get_env().substituter.substitute(formula, subs)
def serialize(formula, threshold=None):
"""Provides a string representing the formula."""
return get_env().serializer.serialize(formula,
threshold=threshold)
def get_free_variables(formula):
"""Returns the simplified version of the formula."""
return get_env().fvo.get_free_variables(formula)
def get_atoms(formula):
"""Returns the set of atoms of the formula."""
return get_env().ao.get_atoms(formula)
def get_formula_size(formula, measure=None):
"""Returns the size of the formula as measured by the given counting type.
See pysmt.oracles.SizeOracle for details.
"""
return get_env().sizeo.get_size(formula, measure)
##### Nodes Creation #####
def ForAll(variables, formula):
r""".. math:: \forall v_1, \cdots, v_n . \varphi(v_1, \cdots, v_n)"""
return get_env().formula_manager.ForAll(variables, formula)
def Exists(variables, formula):
r""".. math:: \exists v_1, \cdots, v_n . \varphi(v_1, \cdots, v_n)"""
return get_env().formula_manager.Exists(variables, formula)
def Function(vname, params):
r""".. math:: vname(p_1, \cdots, p_n)"""
return get_env().formula_manager.Function(vname, params)
def Not(formula):
r""".. math:: \lnot \varphi"""
return get_env().formula_manager.Not(formula)
def Implies(left, right):
r""".. math:: l \rightarrow r"""
return get_env().formula_manager.Implies(left, right)
def Iff(left, right):
r""".. math:: l \leftrightarrow r """
return get_env().formula_manager.Iff(left, right)
def GE(left, right):
r""".. math:: l \ge r"""
return get_env().formula_manager.GE(left, right)
def Minus(left, right):
r""".. math:: l - r """
return get_env().formula_manager.Minus(left, right)
def Times(left, right):
r""".. math:: l * r"""
return get_env().formula_manager.Times(left, right)
def Div(left, right):
r""".. math:: \frac{l}{r}"""
return get_env().formula_manager.Div(left, right)
def Equals(left, right):
r""".. math:: l = r"""
return get_env().formula_manager.Equals(left, right)
def GT(left, right):
r""".. math:: l > r"""
return get_env().formula_manager.GT(left, right)
def LE(left, right):
r""".. math:: l \le r"""
return get_env().formula_manager.LE(left, right)
def LT(left, right):
r""".. math:: l < r"""
return get_env().formula_manager.LT(left, right)
def Ite(iff, left, right):
r""".. math:: \text{ If } i \text{ Then } l \text{ Else } r"""
return get_env().formula_manager.Ite(iff, left, right)
def Symbol(name, typename=types.BOOL):
"""Returns a symbol with the given name and type."""
return get_env().formula_manager.Symbol(name, typename)
def FreshSymbol(typename=types.BOOL, template=None):
"""Returns a symbol with a fresh name and given type."""
return get_env().formula_manager.FreshSymbol(typename, template)
def Int(value):
"""Returns an Integer constant with the given value."""
return get_env().formula_manager.Int(value)
def Bool(value):
"""Returns a Boolean constant with the given value."""
return get_env().formula_manager.Bool(value)
def Real(value):
"""Returns a Real constant with the given value."""
return get_env().formula_manager.Real(value)
def TRUE():
"""Returns the Boolean constant TRUE."""
return get_env().formula_manager.TRUE()
def FALSE():
"""Returns the Boolean constant FALSE."""
return get_env().formula_manager.FALSE()
def And(*args):
r""".. math:: \varphi_0 \land \cdots \land \varphi_n """
return get_env().formula_manager.And(*args)
def Or(*args):
r""".. math:: \varphi_0 \lor \cdots \lor \varphi_n """
return get_env().formula_manager.Or(*args)
def Plus(*args):
r""".. math:: \varphi_0 + \cdots + \varphi_n """
return get_env().formula_manager.Plus(*args)
def ToReal(formula):
"""Explicit cast of a term into a Real term."""
return get_env().formula_manager.ToReal(formula)
def AtMostOne(*args):
"""
Cardinality constraint over a set of boolean expressions.
    At most one of the expressions can be true.
"""
return get_env().formula_manager.AtMostOne(*args)
def ExactlyOne(*args):
"""Given a set of boolean expressions requires that exactly one holds."""
return get_env().formula_manager.ExactlyOne(*args)
def AllDifferent(*args):
"""Given a set of non-boolean expressions, requires that each of them
has value different from all the others
"""
return get_env().formula_manager.AllDifferent(*args)
def Xor(left, right):
"""Returns the XOR of left and right"""
return get_env().formula_manager.Xor(left, right)
def Min(*args):
"""
Minimum over a set of real or integer terms
"""
return get_env().formula_manager.Min(*args)
def Max(*args):
"""
Maximum over a set of real or integer terms
"""
return get_env().formula_manager.Max(*args)
# Bit Vectors
def BV(value, width=None):
"""Returns a constant of type BitVector.
value can be either:
- a string of 0s and 1s
- a string starting with "#b" followed by a sequence of 0s and 1s
- an integer number s.t. 0 <= value < 2**width
In order to create the BV representation of a signed integer,
the SBV() method shall be used.
"""
return get_env().formula_manager.BV(value, width)
def SBV(value, width=None):
"""Returns a constant of type BitVector interpreting the sign.
If the specified value is an integer, it is converted in the
2-complement representation of the given number, otherwise the
behavior is the same as BV().
"""
return get_env().formula_manager.SBV(value, width)
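# For example (illustrative): SBV(-1, 4) yields the 4-bit two's complement
# pattern 1111, i.e. the same constant as BV(15, 4), while for non-negative
# values SBV behaves exactly like BV.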
def BVOne(width=None):
"""Returns the unsigned one constant BitVector."""
return get_env().formula_manager.BVOne(width)
def BVZero(width=None):
"""Returns the zero constant BitVector."""
return get_env().formula_manager.BVZero(width)
def BVNot(formula):
"""Returns the bitvector Not(bv)"""
return get_env().formula_manager.BVNot(formula)
def BVAnd(left, right):
"""Returns the Bit-wise AND of two bitvectors of the same size."""
return get_env().formula_manager.BVAnd(left, right)
def BVOr(left, right):
"""Returns the Bit-wise OR of two bitvectors of the same size."""
return get_env().formula_manager.BVOr(left, right)
def BVXor(left, right):
"""Returns the Bit-wise XOR of two bitvectors of the same size."""
return get_env().formula_manager.BVXor(left, right)
def BVConcat(left, right):
"""Returns the Concatenation of the two BVs"""
return get_env().formula_manager.BVConcat(left, right)
def BVExtract(formula, start=0, end=None):
"""Returns the slice of formula from start to end (inclusive)."""
return get_env().formula_manager.BVExtract(formula, start=start, end=end)
def BVULT(left, right):
"""Returns the formula left < right."""
return get_env().formula_manager.BVULT(left, right)
def BVUGT(left, right):
"""Returns the formula left > right."""
return get_env().formula_manager.BVUGT(left, right)
def BVULE(left, right):
"""Returns the formula left <= right."""
return get_env().formula_manager.BVULE(left, right)
def BVUGE(left, right):
"""Returns the formula left >= right."""
return get_env().formula_manager.BVUGE(left, right)
def BVNeg(formula):
"""Returns the arithmetic negation of the BV."""
return get_env().formula_manager.BVNeg(formula)
def BVAdd(left, right):
"""Returns the sum of two BV."""
return get_env().formula_manager.BVAdd(left, right)
def BVSub(left, right):
"""Returns the difference of two BV."""
return get_env().formula_manager.BVSub(left, right)
def BVMul(left, right):
"""Returns the product of two BV."""
return get_env().formula_manager.BVMul(left, right)
def BVUDiv(left, right):
"""Returns the division of the two BV."""
return get_env().formula_manager.BVUDiv(left, right)
def BVURem(left, right):
"""Returns the reminder of the two BV."""
return get_env().formula_manager.BVURem(left, right)
def BVLShl(left, right):
"""Returns the logical left shift the BV."""
return get_env().formula_manager.BVLShl(left, right)
def BVLShr(left, right):
"""Returns the logical right shift the BV."""
return get_env().formula_manager.BVLShr(left, right)
def BVRol(formula, steps):
"""Returns the LEFT rotation of the BV by the number of steps."""
return get_env().formula_manager.BVRol(formula, steps)
def BVRor(formula, steps):
"""Returns the RIGHT rotation of the BV by the number of steps."""
return get_env().formula_manager.BVRor(formula, steps)
def BVZExt(formula, increase):
"""Returns the extension of the BV
New bits are set to zero.
"""
return get_env().formula_manager.BVZExt(formula, increase)
def BVSExt(formula, increase):
"""Returns the signed extension of the BV
New bits are set according to the most-significant-bit.
"""
return get_env().formula_manager.BVSExt(formula, increase)
def BVSLT(left, right):
"""Returns the SIGNED LOWER-THAN comparison for BV."""
return get_env().formula_manager.BVSLT(left, right)
def BVSLE(left, right):
"""Returns the SIGNED LOWER-THAN-OR-EQUAL-TO comparison for BV."""
return get_env().formula_manager.BVSLE(left, right)
def BVSGT(left, right):
"""Returns the SIGNED GREATER-THAN comparison for BV."""
return get_env().formula_manager.BVSGT(left, right)
def BVSGE(left, right):
"""Returns the SIGNED GREATER-THAN-OR-EQUAL-TO comparison for BV."""
return get_env().formula_manager.BVSGE(left, right)
def BVSDiv(left, right):
"""Returns the SIGNED DIVISION of left by right"""
return get_env().formula_manager.BVSDiv(left, right)
def BVSRem(left, right):
"""Returns the SIGNED REMAINDER of left divided by right"""
return get_env().formula_manager.BVSRem(left, right)
def BVComp(left, right):
"""Returns a BV of size 1 equal to 0 if left is equal to right,
otherwise 1 is returned."""
return get_env().formula_manager.BVComp(left, right)
def BVAShr(left, right):
"""Returns the RIGHT arithmetic rotation of the left BV by the number
of steps specified by the right BV."""
return get_env().formula_manager.BVAShr(left, right)
#### Shortcuts for Solvers Factory #####
def Solver(quantified=False, name=None, logic=None):
"""Returns a solver."""
return get_env().factory.Solver(quantified=quantified,
name=name,
logic=logic)
def UnsatCoreSolver(quantified=False, name=None, logic=None,
unsat_cores_mode="all"):
"""Returns a solver supporting unsat core extraction."""
return get_env().factory.UnsatCoreSolver(quantified=quantified,
name=name,
logic=logic,
unsat_cores_mode=unsat_cores_mode)
def QuantifierEliminator(name=None, logic=None):
"""Returns a quantifier eliminator"""
return get_env().factory.QuantifierEliminator(name=name, logic=logic)
def Interpolator(name=None, logic=None):
"""Returns an interpolator"""
return get_env().factory.Interpolator(name=name, logic=logic)
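# Usage sketch for the factory shortcuts above (the solver name "z3" is an
# assumption and depends on the installed backends): solvers are normally used
# as context managers so the backend is shut down cleanly.
#
#   with Solver(name="z3") as s:
#       s.add_assertion(f)          # f is an FNode built with this module
#       if s.solve():
#           model = s.get_model()
#
# add_assertion/solve/get_model are the standard pysmt Solver methods.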
def is_sat(formula, solver_name=None, logic=None):
""" Returns whether a formula is satisfiable.
:param formula: The formula to check satisfiability
:type formula: FNode
:param solver_name: Specify the name of the solver to be used.
:param logic: Specify the logic that is going to be used.
:returns: Whether the formula is SAT or UNSAT.
:rtype: bool
"""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_sat")
formula = env.formula_manager.normalize(formula)
return env.factory.is_sat(formula,
solver_name=solver_name,
logic=logic)
def get_model(formula, solver_name=None, logic=None):
""" Similar to :py:func:`is_sat` but returns a model if the formula is
satisfiable, otherwise None."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during get_model")
formula = env.formula_manager.normalize(formula)
return env.factory.get_model(formula,
solver_name=solver_name,
logic=logic)
def get_implicant(formula, solver_name=None, logic=None):
"""Returns a formula f_i such that Implies(f_i, formula) is valid or None
if formula is unsatisfiable.
    """
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during get_model")
formula = env.formula_manager.normalize(formula)
return env.factory.get_implicant(formula,
solver_name=solver_name,
logic=logic)
def get_unsat_core(clauses, solver_name=None, logic=None):
"""Similar to :py:func:`get_model` but returns the unsat core of the
conjunction of the input clauses"""
env = get_env()
if any(c not in env.formula_manager for c in clauses):
warnings.warn("Warning: Contextualizing formula during get_model")
clauses = [env.formula_manager.normalize(c) for c in clauses]
return env.factory.get_unsat_core(clauses,
solver_name=solver_name,
logic=logic)
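# Minimal sketch of unsat-core extraction (illustrative only): with a backend
# that supports unsat cores, two contradictory assertions yield a core that
# contains both of them. Symbol and Not are the constructors defined earlier in
# this module.
#
#   a = Symbol("a")
#   core = get_unsat_core([a, Not(a)])   # expected to contain a and Not(a)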
def is_valid(formula, solver_name=None, logic=None):
"""Similar to :py:func:`is_sat` but checks validity."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_valid")
formula = env.formula_manager.normalize(formula)
return env.factory.is_valid(formula,
solver_name=solver_name,
logic=logic)
def is_unsat(formula, solver_name=None, logic=None):
"""Similar to :py:func:`is_sat` but checks unsatisfiability."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_unsat")
formula = env.formula_manager.normalize(formula)
return env.factory.is_unsat(formula,
solver_name=solver_name,
logic=logic)
def qelim(formula, solver_name=None, logic=None):
"""Performs quantifier elimination of the given formula."""
env = get_env()
if formula not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during is_unsat")
formula = env.formula_manager.normalize(formula)
return env.factory.qelim(formula,
solver_name=solver_name,
logic=logic)
def binary_interpolant(formula_a, formula_b, solver_name=None, logic=None):
"""Computes an interpolant of (formula_a, formula_b). Returns None
if the conjunction is satisfiable"""
env = get_env()
formulas = [formula_a, formula_b]
for i, f in enumerate(formulas):
if f not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during "
"binary_interpolant")
formulas[i] = env.formula_manager.normalize(f)
return env.factory.binary_interpolant(formulas[0], formulas[1],
solver_name=solver_name,
logic=logic)
def sequence_interpolant(formulas, solver_name=None, logic=None):
"""Computes a sequence interpolant of the formulas. Returns None
if the conjunction is satisfiable"""
env = get_env()
formulas = list(formulas)
for i, f in enumerate(formulas):
if f not in env.formula_manager:
warnings.warn("Warning: Contextualizing formula during "
"sequence_interpolant")
formulas[i] = env.formula_manager.normalize(f)
return env.factory.sequence_interpolant(formulas,
solver_name=solver_name,
logic=logic)
def read_configuration(config_filename, environment=None):
"""
Reads the pysmt configuration of the given file path and applies
it on the specified environment. If no environment is specified,
the top-level environment will be used.
"""
if environment is None:
environment = get_env()
config.configure_environment(config_filename, environment)
def write_configuration(config_filename, environment=None):
"""
Dumps the current pysmt configuration to the specified file path
"""
if environment is None:
environment = get_env()
config.write_environment_configuration(config_filename, environment)
| apache-2.0 | -1,244,767,525,166,926,600 | 34.465804 | 79 | 0.655548 | false |
MrLpk/nba | P.py | 1 | 2242 | #coding=utf-8
import os
import json
from extlibs.MTool import MTool
class P(object):
"""docstring for P"""
def __init__(self):
self.a = 0
self.b = 0
self.f = 0
self.p = 0
self.s = 0
self.r = 0
self.t = 0
self.path = 'db/wu.db'
def count_playoffs(self, items):
_score = 0
_hscore = 0
_lscore = 9999
_times = 0
for item in items:
_s = (item['a']*self.a + item['b']*self.b + item['f']*self.f + item['p']*self.p + item['s']*self.s + item['r']*self.r + item['t']*self.t)
if _s > _hscore:
_hscore = _s
if _s < _lscore:
_lscore = _s
_score += _s
_times += 1
if len(items) > 2:
_score = _score - _hscore - _lscore
_times -= 2
        # compute the average score
print ' tt -- ', _times
_point = _score / float(_times)
return _point
def count(self, items):
_score = 0
_hscore = 0
_lscore = 9999
_times = 0
for item in items:
if item['po'] == 0:
_s = (item['a']*self.a + item['b']*self.b + item['f']*self.f + item['p']*self.p + item['s']*self.s + item['r']*self.r + item['t']*self.t)
if _s > _hscore:
_hscore = _s
if _s < _lscore:
_lscore = _s
_score += _s
_times += 1
if len(items) > 2:
_score = _score - _hscore - _lscore
_times -= 2
        # compute the average score
print ' tt -- ', _times
_point = _score / float(_times)
return _point
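    # Worked example of the trimmed average above (illustrative numbers): with
    # per-game weighted scores of, say, 30, 42, 18 and 26, the highest (42) and
    # lowest (18) games are dropped and the mean of the rest is returned:
    # (30 + 26) / 2 = 28.0. With two or fewer games nothing is dropped and the
    # plain average is used.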
def start(self):
_path = 'db/1/'
_data = []
_count = 1
for x in os.listdir(_path):
if x == '.DS_Store':
continue
_p = _path + '3704.db'
# print 'path:', _p
_f = open(_p, 'r').read()
_json = json.loads(_f)
'''view one player'''
for i in _json:
_point = self.count(_json[i])
_playoffsP = self.count_playoffs(_json[i])
_o = {i:[{'n':_point}, {'o':_playoffsP}]}
_data.append(_o)
print u'%s -- %f -- %d times' %(i, _point, _count)
print u'%s -- %f -- %d times' %(i, _playoffsP, _count)
_count+=1
# break
return
_json = json.dumps({'p':_data})
m = MTool()
m.save(self.path, _json)
def case1(self):
self.a = 2
self.b = 1
self.f = -1.3
self.p = 1
self.s = 0.7
self.r = 0.65
self.t = -0.8
self.path = 'db/case1.db'
if __name__ == '__main__':
# start()
p = P()
p.case1()
p.start()
print p.p | agpl-3.0 | -1,887,281,974,756,119,600 | 18.672566 | 141 | 0.506751 | false |
ioreshnikov/wells | ztqho_.py | 1 | 3665 | #!/usr/bin/env python3
import argparse
import scipy
import scipy.interpolate as interpolate
import scipy.sparse as sparse
import scipy.optimize as optimize
import wells.util as util
parser = argparse.ArgumentParser()
parser.add_argument("--delta",
help="Detuning between the pump and the resonance",
type=float,
default=0.0)
parser.add_argument("--loss",
help="Linear losses",
type=float,
default=0.0)
parser.add_argument("--pump",
help="Pump",
type=float,
default=0.0)
parser.add_argument("--n",
help="Mode number",
type=int,
default=0)
parser.add_argument("--label",
help="Auxiliary label",
type=str,
default="0")
parser.add_argument("--scale",
help="Initial guess scaling",
type=float,
default=0.0)
parser.add_argument("--input",
help="Read initial condition from a file",
type=str)
parser.add_argument("--interpolate",
help="Interpolate and oversample initial guess",
action="store_true")
args = parser.parse_args()
# Coordinate grid parameters.
minx = -128
maxx = +128
nx = 2**14
dx = (maxx - minx) / (nx - 1)
# Coordinate grid.
x = scipy.linspace(minx, maxx, nx)
# Potential.
l = 10.0
u = 1/2 * x**2
u[abs(x) >= l] = 1/2 * l**2
# Define operators for the Newton-CG method.
laplacian = util.laplacian(nx, dx)
potential = sparse.diags(u, 0, (nx, nx))
delta = args.delta * sparse.eye(nx, nx)
loss = args.loss * sparse.eye(nx, nx)
pump = args.pump * scipy.ones(2*nx)
pump[nx:] = 0
laplacian = sparse.bmat([[laplacian, None], [None, laplacian]])
potential = sparse.bmat([[potential, None], [None, potential]])
delta = sparse.bmat([[delta, None], [None, delta]])
loss = sparse.bmat([[None, loss], [-loss, None]])
# Nonlinear operator.
def l0(state):
real = state[:nx]
imag = state[nx:]
power = (real**2 + imag**2)
focusing = sparse.diags(power, 0, (nx, nx))
focusing = sparse.bmat([[focusing, None], [None, focusing]])
operator = (
-1/2 * laplacian +
potential -
focusing +
delta +
loss)
return operator.dot(state) - pump
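# Descriptive note: the operator assembled above is the real/imaginary-split
# form of the stationary driven equation
#     -1/2 * psi'' + u(x) * psi - |psi|^2 * psi + delta * psi + (loss coupling) = pump,
# where state[:nx] holds Re(psi) and state[nx:] holds Im(psi); the off-diagonal
# `loss` block implements the linear damping term that mixes the two parts.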
initial = scipy.zeros(2*nx)
if args.input:
workspace = scipy.load(args.input)
solution = workspace["solution"]
if args.interpolate:
x_ = workspace["x"]
real = solution.real
imag = solution.imag
real = interpolate.interp1d(
x_, real,
fill_value=(real[0], real[-1]),
bounds_error=False)(x)
imag = interpolate.interp1d(
x_, imag,
fill_value=(imag[0], imag[-1]),
bounds_error=False)(x)
initial[:nx] = real
initial[nx:] = imag
else:
solution = args.scale * solution
initial[:nx] = solution.real
initial[nx:] = solution.imag
else:
initial[:] = 0
# Solve using Newton-Krylov method.
solution = optimize.newton_krylov(l0, initial)
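# Optional sanity check (illustrative; the tolerance is an assumption, since
# newton_krylov's default f_tol may leave a slightly larger residual):
#   assert abs(l0(solution)).max() < 1e-5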
filename = ("mode=%d_delta=%.3f_pump=%.2E_loss=%.2E_%s.npz" %
(args.n, args.delta, args.pump, args.loss, args.label))
workspace = {}
workspace["x"] = x
workspace["potential"] = u
workspace["n"] = args.n
workspace["delta"] = args.delta
workspace["solution"] = solution[:nx] + 1j * solution[nx:]
workspace["pump"] = args.pump
workspace["loss"] = args.loss
scipy.savez(filename, **workspace)
print(filename)
| mit | -1,293,314,591,098,960,400 | 25.948529 | 71 | 0.562074 | false |
violine1101/MCEdit-Unified | albow/utils.py | 9 | 1500 | from pygame import Surface
from pygame.locals import SRCALPHA
def frame_rect(surface, color, rect, thick=1):
o = 1
surface.fill(color, (rect.left + o, rect.top, rect.width - o - o, thick))
surface.fill(color, (rect.left + o, rect.bottom - thick, rect.width - o - o, thick))
surface.fill(color, (rect.left, rect.top + o, thick, rect.height - o - o))
surface.fill(color, (rect.right - thick, rect.top + o, thick, rect.height - o - o))
def blit_tinted(surface, image, pos, tint, src_rect=None):
from Numeric import add, minimum
from pygame.surfarray import array3d, pixels3d
if src_rect:
image = image.subsurface(src_rect)
buf = Surface(image.get_size(), SRCALPHA, 32)
buf.blit(image, (0, 0))
src_rgb = array3d(image)
buf_rgb = pixels3d(buf)
buf_rgb[...] = minimum(255, add(tint, src_rgb)).astype('b')
surface.blit(buf, pos)
def blit_in_rect(dst, src, frame, align='tl', margin=0):
r = src.get_rect()
align_rect(r, frame, align, margin)
dst.blit(src, r)
def align_rect(r, frame, align='tl', margin=0):
if 'l' in align:
r.left = frame.left + margin
elif 'r' in align:
r.right = frame.right - margin
else:
r.centerx = frame.centerx
if 't' in align:
r.top = frame.top + margin
elif 'b' in align:
r.bottom = frame.bottom - margin
else:
r.centery = frame.centery
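# Usage sketch (surface names are hypothetical): align codes combine 'l'/'r'
# with 't'/'b'; any axis without a letter is centered. For example
#   blit_in_rect(screen, label, screen.get_rect(), align='br', margin=4)
# draws `label` in the bottom-right corner of `screen` with a 4 pixel margin,
# while align='c' (or any string without l/r/t/b) centers it on both axes.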
def brighten(rgb, factor):
return [min(255, int(round(factor * c))) for c in rgb]
| isc | 7,692,801,639,705,441,000 | 29.612245 | 88 | 0.618 | false |
googleapis/python-datalabeling | google/cloud/datalabeling_v1beta1/__init__.py | 1 | 12514 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.data_labeling_service import DataLabelingServiceClient
from .services.data_labeling_service import DataLabelingServiceAsyncClient
from .types.annotation import Annotation
from .types.annotation import AnnotationMetadata
from .types.annotation import AnnotationValue
from .types.annotation import BoundingPoly
from .types.annotation import ImageBoundingPolyAnnotation
from .types.annotation import ImageClassificationAnnotation
from .types.annotation import ImagePolylineAnnotation
from .types.annotation import ImageSegmentationAnnotation
from .types.annotation import NormalizedBoundingPoly
from .types.annotation import NormalizedPolyline
from .types.annotation import NormalizedVertex
from .types.annotation import ObjectTrackingFrame
from .types.annotation import OperatorMetadata
from .types.annotation import Polyline
from .types.annotation import SequentialSegment
from .types.annotation import TextClassificationAnnotation
from .types.annotation import TextEntityExtractionAnnotation
from .types.annotation import TimeSegment
from .types.annotation import Vertex
from .types.annotation import VideoClassificationAnnotation
from .types.annotation import VideoEventAnnotation
from .types.annotation import VideoObjectTrackingAnnotation
from .types.annotation import AnnotationSentiment
from .types.annotation import AnnotationSource
from .types.annotation import AnnotationType
from .types.annotation_spec_set import AnnotationSpec
from .types.annotation_spec_set import AnnotationSpecSet
from .types.data_labeling_service import CreateAnnotationSpecSetRequest
from .types.data_labeling_service import CreateDatasetRequest
from .types.data_labeling_service import CreateEvaluationJobRequest
from .types.data_labeling_service import CreateInstructionRequest
from .types.data_labeling_service import DeleteAnnotatedDatasetRequest
from .types.data_labeling_service import DeleteAnnotationSpecSetRequest
from .types.data_labeling_service import DeleteDatasetRequest
from .types.data_labeling_service import DeleteEvaluationJobRequest
from .types.data_labeling_service import DeleteInstructionRequest
from .types.data_labeling_service import ExportDataRequest
from .types.data_labeling_service import GetAnnotatedDatasetRequest
from .types.data_labeling_service import GetAnnotationSpecSetRequest
from .types.data_labeling_service import GetDataItemRequest
from .types.data_labeling_service import GetDatasetRequest
from .types.data_labeling_service import GetEvaluationJobRequest
from .types.data_labeling_service import GetEvaluationRequest
from .types.data_labeling_service import GetExampleRequest
from .types.data_labeling_service import GetInstructionRequest
from .types.data_labeling_service import ImportDataRequest
from .types.data_labeling_service import LabelImageRequest
from .types.data_labeling_service import LabelTextRequest
from .types.data_labeling_service import LabelVideoRequest
from .types.data_labeling_service import ListAnnotatedDatasetsRequest
from .types.data_labeling_service import ListAnnotatedDatasetsResponse
from .types.data_labeling_service import ListAnnotationSpecSetsRequest
from .types.data_labeling_service import ListAnnotationSpecSetsResponse
from .types.data_labeling_service import ListDataItemsRequest
from .types.data_labeling_service import ListDataItemsResponse
from .types.data_labeling_service import ListDatasetsRequest
from .types.data_labeling_service import ListDatasetsResponse
from .types.data_labeling_service import ListEvaluationJobsRequest
from .types.data_labeling_service import ListEvaluationJobsResponse
from .types.data_labeling_service import ListExamplesRequest
from .types.data_labeling_service import ListExamplesResponse
from .types.data_labeling_service import ListInstructionsRequest
from .types.data_labeling_service import ListInstructionsResponse
from .types.data_labeling_service import PauseEvaluationJobRequest
from .types.data_labeling_service import ResumeEvaluationJobRequest
from .types.data_labeling_service import SearchEvaluationsRequest
from .types.data_labeling_service import SearchEvaluationsResponse
from .types.data_labeling_service import SearchExampleComparisonsRequest
from .types.data_labeling_service import SearchExampleComparisonsResponse
from .types.data_labeling_service import UpdateEvaluationJobRequest
from .types.data_payloads import ImagePayload
from .types.data_payloads import TextPayload
from .types.data_payloads import VideoPayload
from .types.data_payloads import VideoThumbnail
from .types.dataset import AnnotatedDataset
from .types.dataset import AnnotatedDatasetMetadata
from .types.dataset import BigQuerySource
from .types.dataset import ClassificationMetadata
from .types.dataset import DataItem
from .types.dataset import Dataset
from .types.dataset import Example
from .types.dataset import GcsDestination
from .types.dataset import GcsFolderDestination
from .types.dataset import GcsSource
from .types.dataset import InputConfig
from .types.dataset import LabelStats
from .types.dataset import OutputConfig
from .types.dataset import TextMetadata
from .types.dataset import DataType
from .types.evaluation import BoundingBoxEvaluationOptions
from .types.evaluation import ClassificationMetrics
from .types.evaluation import ConfusionMatrix
from .types.evaluation import Evaluation
from .types.evaluation import EvaluationConfig
from .types.evaluation import EvaluationMetrics
from .types.evaluation import ObjectDetectionMetrics
from .types.evaluation import PrCurve
from .types.evaluation_job import Attempt
from .types.evaluation_job import EvaluationJob
from .types.evaluation_job import EvaluationJobAlertConfig
from .types.evaluation_job import EvaluationJobConfig
from .types.human_annotation_config import BoundingPolyConfig
from .types.human_annotation_config import EventConfig
from .types.human_annotation_config import HumanAnnotationConfig
from .types.human_annotation_config import ImageClassificationConfig
from .types.human_annotation_config import ObjectDetectionConfig
from .types.human_annotation_config import ObjectTrackingConfig
from .types.human_annotation_config import PolylineConfig
from .types.human_annotation_config import SegmentationConfig
from .types.human_annotation_config import SentimentConfig
from .types.human_annotation_config import TextClassificationConfig
from .types.human_annotation_config import TextEntityExtractionConfig
from .types.human_annotation_config import VideoClassificationConfig
from .types.human_annotation_config import StringAggregationType
from .types.instruction import CsvInstruction
from .types.instruction import Instruction
from .types.instruction import PdfInstruction
from .types.operations import CreateInstructionMetadata
from .types.operations import ExportDataOperationMetadata
from .types.operations import ExportDataOperationResponse
from .types.operations import ImportDataOperationMetadata
from .types.operations import ImportDataOperationResponse
from .types.operations import LabelImageBoundingBoxOperationMetadata
from .types.operations import LabelImageBoundingPolyOperationMetadata
from .types.operations import LabelImageClassificationOperationMetadata
from .types.operations import LabelImageOrientedBoundingBoxOperationMetadata
from .types.operations import LabelImagePolylineOperationMetadata
from .types.operations import LabelImageSegmentationOperationMetadata
from .types.operations import LabelOperationMetadata
from .types.operations import LabelTextClassificationOperationMetadata
from .types.operations import LabelTextEntityExtractionOperationMetadata
from .types.operations import LabelVideoClassificationOperationMetadata
from .types.operations import LabelVideoEventOperationMetadata
from .types.operations import LabelVideoObjectDetectionOperationMetadata
from .types.operations import LabelVideoObjectTrackingOperationMetadata
__all__ = (
"DataLabelingServiceAsyncClient",
"AnnotatedDataset",
"AnnotatedDatasetMetadata",
"Annotation",
"AnnotationMetadata",
"AnnotationSentiment",
"AnnotationSource",
"AnnotationSpec",
"AnnotationSpecSet",
"AnnotationType",
"AnnotationValue",
"Attempt",
"BigQuerySource",
"BoundingBoxEvaluationOptions",
"BoundingPoly",
"BoundingPolyConfig",
"ClassificationMetadata",
"ClassificationMetrics",
"ConfusionMatrix",
"CreateAnnotationSpecSetRequest",
"CreateDatasetRequest",
"CreateEvaluationJobRequest",
"CreateInstructionMetadata",
"CreateInstructionRequest",
"CsvInstruction",
"DataItem",
"DataLabelingServiceClient",
"DataType",
"Dataset",
"DeleteAnnotatedDatasetRequest",
"DeleteAnnotationSpecSetRequest",
"DeleteDatasetRequest",
"DeleteEvaluationJobRequest",
"DeleteInstructionRequest",
"Evaluation",
"EvaluationConfig",
"EvaluationJob",
"EvaluationJobAlertConfig",
"EvaluationJobConfig",
"EvaluationMetrics",
"EventConfig",
"Example",
"ExportDataOperationMetadata",
"ExportDataOperationResponse",
"ExportDataRequest",
"GcsDestination",
"GcsFolderDestination",
"GcsSource",
"GetAnnotatedDatasetRequest",
"GetAnnotationSpecSetRequest",
"GetDataItemRequest",
"GetDatasetRequest",
"GetEvaluationJobRequest",
"GetEvaluationRequest",
"GetExampleRequest",
"GetInstructionRequest",
"HumanAnnotationConfig",
"ImageBoundingPolyAnnotation",
"ImageClassificationAnnotation",
"ImageClassificationConfig",
"ImagePayload",
"ImagePolylineAnnotation",
"ImageSegmentationAnnotation",
"ImportDataOperationMetadata",
"ImportDataOperationResponse",
"ImportDataRequest",
"InputConfig",
"Instruction",
"LabelImageBoundingBoxOperationMetadata",
"LabelImageBoundingPolyOperationMetadata",
"LabelImageClassificationOperationMetadata",
"LabelImageOrientedBoundingBoxOperationMetadata",
"LabelImagePolylineOperationMetadata",
"LabelImageRequest",
"LabelImageSegmentationOperationMetadata",
"LabelOperationMetadata",
"LabelStats",
"LabelTextClassificationOperationMetadata",
"LabelTextEntityExtractionOperationMetadata",
"LabelTextRequest",
"LabelVideoClassificationOperationMetadata",
"LabelVideoEventOperationMetadata",
"LabelVideoObjectDetectionOperationMetadata",
"LabelVideoObjectTrackingOperationMetadata",
"LabelVideoRequest",
"ListAnnotatedDatasetsRequest",
"ListAnnotatedDatasetsResponse",
"ListAnnotationSpecSetsRequest",
"ListAnnotationSpecSetsResponse",
"ListDataItemsRequest",
"ListDataItemsResponse",
"ListDatasetsRequest",
"ListDatasetsResponse",
"ListEvaluationJobsRequest",
"ListEvaluationJobsResponse",
"ListExamplesRequest",
"ListExamplesResponse",
"ListInstructionsRequest",
"ListInstructionsResponse",
"NormalizedBoundingPoly",
"NormalizedPolyline",
"NormalizedVertex",
"ObjectDetectionConfig",
"ObjectDetectionMetrics",
"ObjectTrackingConfig",
"ObjectTrackingFrame",
"OperatorMetadata",
"OutputConfig",
"PauseEvaluationJobRequest",
"PdfInstruction",
"Polyline",
"PolylineConfig",
"PrCurve",
"ResumeEvaluationJobRequest",
"SearchEvaluationsRequest",
"SearchEvaluationsResponse",
"SearchExampleComparisonsRequest",
"SearchExampleComparisonsResponse",
"SegmentationConfig",
"SentimentConfig",
"SequentialSegment",
"StringAggregationType",
"TextClassificationAnnotation",
"TextClassificationConfig",
"TextEntityExtractionAnnotation",
"TextEntityExtractionConfig",
"TextMetadata",
"TextPayload",
"TimeSegment",
"UpdateEvaluationJobRequest",
"Vertex",
"VideoClassificationAnnotation",
"VideoClassificationConfig",
"VideoEventAnnotation",
"VideoObjectTrackingAnnotation",
"VideoPayload",
"VideoThumbnail",
)
| apache-2.0 | 1,552,084,165,026,922,800 | 41.564626 | 76 | 0.823238 | false |
systers/mailman | src/mailman/model/member.py | 7 | 7113 | # Copyright (C) 2007-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Model for members."""
__all__ = [
'Member',
]
from mailman.core.constants import system_preferences
from mailman.database.model import Model
from mailman.database.transaction import dbconnection
from mailman.database.types import Enum, UUID
from mailman.interfaces.action import Action
from mailman.interfaces.address import IAddress
from mailman.interfaces.listmanager import IListManager
from mailman.interfaces.member import (
IMember, MemberRole, MembershipError, UnsubscriptionEvent)
from mailman.interfaces.user import IUser, UnverifiedAddressError
from mailman.interfaces.usermanager import IUserManager
from mailman.utilities.uid import UniqueIDFactory
from sqlalchemy import Column, ForeignKey, Integer, Unicode
from sqlalchemy.orm import relationship
from zope.component import getUtility
from zope.event import notify
from zope.interface import implementer
uid_factory = UniqueIDFactory(context='members')
@implementer(IMember)
class Member(Model):
"""See `IMember`."""
__tablename__ = 'member'
id = Column(Integer, primary_key=True)
_member_id = Column(UUID)
role = Column(Enum(MemberRole))
list_id = Column(Unicode)
moderation_action = Column(Enum(Action))
address_id = Column(Integer, ForeignKey('address.id'))
_address = relationship('Address')
preferences_id = Column(Integer, ForeignKey('preferences.id'))
preferences = relationship('Preferences')
user_id = Column(Integer, ForeignKey('user.id'))
_user = relationship('User')
def __init__(self, role, list_id, subscriber):
self._member_id = uid_factory.new_uid()
self.role = role
self.list_id = list_id
if IAddress.providedBy(subscriber):
self._address = subscriber
# Look this up dynamically.
self._user = None
elif IUser.providedBy(subscriber):
self._user = subscriber
# Look this up dynamically.
self._address = None
else:
raise ValueError('subscriber must be a user or address')
if role in (MemberRole.owner, MemberRole.moderator):
self.moderation_action = Action.accept
elif role is MemberRole.member:
self.moderation_action = getUtility(IListManager).get_by_list_id(
list_id).default_member_action
else:
assert role is MemberRole.nonmember, (
'Invalid MemberRole: {0}'.format(role))
self.moderation_action = getUtility(IListManager).get_by_list_id(
list_id).default_nonmember_action
def __repr__(self):
return '<Member: {0} on {1} as {2}>'.format(
self.address, self.mailing_list.fqdn_listname, self.role)
@property
def mailing_list(self):
"""See `IMember`."""
list_manager = getUtility(IListManager)
return list_manager.get_by_list_id(self.list_id)
@property
def member_id(self):
"""See `IMember`."""
return self._member_id
@property
def address(self):
"""See `IMember`."""
return (self._user.preferred_address
if self._address is None
else self._address)
@address.setter
def address(self, new_address):
"""See `IMember`."""
if self._address is None:
# XXX Either we need a better exception here, or we should allow
# changing a subscription from preferred address to explicit
            # address (and vice versa via del'ing the .address attribute).
raise MembershipError('Membership is via preferred address')
if new_address.verified_on is None:
# A member cannot change their subscription address to an
# unverified address.
raise UnverifiedAddressError(new_address)
user = getUtility(IUserManager).get_user(new_address.email)
if user is None or user != self.user:
raise MembershipError('Address is not controlled by user')
self._address = new_address
@property
def user(self):
"""See `IMember`."""
return (self._user
if self._address is None
else getUtility(IUserManager).get_user(self._address.email))
@property
def subscriber(self):
return (self._user if self._address is None else self._address)
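    # Preference resolution order used by _lookup() below (descriptive note):
    # member-level preferences win, then the subscribed address's preferences,
    # then the controlling user's preferences, and finally the system-wide
    # defaults (or the explicit `default` argument when one was passed).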
def _lookup(self, preference, default=None):
pref = getattr(self.preferences, preference)
if pref is not None:
return pref
pref = getattr(self.address.preferences, preference)
if pref is not None:
return pref
if self.address.user:
pref = getattr(self.address.user.preferences, preference)
if pref is not None:
return pref
if default is None:
return getattr(system_preferences, preference)
return default
@property
def acknowledge_posts(self):
"""See `IMember`."""
return self._lookup('acknowledge_posts')
@property
def preferred_language(self):
"""See `IMember`."""
missing = object()
language = self._lookup('preferred_language', missing)
if language is missing:
language = ((self.mailing_list and
self.mailing_list.preferred_language) or
system_preferences.preferred_language)
return language
@property
def receive_list_copy(self):
"""See `IMember`."""
return self._lookup('receive_list_copy')
@property
def receive_own_postings(self):
"""See `IMember`."""
return self._lookup('receive_own_postings')
@property
def delivery_mode(self):
"""See `IMember`."""
return self._lookup('delivery_mode')
@property
def delivery_status(self):
"""See `IMember`."""
return self._lookup('delivery_status')
@property
def options_url(self):
"""See `IMember`."""
# XXX Um, this is definitely wrong
return 'http://example.com/' + self.address.email
@dbconnection
def unsubscribe(self, store):
"""See `IMember`."""
# Yes, this must get triggered before self is deleted.
notify(UnsubscriptionEvent(self.mailing_list, self))
store.delete(self.preferences)
store.delete(self)
| gpl-3.0 | -6,929,159,021,274,875,000 | 33.529126 | 78 | 0.642204 | false |
machinecoin-project/machinecoin | test/functional/p2p_sendheaders.py | 2 | 26496 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes:
- node0 is the node-under-test. We create two p2p connections to it. The
first p2p connection is a control and should only ever receive inv's. The
second p2p connection tests the headers sending logic.
- node1 is used to create reorgs.
test_null_locators
==================
Sends two getheaders requests with null locator values. First request's hashstop
value refers to validated block, while second request's hashstop value refers to
a block which hasn't been validated. Verifies only the first request returns
headers.
test_nonnull_locators
=====================
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
  * node mines a 7-block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
from test_framework.mininode import (
CBlockHeader,
NODE_WITNESS,
P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_sendheaders,
)
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import (
assert_equal,
sync_blocks,
wait_until,
)
DIRECT_FETCH_RESPONSE_TIME = 0.05
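# Descriptive note: DIRECT_FETCH_RESPONSE_TIME bounds how long Part 4 waits for
# the node's getdata requests when announced headers should trigger a direct
# fetch; it is a tuning constant of this test, not a protocol value.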
class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_announced = False
self.last_blockhash_announced = None
self.recent_headers_announced = []
def send_get_data(self, block_hashes):
"""Request data for a list of block hashes."""
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
def on_headers(self, message):
if len(message.headers):
self.block_announced = True
for x in message.headers:
x.calc_sha256()
# append because headers may be announced over multiple messages.
self.recent_headers_announced.append(x.sha256)
self.last_blockhash_announced = message.headers[-1].sha256
def clear_block_announcements(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_headers_announcement(self, headers):
"""Test whether the last headers announcements received are right.
Headers may be announced across more than one message."""
test_function = lambda: (len(self.recent_headers_announced) >= len(headers))
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
assert_equal(self.recent_headers_announced, headers)
self.block_announced = False
self.last_message.pop("headers", None)
self.recent_headers_announced = []
def check_last_inv_announcement(self, inv):
"""Test whether the last announcement received had the right inv.
inv should be a list of block hashes."""
test_function = lambda: self.block_announced
wait_until(test_function, timeout=60, lock=mininode_lock)
with mininode_lock:
compare_inv = []
if "inv" in self.last_message:
compare_inv = [x.hash for x in self.last_message["inv"].inv]
assert_equal(compare_inv, inv)
self.block_announced = False
self.last_message.pop("inv", None)
class SendHeadersTest(MachinecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def mine_blocks(self, count):
"""Mine count blocks and return the new tip."""
# Clear out block announcements from each p2p listener
[x.clear_block_announcements() for x in self.nodes[0].p2ps]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
def mine_reorg(self, length):
"""Mine a reorg that invalidates length blocks (replacing them with # length+1 blocks).
Note: we clear the state of our p2p connections after the
to-be-reorged-out blocks are mined, so that we don't break later tests.
return the list of block hashes newly mined."""
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
for x in self.nodes[0].p2ps:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_block_announcements()
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height - (length - 1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length + 1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections
inv_node = self.nodes[0].add_p2p_connection(BaseNode())
# Make sure NODE_NETWORK is not set for test_node, so no block download
# will occur outside of direct fetching
test_node = self.nodes[0].add_p2p_connection(BaseNode(), services=NODE_WITNESS)
# Ensure verack's have been processed by our peer
inv_node.sync_with_ping()
test_node.sync_with_ping()
self.test_null_locators(test_node, inv_node)
self.test_nonnull_locators(test_node, inv_node)
def test_null_locators(self, test_node, inv_node):
tip = self.nodes[0].getblockheader(self.nodes[0].generate(1)[0])
tip_hash = int(tip["hash"], 16)
inv_node.check_last_inv_announcement(inv=[tip_hash])
test_node.check_last_inv_announcement(inv=[tip_hash])
self.log.info("Verify getheaders with null locator and valid hashstop returns headers.")
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=tip_hash)
test_node.check_last_headers_announcement(headers=[tip_hash])
self.log.info("Verify getheaders with null locator and invalid hashstop does not return headers.")
block = create_block(int(tip["hash"], 16), create_coinbase(tip["height"] + 1), tip["mediantime"] + 1)
block.solve()
test_node.send_header_for_blocks([block])
test_node.clear_block_announcements()
test_node.send_get_headers(locator=[], hashstop=int(block.hash, 16))
test_node.sync_with_ping()
assert_equal(test_node.block_announced, False)
inv_node.clear_block_announcements()
test_node.send_message(msg_block(block))
inv_node.check_last_inv_announcement(inv=[int(block.hash, 16)])
def test_nonnull_locators(self, test_node, inv_node):
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
self.log.debug("Part 1.{}: starting...".format(i))
old_tip = tip
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.send_get_headers(locator=[old_tip], hashstop=tip)
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_block_announcements() # since we requested headers...
elif i == 2:
# this time announce own block via headers
inv_node.clear_block_announcements()
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height + 1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
wait_until(lambda: inv_node.block_announced, timeout=60, lock=mininode_lock)
inv_node.clear_block_announcements()
test_node.clear_block_announcements()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.send_get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height = self.nodes[0].getblockcount() + 1
block_time += 10 # Advance far enough ahead
for i in range(10):
self.log.debug("Part 2.{}: starting...".format(i))
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
for b in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[inv_node.send_block_inv(x.sha256) for x in blocks]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
self.log.debug("Part 3.{}: starting...".format(j))
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=new_block_hashes)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator=[fork_point])
test_node.check_last_inv_announcement(inv=new_block_hashes)
test_node.send_get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
self.log.debug("Part 3.{}.{}: starting...".format(j, i))
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_inv_announcement(inv=[tip])
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.send_get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
test_node.send_get_data([tip])
test_node.wait_for_block(tip)
if j == 0:
test_node.send_get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
inv_node.check_last_inv_announcement(inv=[tip])
test_node.check_last_headers_announcement(headers=[tip])
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=DIRECT_FETCH_RESPONSE_TIME)
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=DIRECT_FETCH_RESPONSE_TIME)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[test_node.send_message(msg_block(x)) for x in blocks]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[test_node.send_message(msg_block(x)) for x in blocks]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5 * MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i % len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
if __name__ == '__main__':
SendHeadersTest().main()
| mit | 6,390,755,988,870,320,000 | 42.504105 | 130 | 0.612139 | false |
holgerBerger/go_ludalo | top/topgui.py | 1 | 23689 | #!/usr/bin/env python
# IDEAS
# make a job clickable in the area of its first text line -> open a window with job details such as
# aggregated values, the evolution over time and the number of OSTs used
import sys, time
from PySide import QtCore, QtGui
import top
INTERVAL=10
FONTSIZE=11 # FIXME some places need check for this size, is a bit hardcoded
FILESYSTEMLIST = [ "alnec", "nobnec" ]
# helper class
class Job(object):
def __init__(self,name,owner,nodes,meta,wrqs,wbw,rrqs,rbw):
self.name = name
self.owner = owner
self.nodes = nodes
self.meta = meta
self.wrqs = wrqs
self.rrqs = rrqs
#self.readratio = float(rrqs)/float(wrqs)
self.wbw = wbw
self.rbw = rbw
def __repr__(self):
return self.name + " " + self.owner+ " " + str(self.nodes)
# Widget to show the last minutes of load of a filesystem plus its name, 203x102 pixels
class Filesystem(QtGui.QWidget):
def __init__(self, name):
super(Filesystem, self).__init__()
#self.setGeometry(0, 0, 100, 100)
self.setMinimumSize(203, 102)
self.setMaximumSize(203, 102)
self.name = name
self.topfs = top.filesystem("localhost",self.name)
print name
timer = QtCore.QTimer(self)
timer.timeout.connect(self.doUpdate)
timer.start(1000*INTERVAL)
#self.setMouseTracking(True)
self.doUpdate()
self.selected = False
def doUpdate(self):
self.getData()
self.update()
def mousePressEvent(self, event):
self.parentWidget().changeFS(self.name)
def getData(self):
now = self.topfs.getLatestTs()
self.timevalues = self.topfs.getFSvalues(now-100*INTERVAL, now)
def select(self):
self.selected = True
self.update()
def deselect(self):
self.selected = False
self.update()
def paintEvent(self, event):
qp = QtGui.QPainter()
# box + titel
qp.begin(self)
if self.selected:
qp.setPen(QtGui.QPen(QtGui.QColor("black")))
else:
qp.setPen(QtGui.QPen(QtGui.QColor("white")))
qp.setBrush(QtGui.QBrush(QtGui.QColor("white")))
qp.drawRect(0,0,203,102) # border
qp.setBrush(QtGui.QBrush(QtGui.QColor(0,0,0,0)))
qp.drawRect(1,1,202,101) # border
qp.setPen(QtGui.QPen(QtGui.QColor("black")))
qp.setBrush(QtGui.QBrush(QtGui.QColor("black")))
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
qp.drawText(2,FONTSIZE+3,self.name)
# draw data
maxima = self.topfs.readFSMaxima()
        # take the largest ratio of the IOPS counters to their recorded maxima as the FS load
x = 3
for i in sorted(self.timevalues):
ratio_meta = float(self.timevalues[i]["miops"])/float(maxima[1])
ratio_wr = float(self.timevalues[i]["wiops"])/float(maxima[2])
ratio_rr = float(self.timevalues[i]["riops"])/float(maxima[3])
ratio = max(ratio_meta,ratio_rr,ratio_wr)
setHeatMapColor(qp, 0, 1, ratio)
qp.drawLine(x, 99, x, 99-min(80,ratio*80.0))
qp.drawLine(x+1, 99, x+1, 99-min(80,ratio*80.0))
x += 2 # drawn 2 pixels wide
qp.end()
# main window, embeds the list of filesystem widgets on the left side, stacked coordinates on the right side
class Window(QtGui.QWidget):
def __init__(self):
super(Window, self).__init__()
self.W = 1500
self.H = 1100
self.so = StackedCoordinates(FILESYSTEMLIST[0])
self.fslist = {}
for f in FILESYSTEMLIST:
self.fslist[f]=Filesystem(f)
self.fslist[FILESYSTEMLIST[0]].select()
vbox = QtGui.QVBoxLayout()
vbox.setAlignment(QtCore.Qt.AlignTop)
for f in self.fslist:
vbox.addWidget(self.fslist[f])
hbox = QtGui.QHBoxLayout()
# hbox.addStretch(1)
hbox.addLayout(vbox)
hbox.addWidget(self.so)
self.setLayout(hbox)
self.setGeometry(300, 300, self.W, self.H)
self.setWindowTitle('ludalo top')
# self.setStyleSheet("background-color:lightgrey;");
self.show()
# call StackedCoordinates.changeFS
def changeFS(self, name):
for f in self.fslist:
if self.fslist[f].selected:
self.fslist[f].deselect()
self.fslist[name].select()
self.so.changeFS(name)
# stacked coordinates widget, show jobs and load info of FS
# call changeFS to change FS and update
class StackedCoordinates(QtGui.QWidget):
def __init__(self, filesystem):
super(StackedCoordinates, self).__init__()
self.W = 1400
self.H = 1000
#self.setMinimumSize(self.W, self.H)
self.maxima = [0, 0, 0, 0, 0, 0]
self.fsname = filesystem
        # mouseareas contains tuples with y coordinates for jobnames
self.mouseareas={}
self.mouseareas["nodes"] = {}
self.mouseareas["meta"] = {}
self.mouseareas["rqs"] = {}
self.mouseareas["bw"] = {}
self.topfs = top.filesystem("localhost",self.fsname)
self.initUI()
# start timer for auto update # FIXME move to Window and Update all at once with one timer?
timer = QtCore.QTimer(self)
timer.timeout.connect(self.doUpdate)
timer.start(1000*INTERVAL)
# change FS and update
def changeFS(self, fsname):
self.fsname = fsname
self.topfs = top.filesystem("localhost",self.fsname)
self.doUpdate()
def initUI(self):
# self.showFullScreen()
self.setMinimumHeight(800)
self.setMinimumWidth(1000)
self.setGeometry(300, 300, self.W, self.H+50)
self.setWindowTitle('ludalo top')
self.doUpdate()
self.show()
# map mouse position to job
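    # (descriptive note: the widget is divided into eighths of its width; clicks
    # with x below 2*W/8 select the "nodes" column, 3*W/8..4*W/8 "meta",
    # 5*W/8..6*W/8 "rqs" and 7*W/8..W "bw", while clicks on the sloped
    # transition areas in between are ignored)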
def mousePressEvent(self, e):
x,y = e.x(), e.y()
if x > 0 and x < 2 * self.W/8:
col = "nodes"
elif x > 3 * self.W/8 and x < 4 * self.W/8:
col = "meta"
elif x > 5 * self.W/8 and x < 6 * self.W/8:
col = "rqs"
elif x > 7 * self.W/8 and x < 8 * self.W/8:
col = "bw"
else:
return
job = ""
for j in self.mouseareas[col]:
yu, yl = self.mouseareas[col][j]
if y>yu and y<yl:
job = j
break
else:
return
# print "show details about job", job
job = self.topfs.getOneRunningJob(job)
if job != None:
jobs = {job.jobid: job}
jobdata = self.topfs.accumulateJobStats(jobs)
self.tl = TimeLine(jobdata[job.jobid])
# get data and start redraw
def doUpdate(self):
self.getData()
self.update()
    # get a sorted list of Job objects covering the top N jobs
    # by node count, by meta iops, by iops and by bw,
    # plus a dict of dicts giving each job's share of the totals
    # for those 4 metrics,
    # and a tuple with (totalnodes, totalmeta, totalwrqs, totalrrqs, totalwbw, totalrbw)
def getJobList(self, N=5):
self.fs = top.filesystem(top.DBHOST, self.fsname)
(timestamp, nodes) = self.fs.currentNodesstats()
jobs = self.fs.mapNodesToJobs(timestamp, nodes)
joblist = {}
# convert in local format
for j in jobs:
cj = jobs[j]
joblist[j] = Job(j,cj.owner, len(cj.nodelist), cj.miops/INTERVAL, cj.wiops/INTERVAL, cj.wbw/INTERVAL, cj.riops/INTERVAL, cj.rbw/INTERVAL )
totalnodes= 0
totalmeta = 0
totalwrqs = 0
totalrrqs = 0
totalwbw = 0
totalrbw = 0
for j in joblist:
totalnodes += joblist[j].nodes
totalmeta += joblist[j].meta
totalwrqs += joblist[j].wrqs
totalrrqs += joblist[j].rrqs
totalwbw += joblist[j].wbw
totalrbw += joblist[j].rbw
totals = (totalnodes, totalmeta, totalwrqs, totalrrqs, totalwbw, totalrbw)
# idea: make a list of
# N largest nodes
# N nodes doing most metadata
# N nodes doing most rqs
# N nodes doing most BW
        # if a job is already in the list, take the next one from that ranking
        # this combined list will be displayed (double/dashed line in between??)
toplist=[]
for list in [
sorted(joblist.values(), key=lambda x: x.nodes, reverse=True),
sorted(joblist.values(), key=lambda x: x.meta, reverse=True),
sorted(joblist.values(), key=lambda x: x.wrqs+x.rrqs, reverse=True),
sorted(joblist.values(), key=lambda x: x.wbw+x.rbw, reverse=True),
]:
i=0
for j in list:
if j not in toplist:
toplist.append(j)
i+=1
if i>=N: break
listnodes=0
for j in toplist:
listnodes += j.nodes
        # sort again, so the combined list is now ordered by node count
toplist = sorted(toplist, key=lambda x: x.nodes, reverse=True)
shares={}
for j in toplist:
if totalmeta == 0: totalmeta = 1
if (totalwrqs+totalrrqs) == 0: totalrrqs = 1
if (totalwbw+totalrbw) == 0: totalrbw = 1
shares[j.name]={
"name":j.name,
"owner": j.owner,
# "nodes": float(j.nodes)/totalnodes,
"nodes": float(j.nodes)/listnodes,
"meta": float(j.meta)/totalmeta,
"rqs": float(j.wrqs+j.rrqs)/(totalwrqs+totalrrqs),
"bw": float(j.wbw+j.rbw)/(totalwbw+totalrbw)
}
# get maxima from DB
self.maxima = self.fs.readFSMaxima()
#print "from db:", self.maxima
return (toplist, shares, totals)
# get data from DB
def getData(self):
(self.jobs,self.shares,self.totals) = self.getJobList(5) # FIXME should be decoupled from redraw!
# draw everything
def paintEvent(self, event):
geometry = self.geometry()
self.W = geometry.width()
self.H = geometry.height()-100 # space for lower bound
(jobs, shares, totals) = (self.jobs, self.shares, self.totals)
# print "paintEvent"
qp = QtGui.QPainter()
qp.begin(self)
# background
qp.setPen(QtGui.QPen(QtGui.QColor("black")))
qp.drawRect(0, 0, self.W-1, self.H-1+100)
# polygons
off = [0, 0, 0, 0]
lastline = [ QtCore.QPoint(0,0), QtCore.QPoint(0,0), QtCore.QPoint(0,0), QtCore.QPoint(0,0),
QtCore.QPoint(0,0), QtCore.QPoint(0,0), QtCore.QPoint(0,0), QtCore.QPoint(self.W,0) ]
        # mouseareas contains tuples with y coordinates for jobnames
self.mouseareas={}
self.mouseareas["nodes"] = {}
self.mouseareas["meta"] = {}
self.mouseareas["rqs"] = {}
self.mouseareas["bw"] = {}
counter = 1
for job in jobs:
j = job.name
newline = [
# size
QtCore.QPoint(0 * self.W/8, self.H*shares[j]["nodes"] + off[0]),
QtCore.QPoint(2 * self.W/8, self.H*shares[j]["nodes"] + off[0]),
# transition
QtCore.QPoint(3 * self.W/8, self.H*shares[j]["meta"] + off[1]),
# meta
QtCore.QPoint(4 * self.W/8, self.H*shares[j]["meta"] + off[1]),
# transition
QtCore.QPoint(5 * self.W/8, self.H*shares[j]["rqs"] + off[2]),
# rqs
QtCore.QPoint(6 * self.W/8, self.H*shares[j]["rqs"] + off[2]),
# transition
QtCore.QPoint(7 * self.W/8, self.H*shares[j]["bw"] + off[3]),
# bw
QtCore.QPoint(8 * self.W/8, self.H*shares[j]["bw"] + off[3]),
]
self.mouseareas["nodes"][j] = (off[0], off[0]+self.H*shares[j]["nodes"])
self.mouseareas["meta"][j] = (off[1], off[1]+self.H*shares[j]["meta"])
self.mouseareas["rqs"][j] = (off[2], off[2]+self.H*shares[j]["rqs"])
self.mouseareas["bw"][j] = (off[3], off[3]+self.H*shares[j]["bw"])
off[0] += self.H*shares[j]["nodes"]
off[1] += self.H*shares[j]["meta"]
off[2] += self.H*shares[j]["rqs"]
off[3] += self.H*shares[j]["bw"]
points=[]
points.extend(list(reversed(lastline)))
points.extend(newline)
lastline = newline
# print counter
brush = QtGui.QBrush(QtGui.QColor(*rgb(1,len(jobs),len(jobs)-counter+1)))
qp.setBrush(brush)
pen = QtGui.QPen(QtGui.QColor(*rgb(1,len(jobs),len(jobs)-counter+1)))
qp.setPen(pen)
qp.drawPolygon(points, QtCore.Qt.OddEvenFill)
# labels
pen = QtGui.QPen(QtGui.QColor(0,0,0,255))
qp.setPen(pen)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
displayed=False
# name + owner + nodes
if self.H*shares[j]["nodes"] > FONTSIZE:
qp.drawText(10, off[0], j.split("-")[0])
qp.drawText(150, off[0], job.owner)
qp.drawText(250, off[0], str(job.nodes)+" nodes")
displayed=True
# meta
if self.H*shares[j]["meta"] > FONTSIZE:
qp.drawText(2 + 3 * self.W/8, off[1], str(job.meta)+" meta ops/s")
if (not displayed and (self.H*shares[j]["meta"] > FONTSIZE * 2)) or (self.H*shares[j]["meta"] > FONTSIZE * 3):
qp.setFont(QtGui.QFont('Decorative', FONTSIZE-2))
qp.drawText(2 + 3 * self.W/8, off[1]-FONTSIZE-2, j.split("-")[0]+" "+job.owner)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
displayed=True
# rqs
if self.H*shares[j]["rqs"] > FONTSIZE:
qp.drawText(2 + 5 * self.W/8, off[2], str(job.wrqs+job.rrqs)+" iops/s")
if (not displayed and (self.H*shares[j]["rqs"] > FONTSIZE * 2)) or (self.H*shares[j]["rqs"] > FONTSIZE * 3):
qp.setFont(QtGui.QFont('Decorative', FONTSIZE-2))
qp.drawText(2 + 5 * self.W/8, off[2]-FONTSIZE-2, j.split("-")[0]+" "+job.owner)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
displayed=True
if self.H*shares[j]["rqs"] > FONTSIZE * 4:
qp.setFont(QtGui.QFont('Decorative', FONTSIZE-3))
qp.drawText(2 + 5 * self.W/8, off[2]-2*FONTSIZE-2, "read %-1.2f" % (job.rrqs))
qp.drawText(2 + 5 * self.W/8, off[2]-3*FONTSIZE-2, "write %-1.2f" % (job.wrqs))
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
# bw
if self.H*shares[j]["bw"] > FONTSIZE:
(bw,unit) = normalize_bw(job.wbw+job.rbw)
qp.drawText(2 + 7 * self.W/8, off[3], "%-1.2f" % (bw)+unit)
if (not displayed and (self.H*shares[j]["bw"] > FONTSIZE * 2)) or (self.H*shares[j]["bw"] > FONTSIZE * 3):
qp.setFont(QtGui.QFont('Decorative', FONTSIZE-2))
qp.drawText(2 + 7 * self.W/8, off[3]-FONTSIZE-2, j.split("-")[0]+" "+job.owner)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
displayed=True
if self.H*shares[j]["bw"] > FONTSIZE * 4:
(wbw,wunit) = normalize_bw(job.wbw)
(rbw,runit) = normalize_bw(job.rbw)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE-3))
qp.drawText(2 + 7 * self.W/8, off[3]-2*FONTSIZE-2, "read %-1.2f%s" % (rbw,runit))
qp.drawText(2 + 7 * self.W/8, off[3]-3*FONTSIZE-2, "write %-1.2f%s" % (wbw,wunit))
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
counter += 1
        # fill remainder at bottom (in case the top jobs do not fill 100%)
newline = [ QtCore.QPoint(0,self.H), QtCore.QPoint(self.W,self.H) ]
points=[]
points.extend(list(reversed(lastline)))
points.extend(newline)
brush = QtGui.QBrush(QtGui.QColor(180,180,180))
qp.setBrush(brush)
pen = QtGui.QPen(QtGui.QColor(200,200,200))
qp.setPen(pen)
qp.drawPolygon(points, QtCore.Qt.OddEvenFill)
# search maxima
changed = False
for i in range(0,6):
if totals[i] > self.maxima[i]:
self.maxima[i] = totals[i]
changed = True
# update DB
if changed:
self.fs.writeFSMaxima(self.maxima)
# print totals
pen = QtGui.QPen(QtGui.QColor(0,0,0,255))
qp.setPen(pen)
brush = QtGui.QBrush(QtGui.QColor(0,0,0,0))
qp.setBrush(brush)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE+2))
qp.drawText(10, self.H+30, self.fsname+" TOTAL")
setHeatMapColor(qp, 0, self.maxima[0],totals[0])
qp.drawText(250, self.H+30, str(totals[0])+" nodes")
s = 98.0*(float(totals[0])/float(self.maxima[0]))
qp.drawRect(250-20, self.H+100-s-1, 10, s)
# meta + bar
setHeatMapColor(qp, 0, self.maxima[1],totals[1])
qp.drawText(2 + 3 * self.W/8, self.H+30, str(totals[1])+" meta ops/s")
s = 98.0*(float(totals[1])/float(self.maxima[1]))
qp.drawRect(2 + 3 * self.W/8-20, self.H+100-s-1, 10, s)
qp.setFont(QtGui.QFont('Decorative', (FONTSIZE+1)))
# write iops + bar
setHeatMapColor(qp, 0, self.maxima[2],totals[2])
qp.drawText(2 + 5 * self.W/8, self.H+20, str(totals[2])+" write iops/s")
s = 98.0*(float(totals[2])/float(self.maxima[2]))
qp.drawRect(2 + 5 * self.W/8-30, self.H+100-s-1, 10, s)
# read iops + bar
setHeatMapColor(qp, 0, self.maxima[3],totals[3])
qp.drawText(2 + 5 * self.W/8, self.H+40, str(totals[3])+" read iops/s")
s = 98.0*(float(totals[3])/float(self.maxima[3]))
qp.drawRect(2 + 5 * self.W/8-20, self.H+100-s-1, 10, s)
# write BW + bar
setHeatMapColor(qp, 0, self.maxima[4],totals[4])
(bw,unit) = normalize_bw(totals[4])
qp.drawText(2 + 7 * self.W/8, self.H+20, "write %6.2f" % (bw)+unit)
s = 98.0*(float(totals[4])/float(self.maxima[4]))
qp.drawRect(2 + 7 * self.W/8-30, self.H+100-s-1, 10, s)
# read BW + bar
setHeatMapColor(qp, 0, self.maxima[5],totals[5])
(bw,unit) = normalize_bw(totals[5])
qp.drawText(2 + 7 * self.W/8, self.H+40, "read %6.2f" % (bw)+unit)
s = 98.0*(float(totals[5])/float(self.maxima[5]))
qp.drawRect(2 + 7 * self.W/8-20, self.H+100-s-1, 10, s)
# print maxima
setBlack(qp)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE+2))
qp.drawText(10, self.H+70, self.fsname+" MAXIMUM")
qp.drawText(250, self.H+70, str(self.maxima[0])+" nodes")
qp.drawText(2 + 3 * self.W/8, self.H+70, str(self.maxima[1])+" meta ops/s")
qp.setFont(QtGui.QFont('Decorative', (FONTSIZE+1)))
qp.drawText(2 + 5 * self.W/8, self.H+65, str(self.maxima[2])+" write iops/s")
qp.drawText(2 + 5 * self.W/8, self.H+85, str(self.maxima[3])+" read iops/s")
qp.setFont(QtGui.QFont('Decorative', (FONTSIZE+1)))
(bw,unit) = normalize_bw(self.maxima[4])
qp.drawText(2 + 7 * self.W/8, self.H+65, "write %1.2f" % (bw)+unit)
(bw,unit) = normalize_bw(self.maxima[5])
qp.drawText(2 + 7 * self.W/8, self.H+85, "read %1.2f" % (bw)+unit)
qp.end()
# widget to show timeline of job
class TimeLine(QtGui.QWidget):
def __init__(self, job):
super(TimeLine, self).__init__()
self.job = job
self.jobid = job.jobid
self.setMinimumSize(800, 200)
        self.setWindowTitle('job details for '+self.jobid)
self.setGeometry(300, 400, 800, 200)
self.setStyleSheet("background-color:white;");
self.setWindowFlags(QtCore.Qt.Window)
self.show()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
qp.setFont(QtGui.QFont('Decorative', FONTSIZE))
qp.drawText(10,20, "Jobid: "+self.jobid)
qp.drawText(10,20+FONTSIZE*2, "Owner: "+self.job.owner)
qp.drawText(10,20+FONTSIZE*4, "Nodes: "+str(len(self.job.nodelist)))
qp.drawText(250,20, "Start : "+time.ctime(self.job.start))
if self.job.end==-1:
qp.drawText(250,20+FONTSIZE*2, "End : still running")
end = time.time()
else:
qp.drawText(250,20+FONTSIZE*2, "End : "+time.ctime(self.job.end))
end = self.job.end
qp.drawText(250,20+FONTSIZE*4, "Walltime: "+normalize_time(end-self.job.start))
qp.drawText(550,20, "Cmd : "+self.job.cmd)
qp.drawText(10,20+FONTSIZE*6, "Bytes written: "+normalize_size(self.job.wbw))
qp.drawText(250,20+FONTSIZE*6, "Bytes read: "+normalize_size(self.job.rbw))
qp.drawText(550,20+FONTSIZE*6, "Metadata operations: "+str(self.job.miops))
qp.drawText(10,20+FONTSIZE*8, "Write Requests: "+str(self.job.wiops))
qp.drawText(250,20+FONTSIZE*8, "Read Requests: "+str(self.job.riops))
qp.end()
# rgb heatmap from stackoverflow
# http://stackoverflow.com/questions/20792445/calculate-rgb-value-for-a-range-of-values-to-create-heat-map
def rgb(minimum, maximum, value, t=128):
minimum, maximum = float(minimum), float(maximum)
ratio = 2 * (value-minimum) / (maximum - minimum)
b = int(max(0, 255*(1 - ratio)))
r = int(max(0, 255*(ratio - 1)))
g = 255 - b - r
# print r,g,b
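    # e.g. rgb(0, 100, 0) -> (0, 0, 255, 128) (blue), rgb(0, 100, 50) ->
    # (0, 255, 0, 128) (green), rgb(0, 100, 100) -> (255, 0, 0, 128) (red);
    # t is the returned alpha value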
return r, g, b, t
# reset qp to black
def setBlack(qp):
pen = QtGui.QPen(QtGui.QColor(0,0,0,255))
qp.setPen(pen)
brush = QtGui.QBrush(QtGui.QColor(0,0,0,255))
qp.setBrush(brush)
# heatmap color helper, alpha fixed at 255 (fully opaque)
def setHeatMapColor(qp, vmin, vmax, value):
    if value > vmax: value = vmax
    pen = QtGui.QPen(QtGui.QColor(*rgb(vmin, vmax, value, 255)))
    qp.setPen(pen)
    brush = QtGui.QBrush(QtGui.QColor(*rgb(vmin, vmax, value, 255)))
    qp.setBrush(brush)
# helper for time difference normalization, returns hh:mm:ss string
def normalize_time(secs):
    secs = int(secs)
    s = secs % 60
    m = (secs // 60) % 60
    h = secs // 3600
    return "%2.2d:%2.2d:%2.2d" % (h,m,s)
# helper for BW units
def normalize_bw(bw):
bw = float(bw)
unit = " B/s"
if bw > 1000:
bw /= 1000
unit = " KB/s"
if bw > 1000:
bw /= 1000
unit = " MB/s"
if bw > 1000:
bw /= 1000
unit = " GB/s"
return (bw,unit)
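    # e.g. normalize_bw(2500000) -> (2.5, " MB/s")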
# helper for byte-size units, returns formatted string
def normalize_size(bw):
bw = float(bw)
unit = " B"
if bw > 1000:
bw /= 1000
unit = " KB"
if bw > 1000:
bw /= 1000
unit = " MB"
if bw > 1000:
bw /= 1000
unit = " GB"
return "%1.2f %s" % (bw,unit)
# MAIN MESS
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| gpl-2.0 | 9,095,556,538,172,107,000 | 35.00152 | 150 | 0.536452 | false |
blackrain-audio/supercollider | external_libraries/simplejson-2.3.2/__init__.py | 44 | 18618 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.3.2'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <[email protected]>'
from decimal import Decimal
from .decoder import JSONDecoder, JSONDecodeError
from .encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from ._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
**kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
    If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
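        # illustrative: loads('1.1', use_decimal=True) == Decimal('1.1'); it is
        # shorthand for parse_float=Decimal, so passing both raises TypeError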
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
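    # illustrative: _toggle_speedups(False) switches to the pure-Python
    # scanner/encoder, _toggle_speedups(True) restores the C speedups when the
    # _speedups extension is importable; both rebuild the module-level default
    # encoder and decoder below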
import simplejson.decoder as dec
import simplejson.encoder as enc
import simplejson.scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
| gpl-3.0 | -2,998,556,551,591,209,500 | 38.95279 | 79 | 0.653078 | false |
archf/ansible | lib/ansible/modules/system/filesystem.py | 16 | 8834 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author: "Alexander Bulimov (@abulimov)"
module: filesystem
short_description: Makes file system on block device
description:
- This module creates file system.
version_added: "1.2"
options:
fstype:
description:
- File System type to be created.
- reiserfs support was added in 2.2.
required: true
dev:
description:
- Target block device.
required: true
force:
choices: [ "yes", "no" ]
default: "no"
description:
      - If yes, allows creating a new filesystem on a device that already has one.
required: false
resizefs:
choices: [ "yes", "no" ]
default: "no"
description:
      - If yes and the block device and filesystem sizes differ, grow the filesystem into the space. Note that XFS will only grow if mounted.
required: false
version_added: "2.0"
opts:
description:
- List of options to be passed to mkfs command.
notes:
- uses mkfs command
'''
EXAMPLES = '''
# Create an ext2 filesystem on /dev/sdb1.
- filesystem:
fstype: ext2
dev: /dev/sdb1
# Create an ext4 filesystem on /dev/sdb1 and check disk blocks.
- filesystem:
fstype: ext4
dev: /dev/sdb1
opts: -cc
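# Illustrative: grow an existing xfs filesystem to fill /dev/sdb1
# (xfs must be mounted to grow).
- filesystem:
    fstype: xfs
    dev: /dev/sdb1
    resizefs: yes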
'''
import os
from ansible.module_utils.basic import AnsibleModule
def _get_dev_size(dev, module):
""" Return size in bytes of device. Returns int """
blockdev_cmd = module.get_bin_path("blockdev", required=True)
rc, devsize_in_bytes, err = module.run_command("%s %s %s" % (blockdev_cmd, "--getsize64", dev))
return int(devsize_in_bytes)
def _get_fs_size(fssize_cmd, dev, module):
""" Return size in bytes of filesystem on device. Returns int """
cmd = module.get_bin_path(fssize_cmd, required=True)
if 'tune2fs' == fssize_cmd:
# Get Block count and Block size
rc, size, err = module.run_command("%s %s %s" % (cmd, '-l', dev))
if rc == 0:
for line in size.splitlines():
if 'Block count:' in line:
block_count = int(line.split(':')[1].strip())
elif 'Block size:' in line:
block_size = int(line.split(':')[1].strip())
break
else:
module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err )
elif 'xfs_growfs' == fssize_cmd:
# Get Block count and Block size
rc, size, err = module.run_command([cmd, '-n', dev])
if rc == 0:
for line in size.splitlines():
col = line.split('=')
if col[0].strip() == 'data':
if col[1].strip() != 'bsize':
module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")')
if col[2].split()[1] != 'blocks':
module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")')
block_size = int(col[2].split()[0])
block_count = int(col[3].split(',')[0])
break
else:
module.fail_json(msg="Failed to get block count and block size of %s with %s" % (dev, cmd), rc=rc, err=err )
elif 'btrfs' == fssize_cmd:
#ToDo
# There is no way to get the blocksize and blockcount for btrfs filesystems
block_size = 1
block_count = 1
return block_size*block_count
def main():
module = AnsibleModule(
argument_spec = dict(
fstype=dict(required=True, aliases=['type']),
dev=dict(required=True, aliases=['device']),
opts=dict(),
force=dict(type='bool', default='no'),
resizefs=dict(type='bool', default='no'),
),
supports_check_mode=True,
)
# There is no "single command" to manipulate filesystems, so we map them all out and their options
fs_cmd_map = {
'ext2' : {
'mkfs' : 'mkfs.ext2',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
'fsinfo': 'tune2fs',
},
'ext3' : {
'mkfs' : 'mkfs.ext3',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
'fsinfo': 'tune2fs',
},
'ext4' : {
'mkfs' : 'mkfs.ext4',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
'fsinfo': 'tune2fs',
},
'reiserfs' : {
'mkfs' : 'mkfs.reiserfs',
'grow' : 'resize_reiserfs',
'grow_flag' : None,
'force_flag' : '-f',
'fsinfo': 'reiserfstune',
},
'ext4dev' : {
'mkfs' : 'mkfs.ext4',
'grow' : 'resize2fs',
'grow_flag' : None,
'force_flag' : '-F',
'fsinfo': 'tune2fs',
},
'xfs' : {
'mkfs' : 'mkfs.xfs',
'grow' : 'xfs_growfs',
'grow_flag' : None,
'force_flag' : '-f',
'fsinfo': 'xfs_growfs',
},
'btrfs' : {
'mkfs' : 'mkfs.btrfs',
'grow' : 'btrfs',
'grow_flag' : 'filesystem resize',
'force_flag' : '-f',
'fsinfo': 'btrfs',
}
}
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.boolean(module.params['force'])
resizefs = module.boolean(module.params['resizefs'])
changed = False
try:
_ = fs_cmd_map[fstype]
except KeyError:
module.exit_json(changed=False, msg="WARNING: module does not support this filesystem yet. %s" % fstype)
mkfscmd = fs_cmd_map[fstype]['mkfs']
force_flag = fs_cmd_map[fstype]['force_flag']
growcmd = fs_cmd_map[fstype]['grow']
fssize_cmd = fs_cmd_map[fstype]['fsinfo']
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found."%dev)
cmd = module.get_bin_path('blkid', required=True)
rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
fs = raw_fs.strip()
if fs == fstype and resizefs is False and not force:
module.exit_json(changed=False)
elif fs == fstype and resizefs is True:
# Get dev and fs size and compare
devsize_in_bytes = _get_dev_size(dev, module)
fssize_in_bytes = _get_fs_size(fssize_cmd, dev, module)
if fssize_in_bytes < devsize_in_bytes:
fs_smaller = True
else:
fs_smaller = False
if module.check_mode and fs_smaller:
module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (fstype,dev))
elif module.check_mode and not fs_smaller:
module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
elif fs_smaller:
cmd = module.get_bin_path(growcmd, required=True)
rc,out,err = module.run_command("%s %s" % (cmd, dev))
# Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
# in the future, you would have to parse the output to determine this.
# thankfully, these are safe operations if no change is made.
if rc == 0:
module.exit_json(changed=True, msg=out)
else:
module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
else:
module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err)
### create fs
if module.check_mode:
changed = True
else:
mkfs = module.get_bin_path(mkfscmd, required=True)
cmd = None
if opts is None:
cmd = "%s %s '%s'" % (mkfs, force_flag, dev)
else:
cmd = "%s %s %s '%s'" % (mkfs, force_flag, opts, dev)
rc,_,err = module.run_command(cmd)
if rc == 0:
changed = True
else:
module.fail_json(msg="Creating filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,855,467,766,178,922,500 | 32.589354 | 134 | 0.546525 | false |
texta-tk/texta | grammar_builder/migrations/0001_initial.py | 1 | 2532 | # Generated by Django 2.0.4 on 2019-01-10 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('permission_admin', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Grammar',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=64)),
('json', models.CharField(max_length=2048)),
('last_modified', models.DateTimeField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='permission_admin.Dataset')),
],
),
migrations.CreateModel(
name='GrammarComponent',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=256)),
('type', models.CharField(max_length=64)),
('content', models.CharField(blank=True, max_length=512, null=True)),
('layer', models.CharField(blank=True, max_length=64, null=True)),
('join_by', models.CharField(blank=True, max_length=32, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='grammar_components', to=settings.AUTH_USER_MODEL)),
('sub_components', models.ManyToManyField(to='grammar_builder.GrammarComponent')),
],
),
migrations.CreateModel(
name='GrammarPageMapping',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('search_id', models.IntegerField()),
('inclusive_grammar', models.IntegerField()),
('exclusive_grammar', models.IntegerField()),
('polarity', models.CharField(max_length=8)),
('page', models.IntegerField()),
('elastic_start', models.IntegerField()),
('elastic_end', models.IntegerField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| gpl-3.0 | 1,643,911,842,151,949,600 | 44.214286 | 155 | 0.579779 | false |
valum-framework/FrameworkBenchmarks | toolset/benchmark/benchmarker.py | 1 | 47579 | from setup.linux import setup_util
from benchmark import framework_test
from benchmark.test_types import *
from utils import header
from utils import gather_tests
from utils import gather_frameworks
from utils import verify_database_connections
import os
import uuid
import shutil
import stat
import json
import requests
import subprocess
import traceback
import time
import pprint
import csv
import sys
import logging
import socket
import threading
import textwrap
from pprint import pprint
from contextlib import contextmanager
from multiprocessing import Process
from datetime import datetime
# Cross-platform colored text
from colorama import Fore, Back, Style
# Text-based progress indicators
import progressbar
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
def run_list_tests(self):
all_tests = self.__gather_tests
for test in all_tests:
print test.name
self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
def run_list_test_metadata(self):
all_tests = self.__gather_tests
all_tests_json = json.dumps(map(lambda test: {
"name": test.name,
"approach": test.approach,
"classification": test.classification,
"database": test.database,
"framework": test.framework,
"language": test.language,
"orm": test.orm,
"platform": test.platform,
"webserver": test.webserver,
"os": test.os,
"database_os": test.database_os,
"display_name": test.display_name,
"notes": test.notes,
"versus": test.versus
}, all_tests))
with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
f.write(all_tests_json)
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
all_tests = self.__gather_tests
for test in all_tests:
test.parse_all()
self.__parse_results(all_tests)
self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary change. Then going through each test,
# running their setup script, verifying the URLs, and
# running benchmarks against them.
############################################################
def run(self):
##########################
# Generate metadata
##########################
self.run_list_test_metadata()
##########################
# Get a list of all known
# tests that we can run.
##########################
all_tests = self.__gather_tests
##########################
# Setup client/server
##########################
print header("Preparing Server, Database, and Client ...", top='=', bottom='=')
with self.quiet_out.enable():
self.__setup_server()
self.__setup_database()
self.__setup_client()
## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
#if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
# raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
##########################
# Run tests
##########################
print header("Running Tests...", top='=', bottom='=')
result = self.__run_tests(all_tests)
##########################
# Parse results
##########################
if self.mode == "benchmark":
print header("Parsing Results ...", top='=', bottom='=')
self.__parse_results(all_tests)
self.__set_completion_time()
self.__upload_results()
self.__finish()
return result
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
    # generates a fully qualified sftp command line for the database machine
############################################################
def database_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.database_identity_file != None:
sftp_string += " -i " + self.database_identity_file + " "
return sftp_string + self.database_user + "@" + self.database_host
############################################################
# End database_sftp_string
############################################################
############################################################
# client_sftp_string(batch_file)
# generates a fully qualified URL for sftp to client
############################################################
def client_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.client_identity_file != None:
sftp_string += " -i " + self.client_identity_file + " "
return sftp_string + self.client_user + "@" + self.client_host
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
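        # e.g. with server_host "10.0.0.1": generate_url("/json", 8080) -> "10.0.0.1:8080/json"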
return self.server_host + ":" + str(port) + url
############################################################
# End generate_url
############################################################
############################################################
# get_output_file(test_name, test_type)
# returns the output file name for this test_name and
    # test_type: timestamp/test_name/test_type/raw.txt
############################################################
def get_output_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_name, test_type, "raw.txt")
############################################################
# End get_output_file
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
    # timestamp/test_name/test_type/raw.txt
############################################################
def output_file(self, test_name, test_type):
path = self.get_output_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End output_file
############################################################
############################################################
# get_stats_file(test_name, test_type)
# returns the stats file name for this test_name and
    # test_type: timestamp/test_name/test_type/stats.txt
############################################################
def get_stats_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_name, test_type, "stats.txt")
############################################################
# End get_stats_file
############################################################
############################################################
# stats_file(test_name, test_type)
# returns the stats file for this test_name and test_type
    # timestamp/test_name/test_type/stats.txt
############################################################
def stats_file(self, test_name, test_type):
path = self.get_stats_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End stats_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
path = os.path.join(self.fwroot, self.result_directory, self.timestamp)
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# End full_results_directory
############################################################
############################################################
# report_verify_results
# Used by FrameworkTest to add verification details to our results
#
# TODO: Technically this is an IPC violation - we are accessing
# the parent process' memory from the child process
############################################################
def report_verify_results(self, framework, test, result):
if framework.name not in self.results['verify'].keys():
self.results['verify'][framework.name] = dict()
self.results['verify'][framework.name][test] = result
############################################################
# report_benchmark_results
# Used by FrameworkTest to add benchmark data to this
#
# TODO: Technically this is an IPC violation - we are accessing
# the parent process' memory from the child process
############################################################
def report_benchmark_results(self, framework, test, results):
if test not in self.results['rawData'].keys():
self.results['rawData'][test] = dict()
# If results has a size from the parse, then it succeeded.
if results:
self.results['rawData'][test][framework.name] = results
# This may already be set for single-tests
if framework.name not in self.results['succeeded'][test]:
self.results['succeeded'][test].append(framework.name)
else:
# This may already be set for single-tests
if framework.name not in self.results['failed'][test]:
self.results['failed'][test].append(framework.name)
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
@property
def __gather_tests(self):
tests = gather_tests(include=self.test,
exclude=self.exclude,
benchmarker=self)
# If the tests have been interrupted somehow, then we want to resume them where we left
# off, rather than starting from the beginning
if os.path.isfile(self.current_benchmark):
with open(self.current_benchmark, 'r') as interrupted_benchmark:
interrupt_bench = interrupted_benchmark.read().strip()
for index, atest in enumerate(tests):
if atest.name == interrupt_bench:
tests = tests[index:]
break
return tests
############################################################
# End __gather_tests
############################################################
############################################################
# Makes any necessary changes to the server that should be
    # made before running the tests. This involves setting kernel
    # parameters to allow for more connections, or more file
    # descriptors
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
def __setup_server(self):
try:
if os.name == 'nt':
return True
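            # raise connection backlog, TIME_WAIT reuse and shared-memory limits so
            # the OS defaults are not the bottleneck; same rationale as the database
            # and client tuning below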
subprocess.call(['sudo', 'sysctl', '-w', 'net.ipv4.tcp_max_syn_backlog=65535'], stdout=self.quiet_out, stderr=subprocess.STDOUT)
subprocess.call(['sudo', 'sysctl', '-w', 'net.core.somaxconn=65535'], stdout=self.quiet_out, stderr=subprocess.STDOUT)
subprocess.call(['sudo', 'sysctl', 'net.ipv4.tcp_tw_reuse=1'], stdout=self.quiet_out, stderr=subprocess.STDOUT)
subprocess.call(['sudo', 'sysctl', 'net.ipv4.tcp_tw_recycle=1'], stdout=self.quiet_out, stderr=subprocess.STDOUT)
subprocess.call(['sudo', 'sysctl', '-w', 'kernel.shmmax=134217728'], stdout=self.quiet_out, stderr=subprocess.STDOUT)
subprocess.call(['sudo', 'sysctl', '-w', 'kernel.shmall=2097152'], stdout=self.quiet_out, stderr=subprocess.STDOUT)
with open(os.path.join(self.full_results_directory(), 'sysctl.txt'), 'w') as f:
f.write(subprocess.check_output(['sudo','sysctl','-a']))
except subprocess.CalledProcessError:
return False
############################################################
# End __setup_server
############################################################
############################################################
# Clean up any processes that run with root privileges
############################################################
def __cleanup_leftover_processes_before_test(self):
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=self.quiet_out, stderr=subprocess.STDOUT)
p.communicate("""
sudo service mysql stop
sudo service mongod stop
sudo kill -9 $(pgrep postgres)
sudo kill -9 $(pgrep mysql)
sudo kill -9 $(pgrep mongo)
""")
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=self.quiet_out, stderr=subprocess.STDOUT)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo sysctl -w kernel.sched_autogroup_enabled=0
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
sudo sysctl -w kernel.sem="250 32000 256 512"
""")
# TODO - print kernel configuration to file
# echo "Printing kernel configuration:" && sudo sysctl -a
# Explanations:
# net.ipv4.tcp_max_syn_backlog, net.core.somaxconn, kernel.sched_autogroup_enabled: http://tweaked.io/guide/kernel/
# ulimit -n: http://www.cyberciti.biz/faq/linux-increase-the-maximum-number-of-open-files/
# net.ipv4.tcp_tw_*: http://www.linuxbrigade.com/reduce-time_wait-socket-connections/
# kernel.shm*: http://seriousbirder.com/blogs/linux-understanding-shmmax-and-shmall-settings/
# For kernel.sem: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/5/html/Tuning_and_Optimizing_Red_Hat_Enterprise_Linux_for_Oracle_9i_and_10g_Databases/chap-Oracle_9i_and_10g_Tuning_Guide-Setting_Semaphores.html
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
p = subprocess.Popen(self.client_ssh_string, stdin=subprocess.PIPE, shell=True, stdout=self.quiet_out, stderr=subprocess.STDOUT)
p.communicate("""
sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
sudo sysctl -w net.core.somaxconn=65535
sudo -s ulimit -n 65535
sudo sysctl net.ipv4.tcp_tw_reuse=1
sudo sysctl net.ipv4.tcp_tw_recycle=1
sudo sysctl -w kernel.shmmax=2147483648
sudo sysctl -w kernel.shmall=2097152
""")
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
# 2013-10-02 ASB Calls each test passed in tests to
# __run_test in a separate process. Each
    #                 test is given a set amount of time, and if it
    #                 exceeds that limit the harness kills the child
    #                 process (and subsequently all of its child
    #                 processes).  Uses the multiprocessing module.
############################################################
def __run_tests(self, tests):
if len(tests) == 0:
return 0
logging.debug("Start __run_tests.")
logging.debug("__name__ = %s",__name__)
error_happened = False
if self.os.lower() == 'windows':
logging.debug("Executing __run_tests on Windows")
for test in tests:
with open(self.current_benchmark, 'w') as benchmark_resume_file:
benchmark_resume_file.write(test.name)
with self.quiet_out.enable():
if self.__run_test(test) != 0:
error_happened = True
else:
logging.debug("Executing __run_tests on Linux")
# Setup a nice progressbar and ETA indicator
widgets = [self.mode, ': ', progressbar.Percentage(),
' ', progressbar.Bar(),
' Rough ', progressbar.ETA()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=len(tests)).start()
pbar_test = 0
# These features do not work on Windows
for test in tests:
pbar.update(pbar_test)
pbar_test = pbar_test + 1
if __name__ == 'benchmark.benchmarker':
print header("Running Test: %s" % test.name)
with open(self.current_benchmark, 'w') as benchmark_resume_file:
benchmark_resume_file.write(test.name)
with self.quiet_out.enable():
test_process = Process(target=self.__run_test, name="Test Runner (%s)" % test.name, args=(test,))
test_process.start()
test_process.join(self.run_test_timeout_seconds)
self.__load_results() # Load intermediate result from child process
if(test_process.is_alive()):
logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
test_process.terminate()
test_process.join()
if test_process.exitcode != 0:
error_happened = True
pbar.finish()
if os.path.isfile(self.current_benchmark):
os.remove(self.current_benchmark)
logging.debug("End __run_tests.")
if error_happened:
return 1
return 0
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
  # Runs a single test: starts the framework, verifies its
  # URLs (or benchmarks it, depending on the mode), then
  # stops it and saves intermediate results.
############################################################
def __run_test(self, test):
# Used to capture return values
def exit_with_code(code):
if self.os.lower() == 'windows':
return code
else:
sys.exit(code)
logDir = os.path.join(self.full_results_directory(), test.name.lower())
try:
os.makedirs(logDir)
except Exception:
pass
with open(os.path.join(logDir, 'out.txt'), 'w') as out:
if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
out.write("OS or Database OS specified in benchmark_config.json does not match the current environment. Skipping.\n")
return exit_with_code(0)
# If the test is in the excludes list, we skip it
if self.exclude != None and test.name in self.exclude:
out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
return exit_with_code(0)
out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
out.write("test.name: {name}\n".format(name=str(test.name)))
out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
if self.results['frameworks'] != None and test.name in self.results['completed']:
out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
print 'WARNING: Test {test} exists in the results directory; this must be removed before running a new test.\n'.format(test=str(test.name))
return exit_with_code(1)
out.flush()
out.write(header("Beginning %s" % test.name, top='='))
out.flush()
##########################
# Start this test
##########################
out.write(header("Starting %s" % test.name))
out.flush()
try:
self.__cleanup_leftover_processes_before_test()
if self.__is_port_bound(test.port):
time.sleep(60)
if self.__is_port_bound(test.port):
# We gave it our all
self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
out.write(header("Error: Port %s is not available, cannot start %s" % (test.port, test.name)))
out.flush()
print "Error: Unable to recover port, cannot start test"
return exit_with_code(1)
result, process = test.start(out)
self.__process = process
if result != 0:
self.__process.terminate()
time.sleep(5)
out.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
out.flush()
self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
return exit_with_code(1)
logging.info("Sleeping %s seconds to ensure framework is ready" % self.sleep)
time.sleep(self.sleep)
##########################
# Verify URLs
##########################
if self.mode == "debug":
logging.info("Entering debug mode. Server has started. CTRL-c to stop.")
while True:
time.sleep(1)
else:
logging.info("Verifying framework URLs")
passed_verify = test.verify_urls(logDir)
##########################
# Benchmark this test
##########################
if self.mode == "benchmark":
logging.info("Benchmarking")
out.write(header("Benchmarking %s" % test.name))
out.flush()
test.benchmark(logDir)
##########################
# Stop this test
##########################
self.__stop_test(test, out)
out.write(header("Stopped %s" % test.name))
out.flush()
##########################################################
# Remove contents of /tmp folder
##########################################################
try:
subprocess.check_call('sudo rm -rf /tmp/*', shell=True, stderr=out, stdout=out)
except Exception:
out.write(header("Error: Could not empty /tmp"))
##########################################################
# Remove apt sources to avoid pkg errors and collisions
##########################################################
os.system("sudo rm -rf /etc/apt/sources.list.d/*")
##########################################################
# Save results thus far into the latest results directory
##########################################################
out.write(header("Saving results through %s" % test.name))
out.flush()
self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
##########################################################
# Upload the results thus far to another server (optional)
##########################################################
self.__upload_results()
if self.mode == "verify" and not passed_verify:
print "Failed verify!"
return exit_with_code(1)
except KeyboardInterrupt:
self.__stop_test(test, out)
except (OSError, IOError, subprocess.CalledProcessError) as e:
self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
out.write(header("Subprocess Error %s" % test.name))
traceback.print_exc(file=out)
out.flush()
out.close()
return exit_with_code(1)
out.close()
return exit_with_code(0)
############################################################
  # End __run_test
############################################################
############################################################
# __stop_test
# Attempts to stop the running test.
############################################################
def __stop_test(self, test, out):
# self.__process may not be set if the user hit ctrl+c prior to the test
# starting properly.
if self.__process is not None:
out.write(header("Stopping %s" % test.name))
out.flush()
# Ask TFBReaper to nicely terminate itself
self.__process.terminate()
slept = 0
returnCode = None
# Check once a second to see if TFBReaper has exited
while(slept < 300 and returnCode is None):
time.sleep(1)
slept += 1
returnCode = self.__process.poll()
# If TFBReaper has not exited at this point, we have a problem
if returnCode is None:
self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
out.write(header("Error: Port %s was not released by stop - %s" % (test.port, test.name)))
out.write(header("Running Processes"))
out.write(subprocess.check_output(['ps -aux'], shell=True))
out.flush()
return exit_with_code(1)
############################################################
# End __stop_test
############################################################
def is_port_bound(self, port):
return self.__is_port_bound(port)
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shutdown properly.
############################################################
def __is_port_bound(self, port):
port = int(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# Try to bind to all IP addresses, this port
s.bind(("", port))
# If we get here, we were able to bind successfully,
# which means the port is free.
except socket.error:
# If we get an exception, it might be because the port is still bound
# which would be bad, or maybe it is a privileged port (<1024) and we
# are not running as root, or maybe the server is gone, but sockets are
# still in TIME_WAIT (SO_REUSEADDR). To determine which scenario, try to
# connect.
try:
s.connect(("127.0.0.1", port))
# If we get here, we were able to connect to something, which means
# that the port is still bound.
return True
except socket.error:
# An exception means that we couldn't connect, so a server probably
# isn't still running on the port.
pass
finally:
s.close()
return False
############################################################
# End __is_port_bound
############################################################
############################################################
# __parse_results
  # Parses the results of the completed tests: counts the
  # commits and lines of code for each framework, then
  # writes the aggregated results.json file.
############################################################
def __parse_results(self, tests):
    # Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(os.path.join(self.full_results_directory(), "results.json"), "w") as f:
f.write(json.dumps(self.results, indent=2))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
#############################################################
def __count_sloc(self):
frameworks = gather_frameworks(include=self.test,
exclude=self.exclude, benchmarker=self)
jsonResult = {}
for framework, testlist in frameworks.iteritems():
if not os.path.exists(os.path.join(testlist[0].directory, "source_code")):
logging.warn("Cannot count lines of code for %s - no 'source_code' file", framework)
continue
# Unfortunately the source_code files use lines like
# ./cpoll_cppsp/www/fortune_old instead of
# ./www/fortune_old
# so we have to back our working dir up one level
wd = os.path.dirname(testlist[0].directory)
try:
command = "cloc --list-file=%s/source_code --yaml" % testlist[0].directory
if os.path.exists(os.path.join(testlist[0].directory, "cloc_defs.txt")):
command += " --read-lang-def %s" % os.path.join(testlist[0].directory, "cloc_defs.txt")
logging.info("Using custom cloc definitions for %s", framework)
# Find the last instance of the word 'code' in the yaml output. This should
# be the line count for the sum of all listed files or just the line count
# for the last file in the case where there's only one file listed.
command = command + "| grep code | tail -1 | cut -d: -f 2"
logging.debug("Running \"%s\" (cwd=%s)", command, wd)
lineCount = subprocess.check_output(command, cwd=wd, shell=True)
jsonResult[framework] = int(lineCount)
except subprocess.CalledProcessError:
continue
except ValueError as ve:
logging.warn("Unable to get linecount for %s due to error '%s'", framework, ve)
self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
#
############################################################
def __count_commits(self):
frameworks = gather_frameworks(include=self.test,
exclude=self.exclude, benchmarker=self)
def count_commit(directory, jsonResult):
command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
try:
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except subprocess.CalledProcessError:
pass
# Because git can be slow when run in large batches, this
# calls git up to 4 times in parallel. Normal improvement is ~3-4x
# in my trials, or ~100 seconds down to ~25
# This is safe to parallelize as long as each thread only
# accesses one key in the dictionary
threads = []
jsonResult = {}
t1 = datetime.now()
for framework, testlist in frameworks.iteritems():
directory = testlist[0].directory
t = threading.Thread(target=count_commit, args=(directory,jsonResult))
t.start()
threads.append(t)
# Git has internal locks, full parallel will just cause contention
# and slowness, so we rate-limit a bit
if len(threads) >= 4:
threads[0].join()
threads.remove(threads[0])
# Wait for remaining threads
for t in threads:
t.join()
t2 = datetime.now()
# print "Took %s seconds " % (t2 - t1).seconds
self.results['rawData']['commitCounts'] = jsonResult
self.commits = jsonResult
############################################################
# End __count_commits
############################################################
def __write_intermediate_results(self,test_name,status_message):
self.results["completed"][test_name] = status_message
self.__write_results()
def __write_results(self):
try:
with open(os.path.join(self.full_results_directory(), 'results.json'), 'w') as f:
f.write(json.dumps(self.results, indent=2))
except (IOError):
logging.error("Error writing results.json")
def __set_completion_time(self):
self.results['completionTime'] = int(round(time.time() * 1000))
self.__write_results()
def __upload_results(self):
if self.results_upload_uri != None:
try:
requests.post(self.results_upload_uri, headers={ 'Content-Type': 'application/json' }, data=json.dumps(self.results, indent=2))
except (Exception):
logging.error("Error uploading results.json")
def __load_results(self):
try:
with open(os.path.join(self.full_results_directory(), 'results.json')) as f:
self.results = json.load(f)
except (ValueError, IOError):
pass
############################################################
# __finish
############################################################
def __finish(self):
if not self.list_tests and not self.parse:
tests = self.__gather_tests
# Normally you don't have to use Fore.BLUE before each line, but
# Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
# or stream flush, so we have to ensure that the color code is printed repeatedly
prefix = Fore.CYAN
for line in header("Verification Summary", top='=', bottom='').split('\n'):
print prefix + line
for test in tests:
print prefix + "| Test: %s" % test.name
if test.name in self.results['verify'].keys():
for test_type, result in self.results['verify'][test.name].iteritems():
if result.upper() == "PASS":
color = Fore.GREEN
elif result.upper() == "WARN":
color = Fore.YELLOW
else:
color = Fore.RED
print prefix + "| " + test_type.ljust(13) + ' : ' + color + result.upper()
else:
print prefix + "| " + Fore.RED + "NO RESULTS (Did framework launch?)"
print prefix + header('', top='', bottom='=') + Style.RESET_ALL
print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
# Map type strings to their objects
types = dict()
types['json'] = JsonTestType()
types['db'] = DBTestType()
types['query'] = QueryTestType()
types['fortune'] = FortuneTestType()
types['update'] = UpdateTestType()
types['plaintext'] = PlaintextTestType()
types['cached_query'] = CachedQueryTestType()
# Turn type into a map instead of a string
if args['type'] == 'all':
args['types'] = types
else:
args['types'] = { args['type'] : types[args['type']] }
del args['type']
args['max_concurrency'] = max(args['concurrency_levels'])
if 'pipeline_concurrency_levels' not in args:
args['pipeline_concurrency_levels'] = [256,1024,4096,16384]
self.__dict__.update(args)
# pprint(self.__dict__)
self.quiet_out = QuietOutputStream(self.quiet)
self.start_time = time.time()
self.run_test_timeout_seconds = 7200
# setup logging
logging.basicConfig(stream=self.quiet_out, level=logging.INFO)
# setup some additional variables
if self.database_user == None: self.database_user = self.client_user
if self.database_host == None: self.database_host = self.client_host
if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
# Remember root directory
self.fwroot = setup_util.get_fwroot()
# setup current_benchmark.txt location
self.current_benchmark = "/tmp/current_benchmark.txt"
if hasattr(self, 'parse') and self.parse != None:
self.timestamp = self.parse
else:
self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
# setup results and latest_results directories
self.result_directory = os.path.join(self.fwroot, "results")
if (args['clean'] or args['clean_all']) and os.path.exists(os.path.join(self.fwroot, "results")):
os.system("sudo rm -rf " + self.result_directory + "/*")
# remove installs directories if --clean-all provided
self.install_root = "%s/%s" % (self.fwroot, "installs")
if args['clean_all']:
os.system("sudo rm -rf " + self.install_root)
os.mkdir(self.install_root)
self.results = None
try:
with open(os.path.join(self.full_results_directory(), 'results.json'), 'r') as f:
#Load json file into results object
self.results = json.load(f)
except IOError:
logging.warn("results.json for test not found.")
if self.results == None:
self.results = dict()
self.results['uuid'] = str(uuid.uuid4())
self.results['name'] = datetime.now().strftime(self.results_name)
self.results['environmentDescription'] = self.results_environment
self.results['startTime'] = int(round(time.time() * 1000))
self.results['completionTime'] = None
self.results['concurrencyLevels'] = self.concurrency_levels
self.results['pipelineConcurrencyLevels'] = self.pipeline_concurrency_levels
self.results['queryIntervals'] = self.query_levels
self.results['cachedQueryIntervals'] = self.cached_query_levels
self.results['frameworks'] = [t.name for t in self.__gather_tests]
self.results['duration'] = self.duration
self.results['rawData'] = dict()
self.results['rawData']['json'] = dict()
self.results['rawData']['db'] = dict()
self.results['rawData']['query'] = dict()
self.results['rawData']['fortune'] = dict()
self.results['rawData']['update'] = dict()
self.results['rawData']['plaintext'] = dict()
self.results['rawData']['cached_query'] = dict()
self.results['completed'] = dict()
self.results['succeeded'] = dict()
self.results['succeeded']['json'] = []
self.results['succeeded']['db'] = []
self.results['succeeded']['query'] = []
self.results['succeeded']['fortune'] = []
self.results['succeeded']['update'] = []
self.results['succeeded']['plaintext'] = []
self.results['succeeded']['cached_query'] = []
self.results['failed'] = dict()
self.results['failed']['json'] = []
self.results['failed']['db'] = []
self.results['failed']['query'] = []
self.results['failed']['fortune'] = []
self.results['failed']['update'] = []
self.results['failed']['plaintext'] = []
self.results['failed']['cached_query'] = []
self.results['verify'] = dict()
else:
#for x in self.__gather_tests():
# if x.name not in self.results['frameworks']:
# self.results['frameworks'] = self.results['frameworks'] + [x.name]
# Always overwrite framework list
self.results['frameworks'] = [t.name for t in self.__gather_tests]
# Setup the ssh command string
self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
if self.database_identity_file != None:
self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
if self.client_identity_file != None:
self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
self.__process = None
############################################################
# End __init__
############################################################
class QuietOutputStream:
def __init__(self, is_quiet):
self.is_quiet = is_quiet
self.null_out = open(os.devnull, 'w')
def fileno(self):
with self.enable():
return sys.stdout.fileno()
def write(self, message):
with self.enable():
sys.stdout.write(message)
@contextmanager
def enable(self):
if self.is_quiet:
old_out = sys.stdout
old_err = sys.stderr
try:
sys.stdout = self.null_out
sys.stderr = self.null_out
yield
finally:
sys.stdout = old_out
sys.stderr = old_err
else:
yield
| bsd-3-clause | 2,604,231,988,045,656,000 | 43.424837 | 242 | 0.471511 | false |
nkgilley/home-assistant | homeassistant/components/roomba/config_flow.py | 7 | 3831 | """Config flow to configure roomba component."""
import logging
from roomba import Roomba
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import CONF_HOST, CONF_PASSWORD
from homeassistant.core import callback
from . import CannotConnect, async_connect_or_timeout, async_disconnect_or_timeout
from .const import (
CONF_BLID,
CONF_CONTINUOUS,
CONF_DELAY,
CONF_NAME,
DEFAULT_CONTINUOUS,
DEFAULT_DELAY,
ROOMBA_SESSION,
)
from .const import DOMAIN # pylint:disable=unused-import
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_BLID): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_CONTINUOUS, default=DEFAULT_CONTINUOUS): bool,
vol.Optional(CONF_DELAY, default=DEFAULT_DELAY): int,
}
)
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
roomba = Roomba(
address=data[CONF_HOST],
blid=data[CONF_BLID],
password=data[CONF_PASSWORD],
continuous=data[CONF_CONTINUOUS],
delay=data[CONF_DELAY],
)
info = await async_connect_or_timeout(hass, roomba)
return {
ROOMBA_SESSION: info[ROOMBA_SESSION],
CONF_NAME: info[CONF_NAME],
CONF_HOST: data[CONF_HOST],
}
class RoombaConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Roomba configuration flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
async def async_step_import(self, import_info):
"""Set the config entry up from yaml."""
return await self.async_step_user(import_info)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(user_input[CONF_BLID])
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors = {"base": "cannot_connect"}
if "base" not in errors:
await async_disconnect_or_timeout(self.hass, info[ROOMBA_SESSION])
return self.async_create_entry(title=info[CONF_NAME], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_CONTINUOUS,
default=self.config_entry.options.get(
CONF_CONTINUOUS, DEFAULT_CONTINUOUS
),
): bool,
vol.Optional(
CONF_DELAY,
default=self.config_entry.options.get(
CONF_DELAY, DEFAULT_DELAY
),
): int,
}
),
)
| apache-2.0 | 4,751,508,098,243,531,000 | 29.648 | 86 | 0.592274 | false |
soulmachine/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 23 | 3120 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset, including sparse versions."""
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
"""Check consistency on dataset iris."""
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
"""Check consistency on dataset iris, when using shrinkage."""
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause | 1,796,872,590,107,335,400 | 29.291262 | 75 | 0.638462 | false |
job/exscript | tests/Exscript/util/fileTest.py | 6 | 2964 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
import base64
import Exscript.util.file
from tempfile import NamedTemporaryFile
account_pool = [('user1', 'password1'),
('user2', 'password2'),
('user3', 'password3'),
('user4', 'password4')]
hosts = ['localhost', '1.2.3.4', 'ssh://test', 'ssh1://another:23']
expected_hosts = ['localhost', '1.2.3.4', 'test', 'another']
class fileTest(unittest.TestCase):
CORRELATE = Exscript.util.file
def setUp(self):
data = '[account-pool]\n'
data += 'user1=' + base64.encodestring('password1') + '\n'
data += 'user2:' + base64.encodestring('password2') + '\n'
data += 'user3 = ' + base64.encodestring('password3') + '\n'
data += 'user4 : ' + base64.encodestring('password4') + '\n'
self.account_file = NamedTemporaryFile()
self.account_file.write(data)
self.account_file.flush()
self.host_file = NamedTemporaryFile()
self.host_file.write('\n'.join(hosts))
self.host_file.flush()
self.csv_host_file = NamedTemporaryFile()
self.csv_host_file.write('hostname test\n')
self.csv_host_file.write('\n'.join([h + ' blah' for h in hosts]))
self.csv_host_file.flush()
self.lib_file = NamedTemporaryFile()
self.lib_file.write('__lib__ = {"test": object}\n')
self.lib_file.flush()
def tearDown(self):
self.account_file.close()
self.host_file.close()
self.csv_host_file.close()
def testGetAccountsFromFile(self):
from Exscript.util.file import get_accounts_from_file
accounts = get_accounts_from_file(self.account_file.name)
result = [(a.get_name(), a.get_password()) for a in accounts]
result.sort()
self.assertEqual(account_pool, result)
def testGetHostsFromFile(self):
from Exscript.util.file import get_hosts_from_file
result = get_hosts_from_file(self.host_file.name)
self.assertEqual([h.get_name() for h in result], expected_hosts)
def testGetHostsFromCsv(self):
from Exscript.util.file import get_hosts_from_csv
result = get_hosts_from_csv(self.csv_host_file.name)
hostnames = [h.get_name() for h in result]
testvars = [h.get('test')[0] for h in result]
self.assertEqual(hostnames, expected_hosts)
self.assertEqual(testvars, ['blah' for h in result])
def testLoadLib(self):
from Exscript.util.file import load_lib
functions = load_lib(self.lib_file.name)
name = os.path.splitext(os.path.basename(self.lib_file.name))[0]
self.assertEqual({name + '.test': object}, functions)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(fileTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| gpl-2.0 | -4,539,002,868,833,348,000 | 38 | 84 | 0.612011 | false |
cosm0s/graphite-web | webapp/graphite/render/functions.py | 1 | 123892 | #Copyright 2008 Orbitz WorldWide
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import math
import random
import re
import time
from datetime import datetime, timedelta
from itertools import izip, imap
from os import environ
from graphite.logger import log
from graphite.render.attime import parseTimeOffset, parseATTime
from graphite.events import models
from graphite.util import epoch, timestamp, deltaseconds
# XXX format_units() should go somewhere else
if environ.get('READTHEDOCS'):
format_units = lambda *args, **kwargs: (0,'')
else:
from graphite.render.glyph import format_units
from graphite.render.datalib import TimeSeries
NAN = float('NaN')
INF = float('inf')
DAY = 86400
HOUR = 3600
MINUTE = 60
#Utility functions
def safeSum(values):
safeValues = [v for v in values if v is not None]
if safeValues:
return sum(safeValues)
def safeDiff(values):
safeValues = [v for v in values if v is not None]
if safeValues:
values = map(lambda x: x*-1, safeValues[1:])
values.insert(0, safeValues[0])
return sum(values)
def safeLen(values):
return len([v for v in values if v is not None])
def safeDiv(a, b):
if a is None: return None
if b in (0,None): return None
return float(a) / float(b)
def safePow(a, b):
if a is None: return None
if b is None: return None
try:
result = math.pow(a, b)
except ValueError:
return None
return result
def safeMul(*factors):
if None in factors:
return None
factors = [float(x) for x in factors]
product = reduce(lambda x,y: x*y, factors)
return product
def safeSubtract(a,b):
if a is None or b is None: return None
return float(a) - float(b)
def safeAvg(a):
return safeDiv(safeSum(a),safeLen(a))
def safeStdDev(a):
sm = safeSum(a)
ln = safeLen(a)
avg = safeDiv(sm,ln)
if avg is None: return None
sum = 0
safeValues = [v for v in a if v is not None]
for val in safeValues:
sum = sum + (val - avg) * (val - avg)
return math.sqrt(sum/ln)
def safeLast(values):
for v in reversed(values):
if v is not None: return v
def safeMin(values):
safeValues = [v for v in values if v is not None]
if safeValues:
return min(safeValues)
def safeMax(values):
safeValues = [v for v in values if v is not None]
if safeValues:
return max(safeValues)
def safeMap(function, values):
safeValues = [v for v in values if v is not None]
if safeValues:
return [function(x) for x in safeValues]
def safeAbs(value):
if value is None: return None
return abs(value)
def safeIsNotEmpty(values):
safeValues = [v for v in values if v is not None]
return len(safeValues) > 0
# Greatest common divisor
def gcd(a, b):
if b == 0:
return a
return gcd(b, a%b)
# Least common multiple
def lcm(a, b):
if a == b: return a
if a < b: (a, b) = (b, a) #ensure a > b
return a / gcd(a,b) * b
def normalize(seriesLists):
if seriesLists:
seriesList = reduce(lambda L1,L2: L1+L2,seriesLists)
if seriesList:
step = reduce(lcm,[s.step for s in seriesList])
for s in seriesList:
s.consolidate( step / s.step )
start = min([s.start for s in seriesList])
end = max([s.end for s in seriesList])
end -= (end - start) % step
return (seriesList,start,end,step)
raise NormalizeEmptyResultError()
class NormalizeEmptyResultError(Exception):
# throw error for normalize() when empty
pass
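# Illustrative sketch added for this write-up (not part of the upstream module):
# normalize() reconciles series with different resolutions by consolidating each
# one to the least common multiple of their steps. The helper below only shows
# that step calculation; the step values are hypothetical.
def _example_common_step(steps=(10, 15, 60)):
  # lcm(10, 15) == 30 and lcm(30, 60) == 60, so all three series would be
  # consolidated into 60-second buckets before being combined.
  return reduce(lcm, steps)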
def matchSeries(seriesList1, seriesList2):
assert len(seriesList2) == len(seriesList1), "The number of series in each argument must be the same"
return izip(sorted(seriesList1, lambda a,b: cmp(a.name, b.name)), sorted(seriesList2, lambda a,b: cmp(a.name, b.name)))
def formatPathExpressions(seriesList):
# remove duplicates
pathExpressions = []
[pathExpressions.append(s.pathExpression) for s in seriesList if not pathExpressions.count(s.pathExpression)]
return ','.join(pathExpressions)
# Series Functions
#NOTE: Some of the functions below use izip, which may be problematic.
#izip stops when it hits the end of the shortest series
#in practice this *shouldn't* matter because all series will cover
#the same interval, despite having possibly different steps...
def sumSeries(requestContext, *seriesLists):
"""
Short form: sum()
This will add metrics together and return the sum at each datapoint. (See
integral for a sum over time)
Example:
.. code-block:: none
&target=sum(company.server.application*.requestsHandled)
This would show the sum of all requests handled per minute (provided
requestsHandled are collected once a minute). If metrics with different
retention rates are combined, the coarsest metric is graphed, and the sum
of the other metrics is averaged for the metrics with finer retention rates.
"""
try:
(seriesList,start,end,step) = normalize(seriesLists)
except:
return []
name = "sumSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeSum(row) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
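# Hedged example (added for illustration, not in the upstream source): summing
# two small in-memory TimeSeries objects with equal steps. The series names "a"
# and "b" are hypothetical; the empty dict stands in for requestContext, which
# sumSeries() does not consult.
def _example_sum_series():
  a = TimeSeries("a", 0, 3, 1, [1, 2, 3])
  a.pathExpression = "a"
  b = TimeSeries("b", 0, 3, 1, [4, 5, 6])
  b.pathExpression = "b"
  # Each aligned datapoint is added, so the resulting series holds [5, 7, 9].
  return sumSeries({}, [a, b])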
def sumSeriesWithWildcards(requestContext, seriesList, *position): #XXX
"""
Call sumSeries after inserting wildcards at the given position(s).
Example:
.. code-block:: none
&target=sumSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)
This would be the equivalent of
``target=sumSeries(host.cpu-[0-7].cpu-user.value)&target=sumSeries(host.cpu-[0-7].cpu-system.value)``
"""
if isinstance(position, int):
positions = [position]
else:
positions = position
newSeries = {}
newNames = list()
for series in seriesList:
newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.')))))
if newname in newSeries:
newSeries[newname] = sumSeries(requestContext, (series, newSeries[newname]))[0]
else:
newSeries[newname] = series
newNames.append(newname)
newSeries[newname].name = newname
return [newSeries[name] for name in newNames]
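# Illustrative sketch, not part of the original module: the *SeriesWithWildcards
# helpers group series by dropping the dot-separated nodes at the given
# positions. The metric name below is hypothetical.
def _example_strip_wildcard_positions(name="host.cpu-0.cpu-user.value", positions=(1,)):
  # Dropping position 1 turns "host.cpu-0.cpu-user.value" into
  # "host.cpu-user.value", so all cpu-N series collapse into one group.
  return '.'.join(part for i, part in enumerate(name.split('.')) if i not in positions)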
def averageSeriesWithWildcards(requestContext, seriesList, *position): #XXX
"""
Call averageSeries after inserting wildcards at the given position(s).
Example:
.. code-block:: none
&target=averageSeriesWithWildcards(host.cpu-[0-7].cpu-{user,system}.value, 1)
This would be the equivalent of
``target=averageSeries(host.*.cpu-user.value)&target=averageSeries(host.*.cpu-system.value)``
"""
if isinstance(position, int):
positions = [position]
else:
positions = position
result = []
matchedList = {}
for series in seriesList:
newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.')))))
if newname not in matchedList:
matchedList[newname] = []
matchedList[newname].append(series)
for name in matchedList.keys():
result.append( averageSeries(requestContext, (matchedList[name]))[0] )
result[-1].name = name
return result
def multiplySeriesWithWildcards(requestContext, seriesList, *position): #XXX
"""
Call multiplySeries after inserting wildcards at the given position(s).
Example:
.. code-block:: none
&target=multiplySeriesWithWildcards(web.host-[0-7].{avg-response,total-request}.value, 2)
This would be the equivalent of
.. code-block:: none
&target=multiplySeries(web.host-0.{avg-response,total-request}.value)&target=multiplySeries(web.host-1.{avg-response,total-request}.value)...
"""
if type(position) is int:
positions = [position]
else:
positions = position
newSeries = {}
newNames = list()
for series in seriesList:
newname = '.'.join(map(lambda x: x[1], filter(lambda i: i[0] not in positions, enumerate(series.name.split('.')))))
if newname in newSeries:
newSeries[newname] = multiplySeries(requestContext, (newSeries[newname], series))[0]
else:
newSeries[newname] = series
newNames.append(newname)
newSeries[newname].name = newname
return [newSeries[name] for name in newNames]
def diffSeries(requestContext, *seriesLists):
"""
Subtracts series 2 through n from series 1.
Example:
.. code-block:: none
&target=diffSeries(service.connections.total,service.connections.failed)
To diff a series and a constant, one should use offset instead of (or in
addition to) diffSeries
Example:
.. code-block:: none
&target=offset(service.connections.total,-5)
&target=offset(diffSeries(service.connections.total,service.connections.failed),-4)
"""
(seriesList,start,end,step) = normalize(seriesLists)
name = "diffSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeDiff(row) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def averageSeries(requestContext, *seriesLists):
"""
Short Alias: avg()
Takes one metric or a wildcard seriesList.
Draws the average value of all metrics passed at each time.
Example:
.. code-block:: none
&target=averageSeries(company.server.*.threads.busy)
"""
(seriesList,start,end,step) = normalize(seriesLists)
name = "averageSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeDiv(safeSum(row),safeLen(row)) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def stddevSeries(requestContext, *seriesLists):
"""
Takes one metric or a wildcard seriesList.
Draws the standard deviation of all metrics passed at each time.
Example:
.. code-block:: none
&target=stddevSeries(company.server.*.threads.busy)
"""
(seriesList,start,end,step) = normalize(seriesLists)
name = "stddevSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeStdDev(row) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def minSeries(requestContext, *seriesLists):
"""
Takes one metric or a wildcard seriesList.
For each datapoint from each metric passed in, pick the minimum value and graph it.
Example:
.. code-block:: none
&target=minSeries(Server*.connections.total)
"""
(seriesList, start, end, step) = normalize(seriesLists)
name = "minSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeMin(row) for row in izip(*seriesList) )
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def maxSeries(requestContext, *seriesLists):
"""
Takes one metric or a wildcard seriesList.
For each datapoint from each metric passed in, pick the maximum value and graph it.
Example:
.. code-block:: none
&target=maxSeries(Server*.connections.total)
"""
(seriesList, start, end, step) = normalize(seriesLists)
name = "maxSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeMax(row) for row in izip(*seriesList) )
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
return [series]
def rangeOfSeries(requestContext, *seriesLists):
"""
Takes a wildcard seriesList.
Distills down a set of inputs into the range of the series
Example:
.. code-block:: none
&target=rangeOfSeries(Server*.connections.total)
"""
(seriesList,start,end,step) = normalize(seriesLists)
name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList)
values = ( safeSubtract(max(row), min(row)) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
return [series]
def percentileOfSeries(requestContext, seriesList, n, interpolate=False):
"""
percentileOfSeries returns a single series which is composed of the n-percentile
values taken across a wildcard series at each point. Unless `interpolate` is
set to True, percentile values are actual values contained in one of the
supplied series.
"""
if n <= 0:
raise ValueError('The requested percent is required to be greater than 0')
name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n)
(start, end, step) = normalize([seriesList])[1:]
values = [ _getPercentile(row, n, interpolate) for row in izip(*seriesList) ]
resultSeries = TimeSeries(name, start, end, step, values)
resultSeries.pathExpression = name
return [resultSeries]
def keepLastValue(requestContext, seriesList, limit = INF):
"""
Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over.
Continues the line with the last received value when gaps ('None' values) appear in your data, rather than breaking your line.
Example:
.. code-block:: none
&target=keepLastValue(Server01.connections.handled)
&target=keepLastValue(Server01.connections.handled, 10)
"""
for series in seriesList:
series.name = "keepLastValue(%s)" % (series.name)
series.pathExpression = series.name
consecutiveNones = 0
for i,value in enumerate(series):
series[i] = value
# No 'keeping' can be done on the first value because we have no idea
# what came before it.
if i == 0:
continue
if value is None:
consecutiveNones += 1
else:
if 0 < consecutiveNones <= limit:
# If a non-None value is seen before the limit of Nones is hit,
# backfill all the missing datapoints with the last known value.
for index in xrange(i - consecutiveNones, i):
series[index] = series[i - consecutiveNones - 1]
consecutiveNones = 0
# If the series ends with some None values, try to backfill a bit to cover it.
if 0 < consecutiveNones <= limit:
for index in xrange(len(series) - consecutiveNones, len(series)):
series[index] = series[len(series) - consecutiveNones - 1]
return seriesList
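# Hedged example (added for illustration only): the backfill rule described in
# the keepLastValue() docstring applied to a tiny hand-built series. The series
# name is hypothetical and the empty dict stands in for requestContext.
def _example_keep_last_value():
  series = TimeSeries("example", 0, 4, 1, [1, None, None, 4])
  series.pathExpression = "example"
  # With limit=2 both missing points are backfilled from the last known value,
  # yielding [1, 1, 1, 4]; with limit=1 they would be left as None.
  return keepLastValue({}, [series], limit=2)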
def interpolate(requestContext, seriesList, limit = INF):
"""
Takes one metric or a wildcard seriesList, and optionally a limit to the number of 'None' values to skip over.
  Fills gaps ('None' values) in your data with linearly interpolated values, rather than breaking your line.
Example:
.. code-block:: none
&target=interpolate(Server01.connections.handled)
&target=interpolate(Server01.connections.handled, 10)
"""
for series in seriesList:
series.name = "interpolate(%s)" % (series.name)
series.pathExpression = series.name
consecutiveNones = 0
for i,value in enumerate(series):
series[i] = value
# No 'keeping' can be done on the first value because we have no idea
# what came before it.
if i == 0:
continue
if value is None:
consecutiveNones += 1
elif consecutiveNones == 0: # have a value but no need to interpolate
continue
elif series[i - consecutiveNones - 1] is None: # have a value but can't interpolate: reset count
consecutiveNones = 0
continue
else: # have a value and can interpolate
# If a non-None value is seen before the limit of Nones is hit,
# backfill all the missing datapoints with the last known value.
if 0 < consecutiveNones <= limit:
for index in xrange(i - consecutiveNones, i):
series[index] = series[i - consecutiveNones - 1] + (index - (i - consecutiveNones -1)) * (value - series[i - consecutiveNones - 1]) / (consecutiveNones + 1)
consecutiveNones = 0
# If the series ends with some None values, try to backfill a bit to cover it.
# if 0 < consecutiveNones < limit:
# for index in xrange(len(series) - consecutiveNones, len(series)):
# series[index] = series[len(series) - consecutiveNones - 1]
return seriesList
def changed(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
  Outputs 1 when the value changed, 0 when it is null or unchanged.
Example:
.. code-block:: none
&target=changed(Server01.connections.handled)
"""
for series in seriesList:
series.name = "changed(%s)" % (series.name)
series.pathExpression = series.name
previous = None
for i,value in enumerate(series):
if previous is None:
previous = value
series[i] = 0
elif value is not None and previous != value:
series[i] = 1
previous = value
else:
series[i] = 0
return seriesList
def asPercent(requestContext, seriesList, total=None):
"""
Calculates a percentage of the total of a wildcard series. If `total` is specified,
each series will be calculated as a percentage of that total. If `total` is not specified,
the sum of all points in the wildcard series will be used instead.
The `total` parameter may be a single series, reference the same number of series as `seriesList` or a numeric value.
Example:
.. code-block:: none
&target=asPercent(Server01.connections.{failed,succeeded}, Server01.connections.attempted)
&target=asPercent(Server*.connections.{failed,succeeded}, Server*.connections.attempted)
&target=asPercent(apache01.threads.busy,1500)
&target=asPercent(Server01.cpu.*.jiffies)
"""
normalize([seriesList])
if total is None:
totalValues = [ safeSum(row) for row in izip(*seriesList) ]
totalText = "sumSeries(%s)" % formatPathExpressions(seriesList)
elif type(total) is list:
if len(total) != 1 and len(total) != len(seriesList):
raise ValueError("asPercent second argument must be missing, a single digit, reference exactly 1 series or reference the same number of series as the first argument")
if len(total) == 1:
normalize([seriesList, total])
totalValues = total[0]
totalText = totalValues.name
else:
totalValues = [total] * len(seriesList[0])
totalText = str(total)
resultList = []
if type(total) is list and len(total) == len(seriesList):
for series1, series2 in matchSeries(seriesList, total):
name = "asPercent(%s,%s)" % (series1.name,series2.name)
(seriesList,start,end,step) = normalize([(series1, series2)])
resultValues = [ safeMul(safeDiv(v1, v2), 100.0) for v1,v2 in izip(series1,series2) ]
resultSeries = TimeSeries(name,start,end,step,resultValues)
resultSeries.pathExpression = name
resultList.append(resultSeries)
else:
for series in seriesList:
resultValues = [ safeMul(safeDiv(val, totalVal), 100.0) for val,totalVal in izip(series,totalValues) ]
name = "asPercent(%s,%s)" % (series.name, totalText or series.pathExpression)
resultSeries = TimeSeries(name,series.start,series.end,series.step,resultValues)
resultSeries.pathExpression = name
resultList.append(resultSeries)
return resultList
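# Illustrative sketch, not in the upstream source: asPercent() with a numeric
# total. The series name "a" is hypothetical and the empty dict stands in for
# requestContext, which asPercent() does not use.
def _example_as_percent_of_constant():
  a = TimeSeries("a", 0, 3, 1, [1, 2, 3])
  a.pathExpression = "a"
  # Each datapoint is expressed as a percentage of 10, giving [10.0, 20.0, 30.0].
  return asPercent({}, [a], 10)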
def divideSeries(requestContext, dividendSeriesList, divisorSeries):
"""
Takes a dividend metric and a divisor metric and draws the division result.
A constant may *not* be passed. To divide by a constant, use the scale()
function (which is essentially a multiplication operation) and use the inverse
of the dividend. (Division by 8 = multiplication by 1/8 or 0.125)
Example:
.. code-block:: none
&target=divideSeries(Series.dividends,Series.divisors)
"""
if len(divisorSeries) == 0:
for series in dividendSeriesList:
series.name = "divideSeries(%s,MISSING)" % series.name
series.pathExpression = series.name
for i in range(len(series)):
series[i] = None
return dividendSeriesList
if len(divisorSeries) > 1:
raise ValueError("divideSeries second argument must reference exactly 1 series (got {0})".format(len(divisorSeries)))
divisorSeries = divisorSeries[0]
results = []
for dividendSeries in dividendSeriesList:
name = "divideSeries(%s,%s)" % (dividendSeries.name, divisorSeries.name)
bothSeries = (dividendSeries, divisorSeries)
step = reduce(lcm,[s.step for s in bothSeries])
for s in bothSeries:
s.consolidate( step / s.step )
start = min([s.start for s in bothSeries])
end = max([s.end for s in bothSeries])
end -= (end - start) % step
values = ( safeDiv(v1,v2) for v1,v2 in izip(*bothSeries) )
quotientSeries = TimeSeries(name, start, end, step, values)
quotientSeries.pathExpression = name
results.append(quotientSeries)
return results
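# Hedged example (not part of the original module): as the divideSeries()
# docstring advises, division by a constant is expressed with scale() and the
# constant's inverse. The series name is hypothetical; scale() is defined later
# in this module, which is fine because this body only runs when called.
def _example_divide_by_constant():
  a = TimeSeries("a", 0, 3, 1, [8, 16, 24])
  a.pathExpression = "a"
  # Division by 8 == multiplication by 0.125, so the result holds [1.0, 2.0, 3.0].
  return scale({}, [a], 0.125)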
def multiplySeries(requestContext, *seriesLists):
"""
Takes two or more series and multiplies their points. A constant may not be
used. To multiply by a constant, use the scale() function.
Example:
.. code-block:: none
&target=multiplySeries(Series.dividends,Series.divisors)
"""
(seriesList,start,end,step) = normalize(seriesLists)
if len(seriesList) == 1:
return seriesList
name = "multiplySeries(%s)" % ','.join([s.name for s in seriesList])
product = imap(lambda x: safeMul(*x), izip(*seriesList))
resultSeries = TimeSeries(name, start, end, step, product)
resultSeries.pathExpression = name
return [ resultSeries ]
def weightedAverage(requestContext, seriesListAvg, seriesListWeight, *nodes):
"""
Takes a series of average values and a series of weights and
produces a weighted average for all values.
The corresponding values should share one or more zero-indexed nodes.
Example:
.. code-block:: none
&target=weightedAverage(*.transactions.mean,*.transactions.count,0)
&target=weightedAverage(*.transactions.mean,*.transactions.count,1,3,4)
"""
if isinstance(nodes, int):
nodes=[nodes]
sortedSeries={}
for seriesAvg, seriesWeight in izip(seriesListAvg , seriesListWeight):
key = ''
for node in nodes:
key += seriesAvg.name.split(".")[node]
if key not in sortedSeries:
sortedSeries[key]={}
sortedSeries[key]['avg']=seriesAvg
key = ''
for node in nodes:
key += seriesWeight.name.split(".")[node]
if key not in sortedSeries:
sortedSeries[key]={}
sortedSeries[key]['weight']=seriesWeight
productList = []
for key in sortedSeries.keys():
if 'weight' not in sortedSeries[key]:
continue
if 'avg' not in sortedSeries[key]:
continue
seriesWeight = sortedSeries[key]['weight']
seriesAvg = sortedSeries[key]['avg']
productValues = [ safeMul(val1, val2) for val1,val2 in izip(seriesAvg,seriesWeight) ]
name='product(%s,%s)' % (seriesWeight.name, seriesAvg.name)
productSeries = TimeSeries(name,seriesAvg.start,seriesAvg.end,seriesAvg.step,productValues)
productSeries.pathExpression=name
productList.append(productSeries)
if not productList:
return []
sumProducts=sumSeries(requestContext, productList)[0]
sumWeights=sumSeries(requestContext, seriesListWeight)[0]
resultValues = [ safeDiv(val1, val2) for val1,val2 in izip(sumProducts,sumWeights) ]
name = "weightedAverage(%s, %s, %s)" % (','.join(sorted(set(s.pathExpression for s in seriesListAvg))) ,','.join(sorted(set(s.pathExpression for s in sorted(seriesListWeight)))), ','.join(map(str,nodes)))
resultSeries = TimeSeries(name,sumProducts.start,sumProducts.end,sumProducts.step,resultValues)
resultSeries.pathExpression = name
return [resultSeries]
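# Illustrative sketch (not in the upstream source): weightedAverage() pairs the
# average and weight series by concatenating the name nodes at the requested
# indexes. The metric name below is hypothetical.
def _example_weighted_average_key(name="web01.transactions.mean", nodes=(0,)):
  # For node 0 the key of "web01.transactions.mean" is "web01", so it is matched
  # with "web01.transactions.count" from the weight list.
  return ''.join(name.split(".")[node] for node in nodes)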
def movingMedian(requestContext, seriesList, windowSize):
"""
Graphs the moving median of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of datapoints
or a quoted string with a length of time like '1hour' or '5min' (See ``from /
until`` in the render\_api_ for examples of time formats). Graphs the
  median of the preceding datapoints for each point on the graph.
Example:
.. code-block:: none
&target=movingMedian(Server.instance01.threads.busy,10)
&target=movingMedian(Server.instance*.threads.idle,'5min')
"""
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, basestring):
delta = parseTimeOffset(windowSize)
windowInterval = abs(delta.seconds + (delta.days * 86400))
if windowInterval:
previewSeconds = windowInterval
else:
previewSeconds = max([s.step for s in seriesList]) * int(windowSize)
# ignore original data and pull new, including our preview
# data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = requestContext['startTime'] - timedelta(seconds=previewSeconds)
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []
for series in previewList:
if windowInterval:
windowPoints = windowInterval / series.step
else:
windowPoints = int(windowSize)
if isinstance(windowSize, basestring):
newName = 'movingMedian(%s,"%s")' % (series.name, windowSize)
else:
newName = "movingMedian(%s,%s)" % (series.name, windowSize)
newSeries = TimeSeries(newName, series.start + previewSeconds, series.end, series.step, [])
newSeries.pathExpression = newName
for i in range(windowPoints,len(series)):
window = series[i - windowPoints:i]
nonNull = [v for v in window if v is not None]
if nonNull:
m_index = len(nonNull) / 2
newSeries.append(sorted(nonNull)[m_index])
else:
newSeries.append(None)
result.append(newSeries)
return result
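# Hedged example (hypothetical helper, not in the original module): the same
# "middle of the sorted non-None window" selection movingMedian() applies,
# shown on a plain sequence instead of a TimeSeries.
def _example_moving_median_window(points=(5, 1, 9, 3, 7), window=3):
  medians = []
  for i in range(window, len(points) + 1):
    chunk = sorted(v for v in points[i - window:i] if v is not None)
    # len(chunk) / 2 picks the upper middle element, e.g. [1, 5, 9] -> 5.
    medians.append(chunk[len(chunk) / 2] if chunk else None)
  # For the defaults above this returns [5, 3, 7].
  return medians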
def scale(requestContext, seriesList, factor):
"""
Takes one metric or a wildcard seriesList followed by a constant, and multiplies the datapoint
by the constant provided at each point.
Example:
.. code-block:: none
&target=scale(Server.instance01.threads.busy,10)
&target=scale(Server.instance*.threads.busy,10)
"""
for series in seriesList:
series.name = "scale(%s,%g)" % (series.name,float(factor))
series.pathExpression = series.name
for i,value in enumerate(series):
series[i] = safeMul(value,factor)
return seriesList
def scaleToSeconds(requestContext, seriesList, seconds):
"""
  Takes one metric or a wildcard seriesList and returns "value per seconds", where
  seconds is the last argument to this function.
  Useful in conjunction with the derivative or integral functions if you want
  to normalize their result to a known resolution for arbitrary retentions.
"""
for series in seriesList:
series.name = "scaleToSeconds(%s,%d)" % (series.name,seconds)
series.pathExpression = series.name
for i,value in enumerate(series):
factor = seconds * 1.0 / series.step
series[i] = safeMul(value,factor)
return seriesList
def pow(requestContext, seriesList, factor):
"""
  Takes one metric or a wildcard seriesList followed by a constant, and raises each
  datapoint to the power of the constant provided.
Example:
.. code-block:: none
&target=pow(Server.instance01.threads.busy,10)
&target=pow(Server.instance*.threads.busy,10)
"""
for series in seriesList:
series.name = "pow(%s,%g)" % (series.name,float(factor))
series.pathExpression = series.name
for i,value in enumerate(series):
series[i] = safePow(value,factor)
return seriesList
def squareRoot(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList, and computes the square root of each datapoint.
Example:
.. code-block:: none
&target=squareRoot(Server.instance01.threads.busy)
"""
for series in seriesList:
series.name = "squareRoot(%s)" % (series.name)
for i,value in enumerate(series):
series[i] = safePow(value, 0.5)
return seriesList
def invert(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList, and inverts each datapoint (i.e. 1/x).
Example:
.. code-block:: none
&target=invert(Server.instance01.threads.busy)
"""
for series in seriesList:
series.name = "invert(%s)" % (series.name)
for i,value in enumerate(series):
series[i] = safePow(value, -1)
return seriesList
def absolute(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList and applies the mathematical abs function to each
datapoint transforming it to its absolute value.
Example:
.. code-block:: none
&target=absolute(Server.instance01.threads.busy)
&target=absolute(Server.instance*.threads.busy)
"""
for series in seriesList:
series.name = "absolute(%s)" % (series.name)
series.pathExpression = series.name
for i,value in enumerate(series):
series[i] = safeAbs(value)
return seriesList
def offset(requestContext, seriesList, factor):
"""
Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to
each datapoint.
Example:
.. code-block:: none
&target=offset(Server.instance01.threads.busy,10)
"""
for series in seriesList:
series.name = "offset(%s,%g)" % (series.name,float(factor))
series.pathExpression = series.name
for i,value in enumerate(series):
if value is not None:
series[i] = value + factor
return seriesList
def offsetToZero(requestContext, seriesList):
"""
Offsets a metric or wildcard seriesList by subtracting the minimum
value in the series from each datapoint.
Useful to compare different series where the values in each series
may be higher or lower on average but you're only interested in the
relative difference.
An example use case is for comparing different round trip time
results. When measuring RTT (like pinging a server), different
devices may come back with consistently different results due to
network latency which will be different depending on how many
network hops between the probe and the device. To compare different
devices in the same graph, the network latency to each has to be
factored out of the results. This is a shortcut that takes the
fastest response (lowest number in the series) and sets that to zero
and then offsets all of the other datapoints in that series by that
  amount. This makes the assumption that the lowest response is the
  fastest the device can respond; of course, the more datapoints there
  are in the series, the more accurate this assumption is.
Example:
.. code-block:: none
&target=offsetToZero(Server.instance01.responseTime)
&target=offsetToZero(Server.instance*.responseTime)
"""
for series in seriesList:
series.name = "offsetToZero(%s)" % (series.name)
minimum = safeMin(series)
for i,value in enumerate(series):
if value is not None:
series[i] = value - minimum
return seriesList
def movingAverage(requestContext, seriesList, windowSize):
"""
Graphs the moving average of a metric (or metrics) over a fixed number of
past points, or a time interval.
Takes one metric or a wildcard seriesList followed by a number N of datapoints
or a quoted string with a length of time like '1hour' or '5min' (See ``from /
until`` in the render\_api_ for examples of time formats). Graphs the
  average of the preceding datapoints for each point on the graph.
Example:
.. code-block:: none
&target=movingAverage(Server.instance01.threads.busy,10)
&target=movingAverage(Server.instance*.threads.idle,'5min')
"""
if not seriesList:
return []
windowInterval = None
if isinstance(windowSize, basestring):
delta = parseTimeOffset(windowSize)
windowInterval = abs(delta.seconds + (delta.days * 86400))
if windowInterval:
previewSeconds = windowInterval
else:
previewSeconds = max([s.step for s in seriesList]) * int(windowSize)
  # ignore the original data and pull new data, including our preview window;
  # data from earlier is needed to calculate the early results
newContext = requestContext.copy()
newContext['startTime'] = requestContext['startTime'] - timedelta(seconds=previewSeconds)
previewList = evaluateTokens(newContext, requestContext['args'][0])
result = []
for series in previewList:
if windowInterval:
windowPoints = windowInterval / series.step
else:
windowPoints = int(windowSize)
if isinstance(windowSize, basestring):
newName = 'movingAverage(%s,"%s")' % (series.name, windowSize)
else:
newName = "movingAverage(%s,%s)" % (series.name, windowSize)
newSeries = TimeSeries(newName, series.start + previewSeconds, series.end, series.step, [])
newSeries.pathExpression = newName
window_sum = safeSum(series[:windowPoints]) or 0
count = safeLen(series[:windowPoints])
newSeries.append(safeDiv(window_sum, count))
for n, last in enumerate(series[windowPoints:-1]):
if series[n] is not None:
window_sum -= series[n]
count -= 1
if last is not None:
window_sum += last
count += 1
newSeries.append(safeDiv(window_sum, count))
result.append(newSeries)
return result
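# Illustrative sketch (not part of the original module): the sliding-window
# averaging that movingAverage() above performs, shown on a plain list so it
# can be followed without a TimeSeries object. The helper name is hypothetical.
def _movingAverageSketch(values, windowPoints):
  # Seed the window with the first windowPoints values, then slide it one step
  # at a time, subtracting the value that drops out and adding the value that
  # enters, mirroring the safeSum/safeLen bookkeeping in movingAverage().
  window = [v for v in values[:windowPoints] if v is not None]
  window_sum = sum(window)
  count = len(window)
  averages = [window_sum / float(count) if count else None]
  for n, last in enumerate(values[windowPoints:-1]):
    dropped = values[n]
    if dropped is not None:
      window_sum -= dropped
      count -= 1
    if last is not None:
      window_sum += last
      count += 1
    averages.append(window_sum / float(count) if count else None)
  return averages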
def cumulative(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
  When a graph is drawn where the width of the graph in pixels is smaller than
  the number of datapoints to be graphed, Graphite consolidates the values
  to prevent line overlap. The cumulative() function changes the consolidation
function from the default of 'average' to 'sum'. This is especially useful in
sales graphs, where fractional values make no sense and a 'sum' of consolidated
values is appropriate.
Alias for :func:`consolidateBy(series, 'sum') <graphite.render.functions.consolidateBy>`
.. code-block:: none
&target=cumulative(Sales.widgets.largeBlue)
"""
return consolidateBy(requestContext, seriesList, 'sum')
def consolidateBy(requestContext, seriesList, consolidationFunc):
"""
Takes one metric or a wildcard seriesList and a consolidation function name.
Valid function names are 'sum', 'average', 'min', and 'max'.
  When a graph is drawn where the width of the graph in pixels is smaller than
  the number of datapoints to be graphed, Graphite consolidates the values
  to prevent line overlap. The consolidateBy() function changes the consolidation
function from the default of 'average' to one of 'sum', 'max', or 'min'. This is
especially useful in sales graphs, where fractional values make no sense and a 'sum'
of consolidated values is appropriate.
.. code-block:: none
&target=consolidateBy(Sales.widgets.largeBlue, 'sum')
&target=consolidateBy(Servers.web01.sda1.free_space, 'max')
"""
for series in seriesList:
# datalib will throw an exception, so it's not necessary to validate here
series.consolidationFunc = consolidationFunc
series.name = 'consolidateBy(%s,"%s")' % (series.name, series.consolidationFunc)
series.pathExpression = series.name
return seriesList
def derivative(requestContext, seriesList):
"""
This is the opposite of the integral function. This is useful for taking a
running total metric and calculating the delta between subsequent data points.
This function does not normalize for periods of time, as a true derivative would.
Instead see the perSecond() function to calculate a rate of change over time.
Example:
.. code-block:: none
&target=derivative(company.server.application01.ifconfig.TXPackets)
Each time you run ifconfig, the RX and TXPackets are higher (assuming there
is network traffic.) By applying the derivative function, you can get an
idea of the packets per minute sent or received, even though you're only
recording the total.
"""
results = []
for series in seriesList:
newValues = []
prev = None
for val in series:
if None in (prev,val):
newValues.append(None)
prev = val
continue
newValues.append(val - prev)
prev = val
newName = "derivative(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def perSecond(requestContext, seriesList, maxValue=None):
"""
Derivative adjusted for the series time interval
This is useful for taking a running total metric and showing how many requests
per second were handled.
Example:
.. code-block:: none
&target=perSecond(company.server.application01.ifconfig.TXPackets)
Each time you run ifconfig, the RX and TXPackets are higher (assuming there
is network traffic.) By applying the derivative function, you can get an
  idea of the packets per second sent or received, even though you're only
recording the total.
"""
results = []
for series in seriesList:
newValues = []
prev = None
step = series.step
for val in series:
if prev is None:
newValues.append(None)
prev = val
continue
if val is None:
newValues.append(None)
step = step * 2
continue
diff = val - prev
if diff >= 0:
newValues.append(diff / step)
elif maxValue is not None and maxValue >= val:
newValues.append( ((maxValue - prev) + val + 1) / step )
else:
newValues.append(None)
step = series.step
prev = val
newName = "perSecond(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
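# Illustrative sketch (not part of the original module): how perSecond() above
# treats a counter wrap when maxValue is supplied (and maxValue >= val). The
# numbers here are made up purely for illustration.
def _perSecondWrapSketch():
  prev, val, maxValue, step = 250, 5, 255, 60
  if val >= prev:
    delta = val - prev
  else:
    # counter wrapped: distance left up to maxValue, plus the part after the wrap
    delta = (maxValue - prev) + val + 1
  return delta / float(step)  # (255 - 250) + 5 + 1 = 11 -> 11/60 per second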
def delay(requestContext, seriesList, steps):
"""
This shifts all samples later by an integer number of steps. This can be
used for custom derivative calculations, among other things. Note: this
will pad the early end of the data with None for every step shifted.
This complements other time-displacement functions such as timeShift and
timeSlice, in that this function is indifferent about the step intervals
being shifted.
Example:
.. code-block:: none
&target=divideSeries(server.FreeSpace,delay(server.FreeSpace,1))
This computes the change in server free space as a percentage of the previous
free space.
"""
results = []
for series in seriesList:
newValues = []
prev = []
for val in series:
if len(prev) < steps:
newValues.append(None)
prev.append(val)
continue
newValues.append(prev.pop(0))
prev.append(val)
newName = "delay(%s,%d)" % (series.name, steps)
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
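# Illustrative sketch (not part of the original module): the FIFO buffer that
# delay() above uses to shift samples by a whole number of steps.
def _delaySketch(values, steps):
  shifted, prev = [], []
  for val in values:
    if len(prev) < steps:
      shifted.append(None)  # pad the early end with None for every step shifted
    else:
      shifted.append(prev.pop(0))
    prev.append(val)
  return shifted  # _delaySketch([1, 2, 3, 4], 1) -> [None, 1, 2, 3]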
def integral(requestContext, seriesList):
"""
This will show the sum over time, sort of like a continuous addition function.
Useful for finding totals or trends in metrics that are collected per minute.
Example:
.. code-block:: none
&target=integral(company.sales.perMinute)
This would start at zero on the left side of the graph, adding the sales each
minute, and show the total sales for the time period selected at the right
side, (time now, or the time specified by '&until=').
"""
results = []
for series in seriesList:
newValues = []
current = 0.0
for val in series:
if val is None:
newValues.append(None)
else:
current += val
newValues.append(current)
newName = "integral(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def integralByInterval(requestContext, seriesList, intervalUnit):
"""
  This will do the same as the integral() function, except resetting the total to 0
  at the given time in the parameter "from".
  Useful for finding totals per hour/day/week/...
Example:
.. code-block:: none
&target=integralByInterval(company.sales.perMinute, "1d")&from=midnight-10days
This would start at zero on the left side of the graph, adding the sales each
minute, and show the evolution of sales per day during the last 10 days.
"""
intervalDuration = int(abs(deltaseconds(parseTimeOffset(intervalUnit))))
startTime = int(timestamp(requestContext['startTime']))
results = []
for series in seriesList:
newValues = []
currentTime = series.start # current time within series iteration
current = 0.0 # current accumulated value
for val in series:
# reset integral value if crossing an interval boundary
if (currentTime - startTime)/intervalDuration != (currentTime - startTime - series.step)/intervalDuration:
current = 0.0
if val is None:
# keep previous value since val can be None when resetting current to 0.0
newValues.append(current)
else:
current += val
newValues.append(current)
currentTime += series.step
newName = "integralByInterval(%s,'%s')" % (series.name, intervalUnit)
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
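# Illustrative sketch (not part of the original module): the boundary test that
# integralByInterval() above uses to reset its running total. The total resets
# whenever the elapsed time falls into a new multiple of intervalDuration
# (the original uses "/", which floors for integers under Python 2).
def _crossesIntervalBoundary(currentTime, startTime, intervalDuration, step):
  return ((currentTime - startTime) // intervalDuration !=
          (currentTime - startTime - step) // intervalDuration)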
def nonNegativeDerivative(requestContext, seriesList, maxValue=None):
"""
Same as the derivative function above, but ignores datapoints that trend
down. Useful for counters that increase for a long time, then wrap or
reset. (Such as if a network interface is destroyed and recreated by unloading
  and re-loading a kernel module, common with USB / WiFi cards.)
Example:
.. code-block:: none
    &target=nonNegativeDerivative(company.server.application01.ifconfig.TXPackets)
"""
results = []
for series in seriesList:
newValues = []
prev = None
for val in series:
if None in (prev, val):
newValues.append(None)
prev = val
continue
diff = val - prev
if diff >= 0:
newValues.append(diff)
elif maxValue is not None and maxValue >= val:
newValues.append( (maxValue - prev) + val + 1 )
else:
newValues.append(None)
prev = val
newName = "nonNegativeDerivative(%s)" % series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def stacked(requestContext,seriesLists,stackName='__DEFAULT__'):
"""
  Takes one metric or a wildcard seriesList and changes them so they are
  stacked. This is a way of stacking just a couple of metrics without having
  to use the stacked area mode (that stacks everything). By means of this, a mixed
  stacked and non-stacked graph can be made.
It can also take an optional argument with a name of the stack, in case there is
more than one, e.g. for input and output metrics.
Example:
.. code-block:: none
&target=stacked(company.server.application01.ifconfig.TXPackets, 'tx')
"""
if 'totalStack' in requestContext:
totalStack = requestContext['totalStack'].get(stackName, [])
else:
requestContext['totalStack'] = {}
    totalStack = []
results = []
for series in seriesLists:
newValues = []
for i in range(len(series)):
if len(totalStack) <= i: totalStack.append(0)
if series[i] is not None:
totalStack[i] += series[i]
newValues.append(totalStack[i])
else:
newValues.append(None)
# Work-around for the case when legend is set
if stackName=='__DEFAULT__':
newName = "stacked(%s)" % series.name
else:
newName = series.name
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.options['stacked'] = True
newSeries.pathExpression = newName
results.append(newSeries)
requestContext['totalStack'][stackName] = totalStack
return results
def areaBetween(requestContext, seriesList):
"""
Draws the vertical area in between the two series in seriesList. Useful for
visualizing a range such as the minimum and maximum latency for a service.
areaBetween expects **exactly one argument** that results in exactly two series
(see example below). The order of the lower and higher values series does not
matter. The visualization only works when used in conjunction with
``areaMode=stacked``.
Most likely use case is to provide a band within which another metric should
move. In such case applying an ``alpha()``, as in the second example, gives
best visual results.
Example:
.. code-block:: none
&target=areaBetween(service.latency.{min,max})&areaMode=stacked
&target=alpha(areaBetween(service.latency.{min,max}),0.3)&areaMode=stacked
If for instance, you need to build a seriesList, you should use the ``group``
function, like so:
.. code-block:: none
&target=areaBetween(group(minSeries(a.*.min),maxSeries(a.*.max)))
"""
assert len(seriesList) == 2, "areaBetween series argument must reference *exactly* 2 series"
lower = seriesList[0]
upper = seriesList[1]
lower.options['stacked'] = True
lower.options['invisible'] = True
upper.options['stacked'] = True
lower.name = upper.name = "areaBetween(%s)" % upper.pathExpression
return seriesList
def aliasSub(requestContext, seriesList, search, replace):
"""
Runs series names through a regex search/replace.
.. code-block:: none
&target=aliasSub(ip.*TCP*,"^.*TCP(\d+)","\\1")
"""
try:
seriesList.name = re.sub(search, replace, seriesList.name)
except AttributeError:
for series in seriesList:
series.name = re.sub(search, replace, series.name)
return seriesList
def alias(requestContext, seriesList, newName):
"""
Takes one metric or a wildcard seriesList and a string in quotes.
Prints the string instead of the metric name in the legend.
.. code-block:: none
&target=alias(Sales.widgets.largeBlue,"Large Blue Widgets")
"""
try:
seriesList.name = newName
except AttributeError:
for series in seriesList:
series.name = newName
return seriesList
def cactiStyle(requestContext, seriesList, system=None, units=None):
"""
Takes a series list and modifies the aliases to provide column aligned
output with Current, Max, and Min values in the style of cacti. Optionally
takes a "system" value to apply unit formatting in the same style as the
Y-axis, or a "unit" string to append an arbitrary unit suffix.
.. code-block:: none
&target=cactiStyle(ganglia.*.net.bytes_out,"si")
&target=cactiStyle(ganglia.*.net.bytes_out,"si","b")
A possible value for ``system`` is ``si``, which would express your values in
multiples of a thousand. A second option is to use ``binary`` which will
instead express your values in multiples of 1024 (useful for network devices).
Column alignment of the Current, Max, Min values works under two conditions:
you use a monospace font such as terminus and use a single cactiStyle call, as
separate cactiStyle calls are not aware of each other. In case you have
different targets for which you would like to have cactiStyle to line up, you
can use ``group()`` to combine them before applying cactiStyle, such as:
.. code-block:: none
&target=cactiStyle(group(metricA,metricB))
"""
if 0 == len(seriesList):
return seriesList
if system:
if units:
fmt = lambda x:"%.2f %s" % format_units(x,system=system,units=units)
else:
fmt = lambda x:"%.2f%s" % format_units(x,system=system)
else:
if units:
fmt = lambda x:"%.2f %s"%(x,units)
else:
fmt = lambda x:"%.2f"%x
nameLen = max([0] + [len(getattr(series,"name")) for series in seriesList])
lastLen = max([0] + [len(fmt(int(safeLast(series) or 3))) for series in seriesList]) + 3
maxLen = max([0] + [len(fmt(int(safeMax(series) or 3))) for series in seriesList]) + 3
minLen = max([0] + [len(fmt(int(safeMin(series) or 3))) for series in seriesList]) + 3
for series in seriesList:
name = series.name
last = safeLast(series)
maximum = safeMax(series)
minimum = safeMin(series)
if last is None:
last = NAN
else:
last = fmt(float(last))
if maximum is None:
maximum = NAN
else:
maximum = fmt(float(maximum))
if minimum is None:
minimum = NAN
else:
minimum = fmt(float(minimum))
series.name = "%*s Current:%*s Max:%*s Min:%*s " % \
(-nameLen, series.name,
-lastLen, last,
-maxLen, maximum,
-minLen, minimum)
return seriesList
def aliasByNode(requestContext, seriesList, *nodes):
"""
Takes a seriesList and applies an alias derived from one or more "node"
portion/s of the target name. Node indices are 0 indexed.
.. code-block:: none
&target=aliasByNode(ganglia.*.cpu.load5,1)
"""
if isinstance(nodes, int):
nodes=[nodes]
for series in seriesList:
metric_pieces = re.search('(?:.*\()?(?P<name>[-\w*\.]+)(?:,|\)?.*)?',series.name).groups()[0].split('.')
series.name = '.'.join(metric_pieces[n] for n in nodes)
return seriesList
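# Illustrative sketch (not part of the original module): what aliasByNode()
# above does to a series name that is wrapped in a function call. The regex
# strips the surrounding call, then the requested dot-separated nodes are
# joined back together.
def _aliasByNodeSketch():
  name = 'sumSeries(ganglia.server1.cpu.load5)'
  metric_pieces = re.search(
    '(?:.*\()?(?P<name>[-\w*\.]+)(?:,|\)?.*)?', name).groups()[0].split('.')
  return '.'.join(metric_pieces[n] for n in (1, 2))  # -> 'server1.cpu'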
def aliasByMetric(requestContext, seriesList):
"""
Takes a seriesList and applies an alias derived from the base metric name.
.. code-block:: none
&target=aliasByMetric(carbon.agents.graphite.creates)
"""
for series in seriesList:
series.name = series.name.split('.')[-1].split(',')[0]
return seriesList
def legendValue(requestContext, seriesList, *valueTypes):
"""
Takes one metric or a wildcard seriesList and a string in quotes.
Appends a value to the metric name in the legend. Currently one or several of: `last`, `avg`,
`total`, `min`, `max`.
The last argument can be `si` (default) or `binary`, in that case values will be formatted in the
corresponding system.
.. code-block:: none
&target=legendValue(Sales.widgets.largeBlue, 'avg', 'max', 'si')
"""
def last(s):
"Work-around for the missing last point"
v = s[-1]
if v is None:
return s[-2]
return v
valueFuncs = {
'avg': lambda s: safeDiv(safeSum(s), safeLen(s)),
'total': safeSum,
'min': safeMin,
'max': safeMax,
'last': last
}
system = None
if valueTypes[-1] in ('si', 'binary'):
system = valueTypes[-1]
valueTypes = valueTypes[:-1]
for valueType in valueTypes:
valueFunc = valueFuncs.get(valueType, lambda s: '(?)')
if system is None:
for series in seriesList:
series.name += " (%s: %s)" % (valueType, valueFunc(series))
else:
for series in seriesList:
value = valueFunc(series)
formatted = None
if value is not None:
formatted = "%.2f%s" % format_units(value, system=system)
series.name = "%-20s%-5s%-10s" % (series.name, valueType, formatted)
return seriesList
def alpha(requestContext, seriesList, alpha):
"""
Assigns the given alpha transparency setting to the series. Takes a float value between 0 and 1.
"""
for series in seriesList:
series.options['alpha'] = alpha
return seriesList
def color(requestContext, seriesList, theColor):
"""
Assigns the given color to the seriesList
Example:
.. code-block:: none
&target=color(collectd.hostname.cpu.0.user, 'green')
&target=color(collectd.hostname.cpu.0.system, 'ff0000')
&target=color(collectd.hostname.cpu.0.idle, 'gray')
&target=color(collectd.hostname.cpu.0.idle, '6464ffaa')
"""
for series in seriesList:
series.color = theColor
return seriesList
def substr(requestContext, seriesList, start=0, stop=0):
"""
Takes one metric or a wildcard seriesList followed by 1 or 2 integers. Assume that the
metric name is a list or array, with each element separated by dots. Prints
n - length elements of the array (if only one integer n is passed) or n - m
elements of the array (if two integers n and m are passed). The list starts
with element 0 and ends with element (length - 1).
Example:
.. code-block:: none
&target=substr(carbon.agents.hostname.avgUpdateTime,2,4)
The label would be printed as "hostname.avgUpdateTime".
"""
for series in seriesList:
left = series.name.rfind('(') + 1
right = series.name.find(')')
if right < 0:
right = len(series.name)+1
cleanName = series.name[left:right:]
if int(stop) == 0:
series.name = '.'.join(cleanName.split('.')[int(start)::])
else:
series.name = '.'.join(cleanName.split('.')[int(start):int(stop):])
# substr(func(a.b,'c'),1) becomes b instead of b,'c'
series.name = re.sub(',.*$', '', series.name)
return seriesList
def logarithm(requestContext, seriesList, base=10):
"""
  Takes one metric or a wildcard seriesList and a base, and applies the logarithm
  in the given base to each datapoint, which effectively draws the series on a
  logarithmic scale. If base is omitted, the function defaults to base 10.
Example:
.. code-block:: none
&target=log(carbon.agents.hostname.avgUpdateTime,2)
"""
results = []
for series in seriesList:
newValues = []
for val in series:
if val is None:
newValues.append(None)
elif val <= 0:
newValues.append(None)
else:
newValues.append(math.log(val, base))
newName = "log(%s, %s)" % (series.name, base)
newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def maximumAbove(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by a constant n.
Draws only the metrics with a maximum value above n.
Example:
.. code-block:: none
&target=maximumAbove(system.interface.eth*.packetsSent,1000)
This would only display interfaces which sent more than 1000 packets/min.
"""
results = []
for series in seriesList:
if safeMax(series) > n:
results.append(series)
return results
def minimumAbove(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by a constant n.
Draws only the metrics with a minimum value above n.
Example:
.. code-block:: none
&target=minimumAbove(system.interface.eth*.packetsSent,1000)
This would only display interfaces which sent more than 1000 packets/min.
"""
results = []
for series in seriesList:
if safeMin(series) > n:
results.append(series)
return results
def maximumBelow(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by a constant n.
Draws only the metrics with a maximum value below n.
Example:
.. code-block:: none
&target=maximumBelow(system.interface.eth*.packetsSent,1000)
This would only display interfaces which sent less than 1000 packets/min.
"""
result = []
for series in seriesList:
if safeMax(series) <= n:
result.append(series)
return result
def minimumBelow(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by a constant n.
Draws only the metrics with a minimum value below n.
Example:
.. code-block:: none
&target=minimumBelow(system.interface.eth*.packetsSent,1000)
This would only display interfaces which at one point sent less than 1000 packets/min.
"""
result = []
for series in seriesList:
if safeMin(series) <= n:
result.append(series)
return result
def highestCurrent(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the N metrics with the highest value
at the end of the time period specified.
Example:
.. code-block:: none
&target=highestCurrent(server*.instance*.threads.busy,5)
Draws the 5 servers with the highest busy threads.
"""
return sorted( seriesList, key=safeLast )[-n:]
def highestMax(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the N metrics with the highest maximum
value in the time period specified.
Example:
.. code-block:: none
&target=highestMax(server*.instance*.threads.busy,5)
Draws the top 5 servers who have had the most busy threads during the time
period specified.
"""
result_list = sorted( seriesList, key=lambda s: safeMax(s) )[-n:]
return sorted(result_list, key=lambda s: max(s), reverse=True)
def lowestCurrent(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the N metrics with the lowest value at
the end of the time period specified.
Example:
.. code-block:: none
&target=lowestCurrent(server*.instance*.threads.busy,5)
Draws the 5 servers with the least busy threads right now.
"""
return sorted( seriesList, key=safeLast )[:n]
def currentAbove(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the metrics whose value is above N
at the end of the time period specified.
Example:
.. code-block:: none
&target=currentAbove(server*.instance*.threads.busy,50)
Draws the servers with more than 50 busy threads.
"""
return [ series for series in seriesList if safeLast(series) >= n ]
def currentBelow(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the metrics whose value is below N
at the end of the time period specified.
Example:
.. code-block:: none
&target=currentBelow(server*.instance*.threads.busy,3)
Draws the servers with less than 3 busy threads.
"""
return [ series for series in seriesList if safeLast(series) <= n ]
def highestAverage(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the top N metrics with the highest
average value for the time period specified.
Example:
.. code-block:: none
&target=highestAverage(server*.instance*.threads.busy,5)
Draws the top 5 servers with the highest average value.
"""
return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[-n:]
def lowestAverage(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the bottom N metrics with the lowest
average value for the time period specified.
Example:
.. code-block:: none
&target=lowestAverage(server*.instance*.threads.busy,5)
Draws the bottom 5 servers with the lowest average value.
"""
return sorted( seriesList, key=lambda s: safeDiv(safeSum(s),safeLen(s)) )[:n]
def averageAbove(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the metrics with an average value
above N for the time period specified.
Example:
.. code-block:: none
&target=averageAbove(server*.instance*.threads.busy,25)
Draws the servers with average values above 25.
"""
return [ series for series in seriesList if safeDiv(safeSum(series),safeLen(series)) >= n ]
def averageBelow(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Out of all metrics passed, draws only the metrics with an average value
below N for the time period specified.
Example:
.. code-block:: none
&target=averageBelow(server*.instance*.threads.busy,25)
Draws the servers with average values below 25.
"""
return [ series for series in seriesList if safeDiv(safeSum(series),safeLen(series)) <= n ]
def _getPercentile(points, n, interpolate=False):
"""
Percentile is calculated using the method outlined in the NIST Engineering
Statistics Handbook:
http://www.itl.nist.gov/div898/handbook/prc/section2/prc252.htm
"""
sortedPoints = sorted([ p for p in points if p is not None])
if len(sortedPoints) == 0:
return None
fractionalRank = (n/100.0) * (len(sortedPoints) + 1)
rank = int(fractionalRank)
rankFraction = fractionalRank - rank
if not interpolate:
rank += int(math.ceil(rankFraction))
if rank == 0:
percentile = sortedPoints[0]
elif rank - 1 == len(sortedPoints):
percentile = sortedPoints[-1]
else:
percentile = sortedPoints[rank - 1] # Adjust for 0-index
if interpolate:
if rank != len(sortedPoints): # if a next value exists
nextValue = sortedPoints[rank]
percentile = percentile + rankFraction * (nextValue - percentile)
return percentile
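# Illustrative sketch (not part of the original module): a worked example of
# the NIST rank calculation performed by _getPercentile() above.
def _percentileSketch():
  points = [15, 20, 35, 40, 50]
  # 40th percentile: fractionalRank = 0.4 * (5 + 1) = 2.4, rank = 2 and
  # rankFraction = 0.4. Without interpolation the rank is rounded up to 3,
  # giving sortedPoints[2] == 35. With interpolation the result is
  # 20 + 0.4 * (35 - 20), approximately 26.0.
  return _getPercentile(points, 40), _getPercentile(points, 40, interpolate=True)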
def nPercentile(requestContext, seriesList, n):
"""Returns n-percent of each series in the seriesList."""
assert n, 'The requested percent is required to be greater than 0'
results = []
for s in seriesList:
# Create a sorted copy of the TimeSeries excluding None values in the values list.
s_copy = TimeSeries( s.name, s.start, s.end, s.step, sorted( [item for item in s if item is not None] ) )
if not s_copy:
continue # Skip this series because it is empty.
perc_val = _getPercentile(s_copy, n)
if perc_val is not None:
name = 'nPercentile(%s, %g)' % (s_copy.name, n)
point_count = int((s.end - s.start)/s.step)
perc_series = TimeSeries(name, s_copy.start, s_copy.end, s_copy.step, [perc_val] * point_count )
perc_series.pathExpression = name
results.append(perc_series)
return results
def averageOutsidePercentile(requestContext, seriesList, n):
"""
Removes functions lying inside an average percentile interval
"""
averages = []
for s in seriesList:
averages.append(safeDiv(safeSum(s), safeLen(s)))
if n < 50:
    n = 100 - n
lowPercentile = _getPercentile(averages, 100 - n)
highPercentile = _getPercentile(averages, n)
return [s for s in seriesList if not lowPercentile < safeDiv(safeSum(s), safeLen(s)) < highPercentile]
def removeBetweenPercentile(requestContext, seriesList, n):
"""
  Removes series that do not have a value lying in the x-percentile of all the values at a given moment.
"""
if n < 50:
n = 100 - n
transposed = zip(*seriesList)
lowPercentiles = [_getPercentile(col, 100-n) for col in transposed]
highPercentiles = [_getPercentile(col, n) for col in transposed]
return [l for l in seriesList if sum([not lowPercentiles[val_i] < val < highPercentiles[val_i]
for (val_i, val) in enumerate(l)]) > 0]
def removeAbovePercentile(requestContext, seriesList, n):
"""
Removes data above the nth percentile from the series or list of series provided.
Values above this percentile are assigned a value of None.
"""
for s in seriesList:
s.name = 'removeAbovePercentile(%s, %g)' % (s.name, n)
s.pathExpression = s.name
try:
percentile = nPercentile(requestContext, [s], n)[0][0]
except IndexError:
continue
for (index, val) in enumerate(s):
if val > percentile:
s[index] = None
return seriesList
def removeAboveValue(requestContext, seriesList, n):
"""
Removes data above the given threshold from the series or list of series provided.
Values above this threshold are assigned a value of None.
"""
for s in seriesList:
s.name = 'removeAboveValue(%s, %g)' % (s.name, n)
s.pathExpression = s.name
for (index, val) in enumerate(s):
if val > n:
s[index] = None
return seriesList
def removeBelowPercentile(requestContext, seriesList, n):
"""
Removes data below the nth percentile from the series or list of series provided.
Values below this percentile are assigned a value of None.
"""
for s in seriesList:
s.name = 'removeBelowPercentile(%s, %g)' % (s.name, n)
s.pathExpression = s.name
try:
percentile = nPercentile(requestContext, [s], n)[0][0]
except IndexError:
continue
for (index, val) in enumerate(s):
if val < percentile:
s[index] = None
return seriesList
def removeBelowValue(requestContext, seriesList, n):
"""
Removes data below the given threshold from the series or list of series provided.
Values below this threshold are assigned a value of None.
"""
for s in seriesList:
s.name = 'removeBelowValue(%s, %g)' % (s.name, n)
s.pathExpression = s.name
for (index, val) in enumerate(s):
if val < n:
s[index] = None
return seriesList
def limit(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Only draw the first N metrics. Useful when testing a wildcard in a metric.
Example:
.. code-block:: none
&target=limit(server*.instance*.memory.free,5)
Draws only the first 5 instance's memory free.
"""
return seriesList[0:n]
def sortByName(requestContext, seriesList, natural=False):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the metric name using either alphabetical order or natural sorting.
  Natural sorting allows names containing numbers to be sorted more naturally, e.g.:
- Alphabetical sorting: server1, server11, server12, server2
- Natural sorting: server1, server2, server11, server12
"""
def paddedName(name):
return re.sub("(\d+)", lambda x: "{0:010}".format(int(x.group(0))), name)
def compare(x,y):
return cmp(x.name, y.name)
def natSortCompare(x,y):
return cmp(paddedName(x.name), paddedName(y.name))
if natural:
seriesList.sort(natSortCompare)
else:
seriesList.sort(compare)
return seriesList
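# Illustrative sketch (not part of the original module): the zero-padding trick
# that sortByName(..., natural=True) above relies on. Padding every digit run
# to a fixed width makes a plain string comparison order the numbers naturally.
def _naturalSortSketch():
  names = ['server12', 'server1', 'server2', 'server11']
  padded = lambda name: re.sub("(\d+)", lambda x: "{0:010}".format(int(x.group(0))), name)
  return sorted(names, key=padded)  # -> ['server1', 'server2', 'server11', 'server12']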
def sortByTotal(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the sum of values across the time period
specified.
"""
def compare(x,y):
return cmp(safeSum(y), safeSum(x))
seriesList.sort(compare)
return seriesList
def sortByMaxima(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the maximum value across the time period
specified. Useful with the &areaMode=all parameter, to keep the
lowest value lines visible.
Example:
.. code-block:: none
&target=sortByMaxima(server*.instance*.memory.free)
"""
def compare(x,y):
return cmp(max(y), max(x))
seriesList.sort(compare)
return seriesList
def sortByMinima(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Sorts the list of metrics by the lowest value across the time period
specified.
Example:
.. code-block:: none
&target=sortByMinima(server*.instance*.memory.free)
"""
def compare(x,y):
return cmp(min(x), min(y))
newSeries = [series for series in seriesList if max(series) > 0]
newSeries.sort(compare)
return newSeries
def useSeriesAbove(requestContext, seriesList, value, search, replace):
"""
Compares the maximum of each series against the given `value`. If the series
maximum is greater than `value`, the regular expression search and replace is
applied against the series name to plot a related metric
e.g. given useSeriesAbove(ganglia.metric1.reqs,10,'reqs','time'),
the response time metric will be plotted only when the maximum value of the
corresponding request/s metric is > 10
.. code-block:: none
&target=useSeriesAbove(ganglia.metric1.reqs,10,"reqs","time")
"""
newSeries = []
for series in seriesList:
newname = re.sub(search, replace, series.name)
if max(series) > value:
n = evaluateTarget(requestContext, newname)
if n is not None and len(n) > 0:
newSeries.append(n[0])
return newSeries
def fallbackSeries(requestContext, seriesList, fallback):
"""
Takes a wildcard seriesList, and a second fallback metric.
If the wildcard does not match any series, draws the fallback metric.
Example:
.. code-block:: none
&target=fallbackSeries(server*.requests_per_second, constantLine(0))
Draws a 0 line when server metric does not exist.
"""
if len(seriesList) > 0:
return seriesList
else:
return fallback
def mostDeviant(requestContext, seriesList, n):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Draws the N most deviant metrics.
To find the deviants, the standard deviation (sigma) of each series
is taken and ranked. The top N standard deviations are returned.
Example:
.. code-block:: none
&target=mostDeviant(server*.instance*.memory.free, 5)
Draws the 5 instances furthest from the average memory free.
"""
deviants = []
for series in seriesList:
mean = safeDiv( safeSum(series), safeLen(series) )
if mean is None: continue
square_sum = sum([ (value - mean) ** 2 for value in series if value is not None ])
sigma = safeDiv(square_sum, safeLen(series))
if sigma is None: continue
deviants.append( (sigma, series) )
deviants.sort(key=lambda i: i[0], reverse=True) #sort by sigma
return [ series for (_, series) in deviants ][:n] #return the n most deviant series
def stdev(requestContext, seriesList, points, windowTolerance=0.1):
"""
Takes one metric or a wildcard seriesList followed by an integer N.
Draw the Standard Deviation of all metrics passed for the past N datapoints.
If the ratio of null points in the window is greater than windowTolerance,
skip the calculation. The default for windowTolerance is 0.1 (up to 10% of points
in the window can be missing). Note that if this is set to 0.0, it will cause large
gaps in the output anywhere a single point is missing.
Example:
.. code-block:: none
&target=stdev(server*.instance*.threads.busy,30)
&target=stdev(server*.instance*.cpu.system,30,0.0)
"""
# For this we take the standard deviation in terms of the moving average
# and the moving average of series squares.
for (seriesIndex,series) in enumerate(seriesList):
stdevSeries = TimeSeries("stdev(%s,%d)" % (series.name, int(points)), series.start, series.end, series.step, [])
stdevSeries.pathExpression = "stdev(%s,%d)" % (series.name, int(points))
validPoints = 0
currentSum = 0
currentSumOfSquares = 0
for (index, newValue) in enumerate(series):
# Mark whether we've reached our window size - dont drop points out otherwise
if index < points:
bootstrapping = True
droppedValue = None
else:
bootstrapping = False
droppedValue = series[index - points]
# Track non-None points in window
if not bootstrapping and droppedValue is not None:
validPoints -= 1
if newValue is not None:
validPoints += 1
# Remove the value that just dropped out of the window
if not bootstrapping and droppedValue is not None:
currentSum -= droppedValue
currentSumOfSquares -= droppedValue**2
# Add in the value that just popped in the window
if newValue is not None:
currentSum += newValue
currentSumOfSquares += newValue**2
if validPoints > 0 and \
float(validPoints)/points >= windowTolerance:
try:
deviation = math.sqrt(validPoints * currentSumOfSquares - currentSum**2)/validPoints
except ValueError:
deviation = None
stdevSeries.append(deviation)
else:
stdevSeries.append(None)
seriesList[seriesIndex] = stdevSeries
return seriesList
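# Illustrative sketch (not part of the original module): the single-pass
# population standard deviation used by stdev() above, applied to one plain
# window of values: sqrt(N * sum(x^2) - (sum(x))^2) / N.
def _windowStdevSketch(window):
  values = [v for v in window if v is not None]
  n = len(values)
  if n == 0:
    return None
  s = sum(values)
  sq = sum(v ** 2 for v in values)
  return math.sqrt(n * sq - s ** 2) / n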
def secondYAxis(requestContext, seriesList):
"""
Graph the series on the secondary Y axis.
"""
for series in seriesList:
series.options['secondYAxis'] = True
series.name= 'secondYAxis(%s)' % series.name
return seriesList
def holtWintersIntercept(alpha,actual,last_season,last_intercept,last_slope):
return alpha * (actual - last_season) \
+ (1 - alpha) * (last_intercept + last_slope)
def holtWintersSlope(beta,intercept,last_intercept,last_slope):
return beta * (intercept - last_intercept) + (1 - beta) * last_slope
def holtWintersSeasonal(gamma,actual,intercept,last_season):
return gamma * (actual - intercept) + (1 - gamma) * last_season
def holtWintersDeviation(gamma,actual,prediction,last_seasonal_dev):
if prediction is None:
prediction = 0
return gamma * math.fabs(actual - prediction) + (1 - gamma) * last_seasonal_dev
def holtWintersAnalysis(series):
alpha = gamma = 0.1
beta = 0.0035
# season is currently one day
season_length = (24*60*60) / series.step
intercept = 0
slope = 0
pred = 0
intercepts = list()
slopes = list()
seasonals = list()
predictions = list()
deviations = list()
def getLastSeasonal(i):
j = i - season_length
if j >= 0:
return seasonals[j]
return 0
def getLastDeviation(i):
j = i - season_length
if j >= 0:
return deviations[j]
return 0
last_seasonal = 0
last_seasonal_dev = 0
next_last_seasonal = 0
next_pred = None
for i,actual in enumerate(series):
if actual is None:
# missing input values break all the math
# do the best we can and move on
intercepts.append(None)
slopes.append(0)
seasonals.append(0)
predictions.append(next_pred)
deviations.append(0)
next_pred = None
continue
if i == 0:
last_intercept = actual
last_slope = 0
# seed the first prediction as the first actual
prediction = actual
else:
last_intercept = intercepts[-1]
last_slope = slopes[-1]
if last_intercept is None:
last_intercept = actual
prediction = next_pred
last_seasonal = getLastSeasonal(i)
next_last_seasonal = getLastSeasonal(i+1)
last_seasonal_dev = getLastDeviation(i)
intercept = holtWintersIntercept(alpha,actual,last_seasonal
,last_intercept,last_slope)
slope = holtWintersSlope(beta,intercept,last_intercept,last_slope)
seasonal = holtWintersSeasonal(gamma,actual,intercept,last_seasonal)
next_pred = intercept + slope + next_last_seasonal
deviation = holtWintersDeviation(gamma,actual,prediction,last_seasonal_dev)
intercepts.append(intercept)
slopes.append(slope)
seasonals.append(seasonal)
predictions.append(prediction)
deviations.append(deviation)
# make the new forecast series
forecastName = "holtWintersForecast(%s)" % series.name
forecastSeries = TimeSeries(forecastName, series.start, series.end
, series.step, predictions)
forecastSeries.pathExpression = forecastName
# make the new deviation series
deviationName = "holtWintersDeviation(%s)" % series.name
deviationSeries = TimeSeries(deviationName, series.start, series.end
, series.step, deviations)
deviationSeries.pathExpression = deviationName
results = { 'predictions': forecastSeries
, 'deviations': deviationSeries
, 'intercepts': intercepts
, 'slopes': slopes
, 'seasonals': seasonals
}
return results
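# Illustrative sketch (not part of the original module): a single update step
# of the Holt-Winters smoothing performed by holtWintersAnalysis() above, with
# made-up state values and the same smoothing constants.
def _holtWintersStepSketch():
  alpha, beta, gamma = 0.1, 0.0035, 0.1
  actual, last_intercept, last_slope, last_seasonal = 42.0, 40.0, 0.5, 1.0
  intercept = holtWintersIntercept(alpha, actual, last_seasonal, last_intercept, last_slope)
  slope = holtWintersSlope(beta, intercept, last_intercept, last_slope)
  seasonal = holtWintersSeasonal(gamma, actual, intercept, last_seasonal)
  # the next prediction is the new level plus the trend plus the seasonal term
  # from one season ago (next_last_seasonal in the original loop)
  return intercept + slope + last_seasonal, seasonal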
def holtWintersForecast(requestContext, seriesList):
"""
Performs a Holt-Winters forecast using the series as input data. Data from
one week previous to the series is used to bootstrap the initial forecast.
"""
previewSeconds = 7 * 86400 # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = requestContext['startTime'] - timedelta(seconds=previewSeconds)
previewList = evaluateTokens(newContext, requestContext['args'][0])
results = []
for series in previewList:
analysis = holtWintersAnalysis(series)
predictions = analysis['predictions']
windowPoints = previewSeconds / predictions.step
result = TimeSeries("holtWintersForecast(%s)" % series.name, predictions.start + previewSeconds, predictions.end, predictions.step, predictions[windowPoints:])
result.pathExpression = result.name
results.append(result)
return results
def holtWintersConfidenceBands(requestContext, seriesList, delta=3):
"""
Performs a Holt-Winters forecast using the series as input data and plots
upper and lower bands with the predicted forecast deviations.
"""
previewSeconds = 7 * 86400 # 7 days
# ignore original data and pull new, including our preview
newContext = requestContext.copy()
newContext['startTime'] = requestContext['startTime'] - timedelta(seconds=previewSeconds)
previewList = evaluateTokens(newContext, requestContext['args'][0])
results = []
for series in previewList:
analysis = holtWintersAnalysis(series)
data = analysis['predictions']
windowPoints = previewSeconds / data.step
forecast = TimeSeries(data.name, data.start + previewSeconds, data.end, data.step, data[windowPoints:])
forecast.pathExpression = data.pathExpression
data = analysis['deviations']
windowPoints = previewSeconds / data.step
deviation = TimeSeries(data.name, data.start + previewSeconds, data.end, data.step, data[windowPoints:])
deviation.pathExpression = data.pathExpression
seriesLength = len(forecast)
i = 0
upperBand = list()
lowerBand = list()
while i < seriesLength:
forecast_item = forecast[i]
deviation_item = deviation[i]
i = i + 1
if forecast_item is None or deviation_item is None:
upperBand.append(None)
lowerBand.append(None)
else:
scaled_deviation = delta * deviation_item
upperBand.append(forecast_item + scaled_deviation)
lowerBand.append(forecast_item - scaled_deviation)
upperName = "holtWintersConfidenceUpper(%s)" % series.name
lowerName = "holtWintersConfidenceLower(%s)" % series.name
upperSeries = TimeSeries(upperName, forecast.start, forecast.end
, forecast.step, upperBand)
lowerSeries = TimeSeries(lowerName, forecast.start, forecast.end
, forecast.step, lowerBand)
upperSeries.pathExpression = series.pathExpression
lowerSeries.pathExpression = series.pathExpression
results.append(lowerSeries)
results.append(upperSeries)
return results
def holtWintersAberration(requestContext, seriesList, delta=3):
"""
Performs a Holt-Winters forecast using the series as input data and plots the
positive or negative deviation of the series data from the forecast.
"""
results = []
for series in seriesList:
confidenceBands = holtWintersConfidenceBands(requestContext, [series], delta)
lowerBand = confidenceBands[0]
upperBand = confidenceBands[1]
aberration = list()
for i, actual in enumerate(series):
if series[i] is None:
aberration.append(0)
elif upperBand[i] is not None and series[i] > upperBand[i]:
aberration.append(series[i] - upperBand[i])
elif lowerBand[i] is not None and series[i] < lowerBand[i]:
aberration.append(series[i] - lowerBand[i])
else:
aberration.append(0)
newName = "holtWintersAberration(%s)" % series.name
results.append(TimeSeries(newName, series.start, series.end
, series.step, aberration))
return results
def holtWintersConfidenceArea(requestContext, seriesList, delta=3):
"""
Performs a Holt-Winters forecast using the series as input data and plots the
area between the upper and lower bands of the predicted forecast deviations.
"""
bands = holtWintersConfidenceBands(requestContext, seriesList, delta)
results = areaBetween(requestContext, bands)
for series in results:
series.name = series.name.replace('areaBetween', 'holtWintersConfidenceArea')
return results
def linearRegressionAnalysis(series):
"""
  Returns the factor and offset of the linear regression function computed by the least-squares method.
"""
n = safeLen(series)
sumI = sum([i for i,v in enumerate(series) if v is not None])
sumV = sum([v for i,v in enumerate(series) if v is not None])
sumII = sum([i*i for i,v in enumerate(series) if v is not None])
sumIV = sum([i*v for i,v in enumerate(series) if v is not None])
denominator = float(n*sumII - sumI*sumI)
if denominator == 0:
return None
else:
factor = (n * sumIV - sumI * sumV) / denominator / series.step
offset = (sumII * sumV - sumIV * sumI) /denominator - factor * series.start
return factor, offset
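# Illustrative sketch (not part of the original module): the least-squares fit
# from linearRegressionAnalysis() above recovers the slope and intercept of a
# noiseless line. The series follows value = 3 * time + 5 with a 10-second step.
def _linearRegressionSketch():
  start, step = 100, 10
  values = [3 * (start + i * step) + 5 for i in range(6)]
  series = TimeSeries('line', start, start + len(values) * step, step, values)
  factor, offset = linearRegressionAnalysis(series)
  # factor is the per-second slope (3.0) and offset the value at time 0 (5.0)
  return factor, offset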
def linearRegression(requestContext, seriesList, startSourceAt=None, endSourceAt=None):
"""
  Graphs the linear regression function computed by the least-squares method.
Takes one metric or a wildcard seriesList, followed by a quoted string with the
time to start the line and another quoted string with the time to end the line.
  The start and end times are inclusive (the default range is ``from`` to ``until``). See
  ``from / until`` in the render\_api_ for examples of time formats. Datapoints
  in that range are used for the regression.
Example:
.. code-block:: none
&target=linearRegression(Server.instance01.threads.busy, '-1d')
&target=linearRegression(Server.instance*.threads.busy, "00:00 20140101","11:59 20140630")
"""
results = []
sourceContext = requestContext.copy()
if startSourceAt is not None: sourceContext['startTime'] = parseATTime(startSourceAt)
if endSourceAt is not None: sourceContext['endTime'] = parseATTime(endSourceAt)
sourceList = []
for series in seriesList:
source = evaluateTarget(sourceContext, series.pathExpression)
sourceList.extend(source)
for source,series in zip(sourceList, seriesList):
newName = 'linearRegression(%s, %s, %s)' % (
series.name,
int(time.mktime(sourceContext['startTime'].timetuple())),
int(time.mktime(sourceContext['endTime'].timetuple()))
)
forecast = linearRegressionAnalysis(source)
if forecast is None:
continue
factor, offset = forecast
values = [ offset + (series.start + i * series.step) * factor for i in range(len(series)) ]
newSeries = TimeSeries(newName, series.start, series.end, series.step, values)
newSeries.pathExpression = newSeries.name
results.append(newSeries)
return results
def drawAsInfinite(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
If the value is zero, draw the line at 0. If the value is above zero, draw
the line at infinity. If the value is null or less than zero, do not draw
the line.
Useful for displaying on/off metrics, such as exit codes. (0 = success,
anything else = failure.)
Example:
.. code-block:: none
drawAsInfinite(Testing.script.exitCode)
"""
for series in seriesList:
series.options['drawAsInfinite'] = True
series.name = 'drawAsInfinite(%s)' % series.name
return seriesList
def lineWidth(requestContext, seriesList, width):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
Draw the selected metrics with a line width of F, overriding the default
value of 1, or the &lineWidth=X.X parameter.
Useful for highlighting a single metric out of many, or having multiple
line widths in one graph.
Example:
.. code-block:: none
&target=lineWidth(server01.instance01.memory.free,5)
"""
for series in seriesList:
series.options['lineWidth'] = width
return seriesList
def dashed(requestContext, *seriesList):
"""
Takes one metric or a wildcard seriesList, followed by a float F.
  Draws the selected metrics with a dashed line with segments of length F.
  If omitted, the default length of the segments is 5.0.
Example:
.. code-block:: none
&target=dashed(server01.instance01.memory.free,2.5)
"""
if len(seriesList) == 2:
dashLength = seriesList[1]
else:
dashLength = 5
for series in seriesList[0]:
series.name = 'dashed(%s, %g)' % (series.name, dashLength)
series.options['dashed'] = dashLength
return seriesList[0]
def timeStack(requestContext, seriesList, timeShiftUnit, timeShiftStart, timeShiftEnd):
"""
Takes one metric or a wildcard seriesList, followed by a quoted string with the
length of time (See ``from / until`` in the render\_api_ for examples of time formats).
  Also takes a start multiplier and an end multiplier for the length of time, and
  creates a seriesList which is composed of the original metric series stacked with time shifts,
  starting from the start multiplier through the end multiplier.
Useful for looking at history, or feeding into averageSeries or stddevSeries.
Example:
.. code-block:: none
&target=timeStack(Sales.widgets.largeBlue,"1d",0,7) # create a series for today and each of the previous 7 days
"""
# Default to negative. parseTimeOffset defaults to +
if timeShiftUnit[0].isdigit():
timeShiftUnit = '-' + timeShiftUnit
delta = parseTimeOffset(timeShiftUnit)
# if len(seriesList) > 1, they will all have the same pathExpression, which is all we care about.
series = seriesList[0]
results = []
timeShiftStartint = int(timeShiftStart)
timeShiftEndint = int(timeShiftEnd)
for shft in range(timeShiftStartint,timeShiftEndint):
myContext = requestContext.copy()
innerDelta = delta * shft
myContext['startTime'] = requestContext['startTime'] + innerDelta
myContext['endTime'] = requestContext['endTime'] + innerDelta
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, %s, %s)' % (shiftedSeries.name, timeShiftUnit,shft)
shiftedSeries.pathExpression = shiftedSeries.name
shiftedSeries.start = series.start
shiftedSeries.end = series.end
results.append(shiftedSeries)
return results
def timeShift(requestContext, seriesList, timeShift, resetEnd=True, alignDST=False):
"""
Takes one metric or a wildcard seriesList, followed by a quoted string with the
length of time (See ``from / until`` in the render\_api_ for examples of time formats).
Draws the selected metrics shifted in time. If no sign is given, a minus sign ( - ) is
implied which will shift the metric back in time. If a plus sign ( + ) is given, the
metric will be shifted forward in time.
  Will reset the end date range automatically to the end of the base stat unless
  resetEnd is False. An example case is when you timeshift to last week and have the graph
  date range set to include a time in the future; this will limit the timeshift to pretend
  it ends at the current time. If resetEnd is False, the full range including
  future time will be drawn instead.
Because time is shifted by a fixed number of seconds, comparing a time period with DST to
a time period without DST, and vice-versa, will result in an apparent misalignment. For
example, 8am might be overlaid with 7am. To compensate for this, use the alignDST option.
  Useful for comparing a metric against itself at past periods or correcting data
stored at an offset.
Example:
.. code-block:: none
&target=timeShift(Sales.widgets.largeBlue,"7d")
&target=timeShift(Sales.widgets.largeBlue,"-7d")
&target=timeShift(Sales.widgets.largeBlue,"+1h")
"""
# Default to negative. parseTimeOffset defaults to +
if timeShift[0].isdigit():
timeShift = '-' + timeShift
delta = parseTimeOffset(timeShift)
myContext = requestContext.copy()
myContext['startTime'] = requestContext['startTime'] + delta
myContext['endTime'] = requestContext['endTime'] + delta
if alignDST:
def localDST(dt):
return time.localtime(time.mktime(dt.timetuple())).tm_isdst
reqStartDST = localDST(requestContext['startTime'])
reqEndDST = localDST(requestContext['endTime'])
myStartDST = localDST(myContext['startTime'])
myEndDST = localDST(myContext['endTime'])
dstOffset = timedelta(hours=0)
# If the requestContext is entirely in DST, and we are entirely NOT in DST
if ((reqStartDST and reqEndDST) and (not myStartDST and not myEndDST)):
dstOffset = timedelta(hours=1)
# Or if the requestContext is entirely NOT in DST, and we are entirely in DST
elif ((not reqStartDST and not reqEndDST) and (myStartDST and myEndDST)):
dstOffset = timedelta(hours=-1)
# Otherwise, we don't do anything, because it would be visually confusing
myContext['startTime'] += dstOffset
myContext['endTime'] += dstOffset
results = []
if len(seriesList) > 0:
# if len(seriesList) > 1, they will all have the same pathExpression, which is all we care about.
series = seriesList[0]
for shiftedSeries in evaluateTarget(myContext, series.pathExpression):
shiftedSeries.name = 'timeShift(%s, "%s")' % (shiftedSeries.name, timeShift)
if resetEnd:
shiftedSeries.end = series.end
else:
shiftedSeries.end = shiftedSeries.end - shiftedSeries.start + series.start
shiftedSeries.start = series.start
results.append(shiftedSeries)
return results
def timeSlice(requestContext, seriesList, startSliceAt, endSliceAt="now"):
"""
Takes one metric or a wildcard metric, followed by a quoted string with the
time to start the line and another quoted string with the time to end the line.
The start and end times are inclusive. See ``from / until`` in the render\_api_
for examples of time formats.
Useful for filtering out a part of a series of data from a wider range of
data.
Example:
.. code-block:: none
&target=timeSlice(network.core.port1,"00:00 20140101","11:59 20140630")
&target=timeSlice(network.core.port1,"12:00 20140630","now")
"""
results = []
start = time.mktime(parseATTime(startSliceAt).timetuple())
end = time.mktime(parseATTime(endSliceAt).timetuple())
for slicedSeries in seriesList:
slicedSeries.name = 'timeSlice(%s, %s, %s)' % (slicedSeries.name, int(start), int(end))
curr = time.mktime(requestContext["startTime"].timetuple())
for i, v in enumerate(slicedSeries):
if v is None or curr < start or curr > end:
slicedSeries[i] = None
curr += slicedSeries.step
results.append(slicedSeries)
return results
def constantLine(requestContext, value):
"""
Takes a float F.
Draws a horizontal line at value F across the graph.
Example:
.. code-block:: none
&target=constantLine(123.456)
"""
name = "constantLine(%s)" % str(value)
start = int(epoch( requestContext['startTime'] ) )
end = int(epoch( requestContext['endTime'] ) )
step = int((end - start) / 2.0)
series = TimeSeries(str(value), start, end, step, [value, value, value])
series.pathExpression = name
return [series]
def aggregateLine(requestContext, seriesList, func='avg'):
"""
Takes a metric or wildcard seriesList and draws a horizontal line
based on the function applied to each series.
Note: By default, the graphite renderer consolidates data points by
averaging data points over time. If you are using the 'min' or 'max'
function for aggregateLine, this can cause an unusual gap in the
line drawn by this function and the data itself. To fix this, you
should use the consolidateBy() function with the same function
argument you are using for aggregateLine. This will ensure that the
proper data points are retained and the graph should line up
correctly.
Example:
.. code-block:: none
&target=aggregateLine(server01.connections.total, 'avg')
&target=aggregateLine(server*.connections.total, 'avg')
"""
t_funcs = { 'avg': safeAvg, 'min': safeMin, 'max': safeMax }
if func not in t_funcs:
raise ValueError("Invalid function %s" % func)
results = []
for series in seriesList:
value = t_funcs[func](series)
if value is not None:
name = 'aggregateLine(%s, %g)' % (series.name, value)
else:
name = 'aggregateLine(%s, None)' % (series.name)
[series] = constantLine(requestContext, value)
series.name = name
series.pathExpression = series.name
results.append(series)
return results
def verticalLine(requestContext, ts, label=None, color=None):
"""
Takes a timestamp string ts.
Draws a vertical line at the designated timestamp with optional
'label' and 'color'. Supported timestamp formats include both
relative (e.g. -3h) and absolute (e.g. 16:00_20110501) strings,
such as those used with ``from`` and ``until`` parameters. When
set, the 'label' will appear in the graph legend.
Note: Any timestamps defined outside the requested range will
raise a 'ValueError' exception.
Example:
.. code-block:: none
&target=verticalLine("12:3420131108","event","blue")
&target=verticalLine("16:00_20110501","event")
&target=verticalLine("-5mins")
"""
ts = int(timestamp( parseATTime(ts, requestContext['tzinfo']) ))
start = int(timestamp( requestContext['startTime'] ))
end = int(timestamp( requestContext['endTime'] ))
if ts < start:
raise ValueError("verticalLine(): timestamp %s exists before start of range" % ts)
elif ts > end:
raise ValueError("verticalLine(): timestamp %s exists after end of range" % ts)
start = end = ts
step = 1.0
series = TimeSeries(label, start, end, step, [1.0, 1.0])
series.options['drawAsInfinite'] = True
if color:
series.color = color
return [series]
def threshold(requestContext, value, label=None, color=None):
"""
Takes a float F, followed by a label (in double quotes) and a color.
(See ``bgcolor`` in the render\_api_ for valid color names & formats.)
Draws a horizontal line at value F across the graph.
Example:
.. code-block:: none
&target=threshold(123.456, "omgwtfbbq", "red")
"""
series = constantLine(requestContext, value)[0]
if label:
series.name = label
if color:
series.color = color
return [series]
def transformNull(requestContext, seriesList, default=0, referenceSeries=None):
"""
Takes a metric or wildcard seriesList and replaces null values with the value
specified by `default`. The value 0 is used if not specified. The optional
referenceSeries, if specified, is a metric or wildcard series list that governs
which time intervals nulls should be replaced. If specified, nulls are replaced
only in intervals where a non-null is found for the same interval in any of
referenceSeries. This method complements the drawNullAsZero function in
graphical mode, but also works in text-only mode.
Example:
.. code-block:: none
&target=transformNull(webapp.pages.*.views,-1)
This would take any page that didn't have values and supply negative 1 as a default.
Any other numeric value may be used as well.
"""
def transform(v, d):
if v is None: return d
else: return v
if referenceSeries:
defaults = [default if any(v is not None for v in x) else None for x in izip(*referenceSeries)]
else:
defaults = None
for series in seriesList:
if referenceSeries:
series.name = "transformNull(%s,%g,referenceSeries)" % (series.name, default)
else:
series.name = "transformNull(%s,%g)" % (series.name, default)
series.pathExpression = series.name
if defaults:
values = [transform(v, d) for v, d in izip(series, defaults)]
else:
values = [transform(v, default) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList
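# --- Illustrative sketch, not part of the original module ---
# transformNull() above builds a per-interval "defaults" mask from the
# reference series: an interval receives the default value only if at least
# one reference series is non-null there. The same idea on plain value lists
# (the helper name and sample data are hypothetical):
def _sketch_reference_mask(reference_rows, default):
    # reference_rows: equally long value lists, e.g. [[1, None], [None, None]]
    return [default if any(v is not None for v in column) else None
            for column in zip(*reference_rows)]
# _sketch_reference_mask([[1, None], [None, None]], 0) == [0, None]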
def isNonNull(requestContext, seriesList):
"""
Takes a metric or wildcard seriesList and counts up the number of non-null
values. This is useful for understanding the number of metrics that have data
at a given point in time (i.e. to count which servers are alive).
Example:
.. code-block:: none
&target=isNonNull(webapp.pages.*.views)
Returns a seriesList where 1 is specified for non-null values, and
0 is specified for null values.
"""
def transform(v):
if v is None: return 0
else: return 1
for series in seriesList:
series.name = "isNonNull(%s)" % (series.name)
series.pathExpression = series.name
values = [transform(v) for v in series]
series.extend(values)
del series[:len(values)]
return seriesList
def identity(requestContext, name):
"""
Identity function:
Returns datapoints where the value equals the timestamp of the datapoint.
Useful when you have another series where the value is a timestamp, and
you want to compare it to the time of the datapoint, to render an age.
Example:
.. code-block:: none
&target=identity("The.time.series")
This would create a series named "The.time.series" that contains points where
x(t) == t.
"""
step = 60
delta = timedelta(seconds=step)
start = int(epoch(requestContext["startTime"]))
end = int(epoch(requestContext["endTime"]))
values = range(start, end, step)
series = TimeSeries(name, start, end, step, values)
series.pathExpression = 'identity("%s")' % name
return [series]
def countSeries(requestContext, *seriesLists):
"""
Draws a horizontal line representing the number of nodes found in the seriesList.
.. code-block:: none
&target=countSeries(carbon.agents.*.*)
"""
if seriesLists:
(seriesList,start,end,step) = normalize(seriesLists)
name = "countSeries(%s)" % formatPathExpressions(seriesList)
values = ( int(len(row)) for row in izip(*seriesList) )
series = TimeSeries(name,start,end,step,values)
series.pathExpression = name
else:
series = constantLine(requestContext, 0).pop()
series.pathExpression = "countSeries()"
return [series]
def group(requestContext, *seriesLists):
"""
Takes an arbitrary number of seriesLists and adds them to a single seriesList. This is used
to pass multiple seriesLists to a function which only takes one.
"""
seriesGroup = []
for s in seriesLists:
seriesGroup.extend(s)
return seriesGroup
def mapSeries(requestContext, seriesList, mapNode):
"""
Short form: ``map()``
Takes a seriesList and maps it to a list of seriesLists. Each seriesList has the
given mapNode in common.
.. note:: This function is not very useful alone. It should be used with :py:func:`reduceSeries`
.. code-block:: none
mapSeries(servers.*.cpu.*,1) =>
[
servers.server1.cpu.*,
servers.server2.cpu.*,
...
servers.serverN.cpu.*
]
"""
metaSeries = {}
keys = []
for series in seriesList:
key = series.name.split(".")[mapNode]
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(series)
return [ metaSeries[k] for k in keys ]
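# --- Illustrative sketch, not part of the original module ---
# mapSeries() above groups series by one dot-separated name node while keeping
# first-seen key order. The same grouping on plain name strings (helper name
# and metric names are hypothetical):
def _sketch_group_names_by_node(names, map_node):
    groups, order = {}, []
    for name in names:
        key = name.split(".")[map_node]
        if key not in groups:
            groups[key] = []
            order.append(key)
        groups[key].append(name)
    return [groups[k] for k in order]
# _sketch_group_names_by_node(
#     ["servers.s1.cpu.user", "servers.s2.cpu.user", "servers.s1.cpu.sys"], 1)
# == [["servers.s1.cpu.user", "servers.s1.cpu.sys"], ["servers.s2.cpu.user"]]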
def reduceSeries(requestContext, seriesLists, reduceFunction, reduceNode, *reduceMatchers):
"""
Short form: ``reduce()``
Takes a list of seriesLists and reduces it to a list of series by means of the reduceFunction.
Reduction is performed by matching the reduceNode in each series against the list of
reduceMatchers. Then each series is passed to the reduceFunction as arguments in the order
given by reduceMatchers. The reduceFunction should yield a single series.
The resulting series are aliased so that they can easily be nested in other functions.
**Example**: Map/Reduce asPercent(bytes_used,total_bytes) for each server
Assume that metrics in the form below exist:
.. code-block:: none
servers.server1.disk.bytes_used
servers.server1.disk.total_bytes
servers.server2.disk.bytes_used
servers.server2.disk.total_bytes
servers.server3.disk.bytes_used
servers.server3.disk.total_bytes
...
servers.serverN.disk.bytes_used
servers.serverN.disk.total_bytes
To get the percentage of disk used for each server:
.. code-block:: none
reduceSeries(mapSeries(servers.*.disk.*,1),"asPercent",3,"bytes_used","total_bytes") =>
alias(asPercent(servers.server1.disk.bytes_used,servers.server1.disk.total_bytes),"servers.server1.disk.reduce.asPercent"),
alias(asPercent(servers.server2.disk.bytes_used,servers.server2.disk.total_bytes),"servers.server2.disk.reduce.asPercent"),
alias(asPercent(servers.server3.disk.bytes_used,servers.server3.disk.total_bytes),"servers.server3.disk.reduce.asPercent"),
...
alias(asPercent(servers.serverN.disk.bytes_used,servers.serverN.disk.total_bytes),"servers.serverN.disk.reduce.asPercent")
In other words, we will get back the following metrics::
servers.server1.disk.reduce.asPercent
servers.server2.disk.reduce.asPercent
servers.server3.disk.reduce.asPercent
...
servers.serverN.disk.reduce.asPercent
.. seealso:: :py:func:`mapSeries`
"""
metaSeries = {}
keys = []
for seriesList in seriesLists:
for series in seriesList:
nodes = series.name.split('.')
node = nodes[reduceNode]
reduceSeriesName = '.'.join(nodes[0:reduceNode]) + '.reduce.' + reduceFunction
if node in reduceMatchers:
if reduceSeriesName not in metaSeries:
metaSeries[reduceSeriesName] = [None] * len(reduceMatchers)
keys.append(reduceSeriesName)
i = reduceMatchers.index(node)
metaSeries[reduceSeriesName][i] = series
for key in keys:
metaSeries[key] = SeriesFunctions[reduceFunction](requestContext,*[[l] for l in metaSeries[key]])[0]
metaSeries[key].name = key
return [ metaSeries[key] for key in keys ]
def applyByNode(requestContext, seriesList, nodeNum, templateFunction, newName=None):
"""
Takes a seriesList and applies some complicated function (described by a string), replacing templates with unique
prefixes of keys from the seriesList (the key is all nodes up to the index given as `nodeNum`).
If the `newName` parameter is provided, the name of the resulting series will be given by that parameter, with any
"%" characters replaced by the unique prefix.
Example:
.. code-block:: none
&target=applyByNode(servers.*.disk.bytes_free,1,"divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))")
Would find all series which match `servers.*.disk.bytes_free`, then trim them down to unique series up to the node
given by nodeNum, then fill them into the template function provided (replacing % by the prefixes).
Additional Examples:
Given keys of
- `stats.counts.haproxy.web.2XX`
- `stats.counts.haproxy.web.3XX`
- `stats.counts.haproxy.web.5XX`
- `stats.counts.haproxy.microservice.2XX`
- `stats.counts.haproxy.microservice.3XX`
- `stats.counts.haproxy.microservice.5XX`
The following will return the rate of 5XX's per service:
.. code-block:: none
applyByNode(stats.counts.haproxy.*.*XX, 3, "asPercent(%.2XX, sumSeries(%.*XX))", "%.pct_5XX")
The output series would have keys `stats.counts.haproxy.web.pct_5XX` and `stats.counts.haproxy.microservice.pct_5XX`.
"""
prefixes = set()
for series in seriesList:
prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
prefixes.add(prefix)
results = []
for prefix in sorted(prefixes):
for resultSeries in evaluateTarget(requestContext, templateFunction.replace('%', prefix)):
if newName:
resultSeries.name = newName.replace('%', prefix)
resultSeries.pathExpression = prefix
resultSeries.start = series.start
resultSeries.end = series.end
results.append(resultSeries)
return results
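# --- Illustrative sketch, not part of the original module ---
# applyByNode() above derives one unique prefix per series (all name nodes up
# to and including nodeNum) and substitutes it for '%' in the template. The
# prefix/template expansion in isolation (helper name and names hypothetical):
def _sketch_expand_template(names, node_num, template):
    prefixes = sorted({'.'.join(n.split('.')[:node_num + 1]) for n in names})
    return [template.replace('%', p) for p in prefixes]
# _sketch_expand_template(
#     ["servers.a.disk.bytes_free", "servers.b.disk.bytes_free"], 1,
#     "divideSeries(%.disk.bytes_free,sumSeries(%.disk.bytes_*))")
# yields one expanded target string for "servers.a" and one for "servers.b".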
def groupByNode(requestContext, seriesList, nodeNum, callback):
"""
Takes a seriesList and maps a callback to subgroups within it, as defined by a common node
.. code-block:: none
&target=groupByNode(ganglia.by-function.*.*.cpu.load5,2,"sumSeries")
Would return multiple series which are each the result of applying the "sumSeries" function
to groups joined on the second node (0 indexed) resulting in a list of targets like
.. code-block :: none
sumSeries(ganglia.by-function.server1.*.cpu.load5),sumSeries(ganglia.by-function.server2.*.cpu.load5),...
"""
return groupByNodes(requestContext, seriesList, callback, nodeNum)
def groupByNodes(requestContext, seriesList, callback, *nodes):
"""
Takes a seriesList and maps a callback to subgroups within it, as defined by multiple nodes
.. code-block:: none
&target=groupByNodes(ganglia.server*.*.cpu.load*,"sumSeries",1,4)
Would return multiple series which are each the result of applying the "sumSeries" function
to groups joined on the nodes' list (0 indexed) resulting in a list of targets like
.. code-block :: none
sumSeries(ganglia.server1.*.cpu.load5),sumSeries(ganglia.server1.*.cpu.load10),sumSeries(ganglia.server1.*.cpu.load15),sumSeries(ganglia.server2.*.cpu.load5),sumSeries(ganglia.server2.*.cpu.load10),sumSeries(ganglia.server2.*.cpu.load15),...
"""
metaSeries = {}
keys = []
if isinstance(nodes, int):
nodes=[nodes]
for series in seriesList:
key = '.'.join(series.name.split(".")[n] for n in nodes)
if key not in metaSeries:
metaSeries[key] = [series]
keys.append(key)
else:
metaSeries[key].append(series)
for key in metaSeries.keys():
metaSeries[key] = SeriesFunctions[callback](requestContext,
metaSeries[key])[0]
metaSeries[key].name = key
return [ metaSeries[key] for key in keys ]
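# --- Illustrative sketch, not part of the original module ---
# groupByNodes() above builds each group key by joining the selected name
# nodes with '.', so groupByNode() is just the single-node case. Key
# construction in isolation (helper name and metric name are hypothetical):
def _sketch_group_key(name, nodes):
    parts = name.split(".")
    return ".".join(parts[n] for n in nodes)
# _sketch_group_key("ganglia.server1.cpu.load5", (1, 3)) == "server1.load5"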
def exclude(requestContext, seriesList, pattern):
"""
Takes a metric or a wildcard seriesList, followed by a regular expression
in double quotes. Excludes metrics that match the regular expression.
Example:
.. code-block:: none
&target=exclude(servers*.instance*.threads.busy,"server02")
"""
regex = re.compile(pattern)
return [s for s in seriesList if not regex.search(s.name)]
def grep(requestContext, seriesList, pattern):
"""
Takes a metric or a wildcard seriesList, followed by a regular expression
in double quotes. Excludes metrics that don't match the regular expression.
Example:
.. code-block:: none
&target=grep(servers*.instance*.threads.busy,"server02")
"""
regex = re.compile(pattern)
return [s for s in seriesList if regex.search(s.name)]
def smartSummarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False):
"""
Smarter experimental version of summarize.
The alignToFrom parameter has been deprecated; it no longer has any effect.
Alignment happens automatically for days, hours, and minutes.
"""
if alignToFrom:
log.info("Deprecated parameter 'alignToFrom' is being ignored.")
results = []
delta = parseTimeOffset(intervalString)
interval = delta.seconds + (delta.days * 86400)
# Adjust the start time to fit an entire day for intervals >= 1 day
requestContext = requestContext.copy()
s = requestContext['startTime']
if interval >= DAY:
requestContext['startTime'] = datetime(s.year, s.month, s.day, tzinfo = s.tzinfo)
elif interval >= HOUR:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour, tzinfo = s.tzinfo)
elif interval >= MINUTE:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour, s.minute, tzinfo = s.tzinfo)
for i,series in enumerate(seriesList):
# XXX: breaks with summarize(metric.{a,b})
# each series.pathExpression == metric.{a,b}
newSeries = evaluateTarget(requestContext, series.pathExpression)[0]
series[0:len(series)] = newSeries
series.start = newSeries.start
series.end = newSeries.end
series.step = newSeries.step
for series in seriesList:
buckets = {} # { timestamp: [values] }
timestamps = range( int(series.start), int(series.end), int(series.step) )
datapoints = zip(timestamps, series)
# Populate buckets
for timestamp_, value in datapoints:
bucketInterval = int((timestamp_ - series.start) / interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
newValues = []
for timestamp_ in range(series.start, series.end, interval):
bucketInterval = int((timestamp_ - series.start) / interval)
bucket = buckets.get(bucketInterval, [])
if bucket:
if func == 'avg':
newValues.append( float(sum(bucket)) / float(len(bucket)) )
elif func == 'last':
newValues.append( bucket[len(bucket)-1] )
elif func == 'max':
newValues.append( max(bucket) )
elif func == 'min':
newValues.append( min(bucket) )
else:
newValues.append( sum(bucket) )
else:
newValues.append( None )
newName = "smartSummarize(%s, \"%s\", \"%s\")" % (series.name, intervalString, func)
alignedEnd = series.start + (bucketInterval * interval) + interval
newSeries = TimeSeries(newName, series.start, alignedEnd, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
def summarize(requestContext, seriesList, intervalString, func='sum', alignToFrom=False):
"""
Summarize the data into interval buckets of a certain size.
By default, the contents of each interval bucket are summed together. This is
useful for counters where each increment represents a discrete event and
retrieving a "per X" value requires summing all the events in that interval.
Specifying 'avg' instead will return the mean for each bucket, which can be more
useful when the value is a gauge that represents a certain value in time.
'max', 'min' or 'last' can also be specified.
By default, buckets are calculated by rounding to the nearest interval. This
works well for intervals smaller than a day. For example, 22:32 will end up
in the bucket 22:00-23:00 when the interval=1hour.
Passing alignToFrom=true will instead create buckets starting at the from
time. In this case, the bucket for 22:32 depends on the from time. If
from=6:30 then the 1hour bucket for 22:32 is 22:30-23:30.
Example:
.. code-block:: none
&target=summarize(counter.errors, "1hour") # total errors per hour
&target=summarize(nonNegativeDerivative(gauge.num_users), "1week") # new users per week
&target=summarize(queue.size, "1hour", "avg") # average queue size per hour
&target=summarize(queue.size, "1hour", "max") # maximum queue size during each hour
&target=summarize(metric, "13week", "avg", true)&from=midnight+20100101 # 2010 Q1-4
"""
results = []
delta = parseTimeOffset(intervalString)
interval = delta.seconds + (delta.days * 86400)
for series in seriesList:
buckets = {}
timestamps = range( int(series.start), int(series.end), int(series.step) )
datapoints = zip(timestamps, series)
for timestamp_, value in datapoints:
if alignToFrom:
bucketInterval = int((timestamp_ - series.start) / interval)
else:
bucketInterval = timestamp_ - (timestamp_ % interval)
if bucketInterval not in buckets:
buckets[bucketInterval] = []
if value is not None:
buckets[bucketInterval].append(value)
if alignToFrom:
newStart = series.start
newEnd = series.end
else:
newStart = series.start - (series.start % interval)
newEnd = series.end - (series.end % interval) + interval
newValues = []
for timestamp_ in range(newStart, newEnd, interval):
if alignToFrom:
newEnd = timestamp_
bucketInterval = int((timestamp_ - series.start) / interval)
else:
bucketInterval = timestamp_ - (timestamp_ % interval)
bucket = buckets.get(bucketInterval, [])
if bucket:
if func == 'avg':
newValues.append( float(sum(bucket)) / float(len(bucket)) )
elif func == 'last':
newValues.append( bucket[len(bucket)-1] )
elif func == 'max':
newValues.append( max(bucket) )
elif func == 'min':
newValues.append( min(bucket) )
else:
newValues.append( sum(bucket) )
else:
newValues.append( None )
if alignToFrom:
newEnd += interval
newName = "summarize(%s, \"%s\", \"%s\"%s)" % (series.name, intervalString, func, alignToFrom and ", true" or "")
newSeries = TimeSeries(newName, newStart, newEnd, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
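# --- Illustrative sketch, not part of the original module ---
# summarize() above keys its buckets either on the timestamp rounded down to a
# multiple of the interval (default) or on the zero-based bucket index relative
# to series.start (alignToFrom=true). Both keying schemes in isolation (helper
# name and timestamps are hypothetical):
def _sketch_bucket_key(timestamp, series_start, interval, align_to_from):
    if align_to_from:
        return int((timestamp - series_start) / interval)  # bucket index
    return timestamp - (timestamp % interval)              # aligned timestamp
# _sketch_bucket_key(1262304125, 1262304000, 60, False) == 1262304120
# _sketch_bucket_key(1262304125, 1262304000, 60, True) == 2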
def hitcount(requestContext, seriesList, intervalString, alignToInterval = False):
"""
Estimate hit counts from a list of time series.
This function assumes the values in each time series represent
hits per second. It calculates hits per some larger interval
such as per day or per hour. This function is like summarize(),
except that it compensates automatically for different time scales
(so that a similar graph results from using either fine-grained
or coarse-grained records) and handles rarely-occurring events
gracefully.
"""
results = []
delta = parseTimeOffset(intervalString)
interval = int(delta.seconds + (delta.days * 86400))
if alignToInterval:
requestContext = requestContext.copy()
s = requestContext['startTime']
if interval >= DAY:
requestContext['startTime'] = datetime(s.year, s.month, s.day)
elif interval >= HOUR:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour)
elif interval >= MINUTE:
requestContext['startTime'] = datetime(s.year, s.month, s.day, s.hour, s.minute)
for i,series in enumerate(seriesList):
newSeries = evaluateTarget(requestContext, series.pathExpression)[0]
intervalCount = int((series.end - series.start) / interval)
series[0:len(series)] = newSeries
series.start = newSeries.start
series.end = newSeries.start + (intervalCount * interval) + interval
series.step = newSeries.step
for series in seriesList:
length = len(series)
step = int(series.step)
bucket_count = int(math.ceil(float(series.end - series.start) / interval))
buckets = [[] for _ in range(bucket_count)]
newStart = int(series.end - bucket_count * interval)
for i, value in enumerate(series):
if value is None:
continue
start_time = int(series.start + i * step)
start_bucket, start_mod = divmod(start_time - newStart, interval)
end_time = start_time + step
end_bucket, end_mod = divmod(end_time - newStart, interval)
if end_bucket >= bucket_count:
end_bucket = bucket_count - 1
end_mod = interval
if start_bucket == end_bucket:
# All of the hits go to a single bucket.
if start_bucket >= 0:
buckets[start_bucket].append(value * (end_mod - start_mod))
else:
# Spread the hits among 2 or more buckets.
if start_bucket >= 0:
buckets[start_bucket].append(value * (interval - start_mod))
hits_per_bucket = value * interval
for j in range(start_bucket + 1, end_bucket):
buckets[j].append(hits_per_bucket)
if end_mod > 0:
buckets[end_bucket].append(value * end_mod)
newValues = []
for bucket in buckets:
if bucket:
newValues.append( sum(bucket) )
else:
newValues.append(None)
newName = 'hitcount(%s, "%s"%s)' % (series.name, intervalString, alignToInterval and ", true" or "")
newSeries = TimeSeries(newName, newStart, series.end, interval, newValues)
newSeries.pathExpression = newName
results.append(newSeries)
return results
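# --- Illustrative sketch, not part of the original module ---
# hitcount() above treats each value as a rate (hits per second) and credits
# value * covered_seconds to every bucket a datapoint overlaps. The split for
# a datapoint that straddles at most one bucket boundary (the full function
# also handles spans over several buckets); helper name and numbers are
# hypothetical:
def _sketch_split_hits(value, start_time, step, bucket_start, interval):
    first = min(step, bucket_start + interval - start_time)  # seconds before the boundary
    second = step - first                                     # seconds after it
    return value * first, value * second
# A 60s datapoint of 2 hits/s starting 30s before an hourly boundary:
# _sketch_split_hits(2, 3570, 60, 0, 3600) == (60, 60)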
def timeFunction(requestContext, name, step=60):
"""
Short Alias: time()
Just returns the timestamp for each X value.
Example:
.. code-block:: none
&target=time("The.time.series")
This would create a series named "The.time.series" that contains in Y the same
value (in seconds) as X.
Accepts optional second argument as 'step' parameter (default step is 60 sec)
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(time.mktime(when.timetuple()))
when += delta
series = TimeSeries(name,
int(time.mktime(requestContext["startTime"].timetuple())),
int(time.mktime(requestContext["endTime"].timetuple())),
step, values)
series.pathExpression = name
return [series]
def sinFunction(requestContext, name, amplitude=1, step=60):
"""
Short Alias: sin()
Just returns the sine of the current time. The optional amplitude parameter
changes the amplitude of the wave.
Example:
.. code-block:: none
&target=sin("The.time.series", 2)
This would create a series named "The.time.series" that contains sin(x)*2.
Accepts optional second argument as 'amplitude' parameter (default amplitude is 1)
Accepts optional third argument as 'step' parameter (default step is 60 sec)
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
while when < requestContext["endTime"]:
values.append(math.sin(time.mktime(when.timetuple()))*amplitude)
when += delta
return [TimeSeries(name,
int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)]
def removeEmptySeries(requestContext, seriesList):
"""
Takes one metric or a wildcard seriesList.
Out of all metrics passed, draws only the metrics with non-empty data.
Example:
.. code-block:: none
&target=removeEmptySeries(server*.instance*.threads.busy)
Draws only live servers with non-empty data.
"""
return [ series for series in seriesList if safeIsNotEmpty(series) ]
def randomWalkFunction(requestContext, name, step=60):
"""
Short Alias: randomWalk()
Returns a random walk starting at 0. This is great for testing when there is
no real data in whisper.
Example:
.. code-block:: none
&target=randomWalk("The.time.series")
This would create a series named "The.time.series" that contains points where
x(t) == x(t-1)+random()-0.5, and x(0) == 0.
Accepts optional second argument as 'step' parameter (default step is 60 sec)
"""
delta = timedelta(seconds=step)
when = requestContext["startTime"]
values = []
current = 0
while when < requestContext["endTime"]:
values.append(current)
current += random.random() - 0.5
when += delta
return [TimeSeries(name,
int(epoch(requestContext["startTime"])),
int(epoch(requestContext["endTime"])),
step, values)]
def events(requestContext, *tags):
"""
Returns the number of events at this point in time. Usable with
drawAsInfinite.
Example:
.. code-block:: none
&target=events("tag-one", "tag-two")
&target=events("*")
The first example returns all events tagged as "tag-one" and "tag-two"; the
second returns all events.
"""
step = 1
name = "events(" + ", ".join(tags) + ")"
if tags == ("*",):
tags = None
start_timestamp = epoch(requestContext["startTime"])
start_timestamp = start_timestamp - start_timestamp % step
end_timestamp = epoch(requestContext["endTime"])
end_timestamp = end_timestamp - end_timestamp % step
points = (end_timestamp - start_timestamp)/step
events = models.Event.find_events(datetime.fromtimestamp(start_timestamp),
datetime.fromtimestamp(end_timestamp),
tags=tags)
values = [None] * points
for event in events:
event_timestamp = epoch(event.when)
value_offset = (event_timestamp - start_timestamp)/step
if values[value_offset] is None:
values[value_offset] = 1
else:
values[value_offset] += 1
result_series = TimeSeries(name, start_timestamp, end_timestamp, step, values, 'sum')
result_series.pathExpression = name
return [result_series]
def pieAverage(requestContext, series):
return safeDiv(safeSum(series),safeLen(series))
def pieMaximum(requestContext, series):
return max(series)
def pieMinimum(requestContext, series):
return min(series)
PieFunctions = {
'average': pieAverage,
'maximum': pieMaximum,
'minimum': pieMinimum,
}
SeriesFunctions = {
# Combine functions
'sumSeries': sumSeries,
'sum': sumSeries,
'multiplySeries': multiplySeries,
'averageSeries': averageSeries,
'stddevSeries': stddevSeries,
'avg': averageSeries,
'sumSeriesWithWildcards': sumSeriesWithWildcards,
'averageSeriesWithWildcards': averageSeriesWithWildcards,
'multiplySeriesWithWildcards': multiplySeriesWithWildcards,
'minSeries': minSeries,
'maxSeries': maxSeries,
'rangeOfSeries': rangeOfSeries,
'percentileOfSeries': percentileOfSeries,
'countSeries': countSeries,
'weightedAverage': weightedAverage,
# Transform functions
'scale': scale,
'scaleToSeconds': scaleToSeconds,
'offset': offset,
'offsetToZero': offsetToZero,
'derivative': derivative,
'delay': delay,
'squareRoot': squareRoot,
'pow': pow,
'perSecond': perSecond,
'integral': integral,
'integralByInterval' : integralByInterval,
'nonNegativeDerivative': nonNegativeDerivative,
'log': logarithm,
'invert': invert,
'timeStack': timeStack,
'timeShift': timeShift,
'timeSlice': timeSlice,
'summarize': summarize,
'smartSummarize': smartSummarize,
'hitcount': hitcount,
'absolute': absolute,
'interpolate': interpolate,
# Calculate functions
'movingAverage': movingAverage,
'movingMedian': movingMedian,
'stdev': stdev,
'holtWintersForecast': holtWintersForecast,
'holtWintersConfidenceBands': holtWintersConfidenceBands,
'holtWintersConfidenceArea': holtWintersConfidenceArea,
'holtWintersAberration': holtWintersAberration,
'linearRegression': linearRegression,
'asPercent': asPercent,
'pct': asPercent,
'diffSeries': diffSeries,
'divideSeries': divideSeries,
# Series Filter functions
'fallbackSeries': fallbackSeries,
'mostDeviant': mostDeviant,
'highestCurrent': highestCurrent,
'lowestCurrent': lowestCurrent,
'highestMax': highestMax,
'currentAbove': currentAbove,
'currentBelow': currentBelow,
'highestAverage': highestAverage,
'lowestAverage': lowestAverage,
'averageAbove': averageAbove,
'averageBelow': averageBelow,
'maximumAbove': maximumAbove,
'minimumAbove': minimumAbove,
'maximumBelow': maximumBelow,
'minimumBelow': minimumBelow,
'nPercentile': nPercentile,
'limit': limit,
'sortByTotal': sortByTotal,
'sortByName': sortByName,
'averageOutsidePercentile': averageOutsidePercentile,
'removeBetweenPercentile': removeBetweenPercentile,
'sortByMaxima': sortByMaxima,
'sortByMinima': sortByMinima,
'useSeriesAbove': useSeriesAbove,
'exclude': exclude,
'grep': grep,
'removeEmptySeries': removeEmptySeries,
# Data Filter functions
'removeAbovePercentile': removeAbovePercentile,
'removeAboveValue': removeAboveValue,
'removeBelowPercentile': removeBelowPercentile,
'removeBelowValue': removeBelowValue,
# Special functions
'legendValue': legendValue,
'alias': alias,
'aliasSub': aliasSub,
'aliasByNode': aliasByNode,
'aliasByMetric': aliasByMetric,
'cactiStyle': cactiStyle,
'color': color,
'alpha': alpha,
'cumulative': cumulative,
'consolidateBy': consolidateBy,
'keepLastValue': keepLastValue,
'changed': changed,
'drawAsInfinite': drawAsInfinite,
'secondYAxis': secondYAxis,
'lineWidth': lineWidth,
'dashed': dashed,
'substr': substr,
'group': group,
'map': mapSeries,
'mapSeries': mapSeries,
'reduce': reduceSeries,
'reduceSeries': reduceSeries,
'applyByNode': applyByNode,
'groupByNode': groupByNode,
'groupByNodes' : groupByNodes,
'constantLine': constantLine,
'stacked': stacked,
'areaBetween': areaBetween,
'threshold': threshold,
'transformNull': transformNull,
'isNonNull': isNonNull,
'threshold' : threshold,
'verticalLine' : verticalLine,
'identity': identity,
'aggregateLine': aggregateLine,
# test functions
'time': timeFunction,
"sin": sinFunction,
"randomWalk": randomWalkFunction,
'timeFunction': timeFunction,
"sinFunction": sinFunction,
"randomWalkFunction": randomWalkFunction,
# events
'events': events,
}
# Avoid import circularity
if not environ.get('READTHEDOCS'):
from graphite.render.evaluator import evaluateTarget, evaluateTokens
| apache-2.0 | -7,750,885,775,172,380,000 | 31.096373 | 245 | 0.697333 | false |
google-code/android-scripting | python/src/Demo/tix/samples/BtnBox.py | 37 | 1612 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id: BtnBox.py 36560 2004-07-18 06:16:08Z tim_one $
#
# Tix Demonstration Program
#
# This sample program is structured in such a way so that it can be
# executed from the Tix demo program "tixwidgets.py": it must have a
# procedure called "RunSample". It should also have the "if" statement
# at the end of this file so that it can be run as a standalone
# program.
# This file demonstrates the use of the tixButtonBox widget, which is a
# group of TK buttons. You can use it to manage the buttons in a dialog box,
# for example.
#
import Tix
def RunSample(w):
# Create the label on the top of the dialog box
#
top = Tix.Label(w, padx=20, pady=10, bd=1, relief=Tix.RAISED,
anchor=Tix.CENTER, text='This dialog box is\n a demonstration of the\n tixButtonBox widget')
# Create the button box and add a few buttons in it. Set the
# -width of all the buttons to the same value so that they
# appear in the same size.
#
# Note that the -text, -underline, -command and -width options are all
# standard options of the button widgets.
#
box = Tix.ButtonBox(w, orientation=Tix.HORIZONTAL)
box.add('ok', text='OK', underline=0, width=5,
command=lambda w=w: w.destroy())
box.add('close', text='Cancel', underline=0, width=5,
command=lambda w=w: w.destroy())
box.pack(side=Tix.BOTTOM, fill=Tix.X)
top.pack(side=Tix.TOP, fill=Tix.BOTH, expand=1)
if __name__ == '__main__':
root = Tix.Tk()
RunSample(root)
root.mainloop()
| apache-2.0 | 4,084,287,208,904,159,000 | 35.636364 | 112 | 0.663151 | false |
girving/tensorflow | tensorflow/python/training/slot_creator.py | 12 | 7957 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribution_strategy_context
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
"""Helper function for creating a slot variable."""
# TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
# scope.
current_partitioner = variable_scope.get_variable_scope().partitioner
variable_scope.get_variable_scope().set_partitioner(None)
# When init from val instead of callable initializer, the shape is expected to
# be None, not <unknown> or any fully defined shape.
shape = shape if callable(val) else None
slot = variable_scope.get_variable(
scope, initializer=val, trainable=False,
use_resource=resource_variable_ops.is_resource_variable(primary),
shape=shape, dtype=dtype,
validate_shape=validate_shape)
variable_scope.get_variable_scope().set_partitioner(current_partitioner)
# pylint: disable=protected-access
if isinstance(primary, variables.Variable) and primary._save_slice_info:
# Primary is a partitioned variable, so we need to also indicate that
# the slot is a partitioned variable. Slots have the same partitioning
# as their primaries.
# For examples when using AdamOptimizer in linear model, slot.name
# here can be "linear//weights/Adam:0", while primary.op.name is
# "linear//weight". We want to get 'Adam' as real_slot_name, so we
# remove "'linear//weight' + '/'" and ':0'.
real_slot_name = slot.name[len(primary.op.name + "/"):-2]
slice_info = primary._save_slice_info
slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
slice_info.full_name + "/" + real_slot_name,
slice_info.full_shape[:],
slice_info.var_offset[:],
slice_info.var_shape[:]))
# pylint: enable=protected-access
return slot
def create_slot(primary, val, name, colocate_with_primary=True):
"""Create a slot initialized to the given value.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
val: A `Tensor` specifying the initial value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = val.get_shape().is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = (
distribution_strategy_context.get_distribution_strategy())
with distribution_strategy.colocate_vars_with(primary):
return _create_slot_var(primary, val, "", validate_shape, None, None)
else:
return _create_slot_var(primary, val, "", validate_shape, None, None)
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
colocate_with_primary=True):
"""Creates a slot initialized using an `Initializer`.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = shape.is_fully_defined()
if context.executing_eagerly():
prefix = primary._shared_name # pylint: disable=protected-access
else:
prefix = primary.op.name
with variable_scope.variable_scope(None, prefix + "/" + name):
if colocate_with_primary:
distribution_strategy = (
distribution_strategy_context.get_distribution_strategy())
with distribution_strategy.colocate_vars_with(primary):
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
else:
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Tensor`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
slot_shape = primary.get_shape()
if slot_shape.is_fully_defined():
initializer = init_ops.zeros_initializer(dtype)
return create_slot_with_initializer(
primary, initializer, slot_shape, dtype, name,
colocate_with_primary=colocate_with_primary)
else:
if isinstance(primary, variables.Variable):
slot_shape = array_ops.shape(primary.initialized_value())
else:
slot_shape = array_ops.shape(primary)
val = array_ops.zeros(slot_shape, dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
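# --- Illustrative sketch, not part of the original module ---
# A momentum-style optimizer typically calls create_zeros_slot() once per
# trainable variable. The wrapper below is hypothetical (not TensorFlow API)
# and only shows that pattern.
def _sketch_momentum_slots(var_list):
  """Return (variable, slot) pairs with one zero-initialized 'momentum' slot each."""
  return [(var, create_zeros_slot(var, "momentum")) for var in var_list]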
| apache-2.0 | 205,240,561,059,555,040 | 40.659686 | 80 | 0.700892 | false |
resmo/ansible | lib/ansible/modules/network/fortios/fortios_endpoint_control_settings.py | 13 | 13991 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_endpoint_control_settings
short_description: Configure endpoint control settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify endpoint_control feature and settings category.
Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
endpoint_control_settings:
description:
- Configure endpoint control settings.
default: null
type: dict
suboptions:
download_custom_link:
description:
- Customized URL for downloading FortiClient.
type: str
download_location:
description:
- FortiClient download location (FortiGuard or custom).
type: str
choices:
- fortiguard
- custom
forticlient_avdb_update_interval:
description:
- Period of time between FortiClient AntiVirus database updates (0 - 24 hours).
type: int
forticlient_dereg_unsupported_client:
description:
- Enable/disable deregistering unsupported FortiClient endpoints.
type: str
choices:
- enable
- disable
forticlient_ems_rest_api_call_timeout:
description:
- FortiClient EMS call timeout in milliseconds (500 - 30000 milliseconds).
type: int
forticlient_keepalive_interval:
description:
- Interval between two KeepAlive messages from FortiClient (20 - 300 sec).
type: int
forticlient_offline_grace:
description:
- Enable/disable grace period for offline registered clients.
type: str
choices:
- enable
- disable
forticlient_offline_grace_interval:
description:
- Grace period for offline registered FortiClient (60 - 600 sec).
type: int
forticlient_reg_key:
description:
- FortiClient registration key.
type: str
forticlient_reg_key_enforce:
description:
- Enable/disable requiring or enforcing FortiClient registration keys.
type: str
choices:
- enable
- disable
forticlient_reg_timeout:
description:
- FortiClient registration license timeout (days, min = 1, max = 180, 0 means unlimited).
type: int
forticlient_sys_update_interval:
description:
- Interval between two system update messages from FortiClient (30 - 1440 min).
type: int
forticlient_user_avatar:
description:
- Enable/disable uploading FortiClient user avatars.
type: str
choices:
- enable
- disable
forticlient_warning_interval:
description:
- Period of time between FortiClient portal warnings (0 - 24 hours).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure endpoint control settings.
fortios_endpoint_control_settings:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
endpoint_control_settings:
download_custom_link: "<your_own_value>"
download_location: "fortiguard"
forticlient_avdb_update_interval: "5"
forticlient_dereg_unsupported_client: "enable"
forticlient_ems_rest_api_call_timeout: "7"
forticlient_keepalive_interval: "8"
forticlient_offline_grace: "enable"
forticlient_offline_grace_interval: "10"
forticlient_reg_key: "<your_own_value>"
forticlient_reg_key_enforce: "enable"
forticlient_reg_timeout: "13"
forticlient_sys_update_interval: "14"
forticlient_user_avatar: "enable"
forticlient_warning_interval: "16"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_endpoint_control_settings_data(json):
option_list = ['download_custom_link', 'download_location', 'forticlient_avdb_update_interval',
'forticlient_dereg_unsupported_client', 'forticlient_ems_rest_api_call_timeout', 'forticlient_keepalive_interval',
'forticlient_offline_grace', 'forticlient_offline_grace_interval', 'forticlient_reg_key',
'forticlient_reg_key_enforce', 'forticlient_reg_timeout', 'forticlient_sys_update_interval',
'forticlient_user_avatar', 'forticlient_warning_interval']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
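# Illustrative sketch, not part of the original module: shows the key
# translation performed by underscore_to_hyphen() above on a small,
# hypothetical payload (the helper itself is only for demonstration).
def _sketch_underscore_to_hyphen_example():
    data = {'forticlient_reg_key': 'abc', 'download_location': 'custom'}
    assert underscore_to_hyphen(data) == {'forticlient-reg-key': 'abc',
                                          'download-location': 'custom'}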
def endpoint_control_settings(data, fos):
vdom = data['vdom']
endpoint_control_settings_data = data['endpoint_control_settings']
filtered_data = underscore_to_hyphen(filter_endpoint_control_settings_data(endpoint_control_settings_data))
return fos.set('endpoint-control',
'settings',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_endpoint_control(data, fos):
if data['endpoint_control_settings']:
resp = endpoint_control_settings(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"endpoint_control_settings": {
"required": False, "type": "dict", "default": None,
"options": {
"download_custom_link": {"required": False, "type": "str"},
"download_location": {"required": False, "type": "str",
"choices": ["fortiguard", "custom"]},
"forticlient_avdb_update_interval": {"required": False, "type": "int"},
"forticlient_dereg_unsupported_client": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"forticlient_ems_rest_api_call_timeout": {"required": False, "type": "int"},
"forticlient_keepalive_interval": {"required": False, "type": "int"},
"forticlient_offline_grace": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"forticlient_offline_grace_interval": {"required": False, "type": "int"},
"forticlient_reg_key": {"required": False, "type": "str"},
"forticlient_reg_key_enforce": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"forticlient_reg_timeout": {"required": False, "type": "int"},
"forticlient_sys_update_interval": {"required": False, "type": "int"},
"forticlient_user_avatar": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"forticlient_warning_interval": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_endpoint_control(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,029,896,582,002,123,000 | 34.691327 | 133 | 0.588092 | false |
davidecaminati/Domotics-Raspberry | Hardware/Socket_to_MCP27013_con_i2c/utility/i2c_inputs_address_24.py | 1 | 1049 | import smbus
import time
#bus = smbus.SMBus(0) # Rev 1 Pi uses 0
bus = smbus.SMBus(1) # Rev 2 Pi uses 1
DEVICE = 0x24 # Device address (A0-A2)
IODIRA = 0x00 # Pin direction register
IODIRB = 0x01 # Pin direction register
OLATA = 0x14 # Register for outputs
OLATB = 0x15 # Register for outputs
GPIOA = 0x12 # Register for inputs
GPIOB = 0x13 # Register for inputs
# Set all GPIOA pins as outputs and the
# highest GPIOB pin (GPB7) as an input.
bus.write_byte_data(DEVICE,IODIRB,0x80)
bus.write_byte_data(DEVICE,IODIRA,0x00)
#time.sleep(1)
bus.write_byte_data(DEVICE,OLATA,255)
stato = 0
# Loop until user presses CTRL-C
while True:
# Read state of GPIOA register
MySwitch = bus.read_byte_data(DEVICE,GPIOB)
#print MySwitch
if MySwitch == 0b10000000 and stato == 0:
#print "Switch was pressed! 8"
stato = 1
bus.write_byte_data(DEVICE,OLATA,0)
#stato = 0
time.sleep(1)
bus.write_byte_data(DEVICE,OLATA,255)
#stato = 0
#if MySwitch == 0b01000000:
#print "Switch was pressed! 7"
# bus.write_byte_data(DEVICE,OLATA,0)
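# --- Illustrative sketch, not part of the original script ---
# The same poll-and-toggle loop written as a function: the "pressed and not
# yet handled" test uses `and` explicitly, and the state flag is re-armed once
# the button is released. Register names reuse the constants defined above;
# the function name is hypothetical and it is never called automatically.
def _sketch_poll_button(bus, device=DEVICE):
    stato = 0
    while True:
        pressed = bus.read_byte_data(device, GPIOB) & 0b10000000
        if pressed and stato == 0:
            stato = 1
            bus.write_byte_data(device, OLATA, 0)    # drive all GPIOA outputs low
            time.sleep(1)
            bus.write_byte_data(device, OLATA, 255)  # and back high
        elif not pressed:
            stato = 0                                # re-arm after release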
| lgpl-3.0 | 2,571,518,470,000,461,000 | 24.585366 | 45 | 0.691134 | false |
titilambert/alignak | test/test_dependencies.py | 2 | 14216 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Hartmut Goebel, [email protected]
# Grégory Starck, [email protected]
# Sebastien Coavoux, [email protected]
# Jean Gabes, [email protected]
# Zoran Zaric, [email protected]
# Gerhard Lausser, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test host and service dependencies.
#
from alignak_test import *
sys.setcheckinterval(10000)
class TestConfig(AlignakTest):
def setUp(self):
self.setup_with_file(['etc/alignak_dependencies.cfg'])
def test_service_dependencies(self):
self.print_header()
now = time.time()
test_host_0 = self.sched.hosts.find_by_name("test_host_0")
test_host_1 = self.sched.hosts.find_by_name("test_host_1")
test_host_0.checks_in_progress = []
test_host_1.checks_in_progress = []
test_host_0.act_depend_of = [] # ignore the router
test_host_1.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore other routers
test_host_0_test_ok_0 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
test_host_0_test_ok_1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_1")
test_host_1_test_ok_0 = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_ok_0")
test_host_1_test_ok_1 = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_ok_1")
# the most important: test_ok_0 is in the chk_depend_of-list of test_ok_1
self.assertIn(test_host_0_test_ok_0, [x[0] for x in test_host_0_test_ok_1.chk_depend_of])
self.assertIn(test_host_1_test_ok_0, [x[0] for x in test_host_1_test_ok_1.chk_depend_of])
# and not vice versa
self.assertNotIn(test_host_0_test_ok_1, [x[0] for x in test_host_0_test_ok_0.chk_depend_of])
self.assertNotIn(test_host_1_test_ok_1, [x[0] for x in test_host_1_test_ok_0.chk_depend_of])
# test_ok_0 is also in the act_depend_of-list of test_ok_1
self.assertIn(test_host_0_test_ok_0, [x[0] for x in test_host_0_test_ok_1.chk_depend_of])
self.assertIn(test_host_1_test_ok_0, [x[0] for x in test_host_1_test_ok_1.chk_depend_of])
# check the criteria
# execution_failure_criteria u,c
# notification_failure_criteria u,c,w
self.assertEqual([x[1] for x in test_host_0_test_ok_1.chk_depend_of if x[0] is test_host_0_test_ok_0], [['u', 'c']] )
self.assertEqual([x[1] for x in test_host_1_test_ok_1.chk_depend_of if x[0] is test_host_1_test_ok_0], [['u', 'c']] )
self.assertEqual([x[1] for x in test_host_0_test_ok_1.act_depend_of if x[0] is test_host_0_test_ok_0], [['u', 'c', 'w']] )
self.assertEqual([x[1] for x in test_host_1_test_ok_1.act_depend_of if x[0] is test_host_1_test_ok_0], [['u', 'c', 'w']] )
# and every service has the host in it's act_depend_of-list
self.assertIn(test_host_0, [x[0] for x in test_host_0_test_ok_0.act_depend_of])
self.assertIn(test_host_0, [x[0] for x in test_host_0_test_ok_1.act_depend_of])
self.assertIn(test_host_1, [x[0] for x in test_host_1_test_ok_0.act_depend_of])
self.assertIn(test_host_1, [x[0] for x in test_host_1_test_ok_1.act_depend_of])
# and final count the masters
self.assertEqual(0, len(test_host_0_test_ok_0.chk_depend_of))
self.assertEqual(1, len(test_host_0_test_ok_1.chk_depend_of))
self.assertEqual(0, len(test_host_1_test_ok_0.chk_depend_of))
self.assertEqual(1, len(test_host_1_test_ok_1.chk_depend_of))
self.assertEqual(1, len(test_host_0_test_ok_0.act_depend_of)) # same, plus the host
self.assertEqual(2, len(test_host_0_test_ok_1.act_depend_of))
self.assertEqual(1, len(test_host_1_test_ok_0.act_depend_of))
self.assertEqual(2, len(test_host_1_test_ok_1.act_depend_of))
def test_host_dependencies(self):
self.print_header()
now = time.time()
#
# A <------ B <--
# ^ \--- C
# |---------------------
#
host_A = self.sched.hosts.find_by_name("test_host_A")
host_B = self.sched.hosts.find_by_name("test_host_B")
host_C = self.sched.hosts.find_by_name("test_host_C")
host_D = self.sched.hosts.find_by_name("test_host_D")
# the most important: test_ok_0 is in the chk_depend_of-list of test_ok_1
#self.assertTrue(host_A in [x[0] for x in host_C.chk_depend_of])
print host_C.act_depend_of
print host_C.chk_depend_of
print host_C.chk_depend_of_me
self.assertIn(host_B, [x[0] for x in host_C.act_depend_of])
self.assertIn(host_A, [x[0] for x in host_C.act_depend_of])
self.assertIn(host_A, [x[0] for x in host_B.act_depend_of])
self.assertEqual([], host_A.act_depend_of)
self.assertIn(host_B, [x[0] for x in host_C.chk_depend_of])
self.assertIn(host_A, [x[0] for x in host_C.chk_depend_of])
self.assertIn(host_A, [x[0] for x in host_B.chk_depend_of])
self.assertEqual([], host_A.act_depend_of)
self.assertIn(host_B, [x[0] for x in host_A.act_depend_of_me])
self.assertIn(host_C, [x[0] for x in host_A.act_depend_of_me])
self.assertIn(host_C, [x[0] for x in host_B.act_depend_of_me])
#self.assertEqual([], host_C.act_depend_of_me) # D in here
self.assertIn(host_B, [x[0] for x in host_A.chk_depend_of_me])
self.assertIn(host_C, [x[0] for x in host_A.chk_depend_of_me])
self.assertIn(host_C, [x[0] for x in host_B.chk_depend_of_me])
self.assertIn(host_D, [x[0] for x in host_C.chk_depend_of_me])
# check the notification/execution criteria
self.assertEqual([['d', 'u']], [x[1] for x in host_C.act_depend_of if x[0] is host_B])
self.assertEqual([['d']], [x[1] for x in host_C.chk_depend_of if x[0] is host_B])
self.assertEqual([['d', 'u']], [x[1] for x in host_C.act_depend_of if x[0] is host_A])
self.assertEqual([['d']], [x[1] for x in host_C.chk_depend_of if x[0] is host_A])
self.assertEqual([['d', 'u']], [x[1] for x in host_B.act_depend_of if x[0] is host_A])
self.assertEqual([['n']], [x[1] for x in host_B.chk_depend_of if x[0] is host_A])
def test_host_inherits_dependencies(self):
self.print_header()
now = time.time()
#
# A <------ B <--
# ^ \--- C <-- D
# |---------------------
#
host_A = self.sched.hosts.find_by_name("test_host_A")
host_B = self.sched.hosts.find_by_name("test_host_B")
host_C = self.sched.hosts.find_by_name("test_host_C")
host_D = self.sched.hosts.find_by_name("test_host_D")
print "A depends on", ",".join([x[0].get_name() for x in host_A.chk_depend_of])
print "B depends on", ",".join([x[0].get_name() for x in host_B.chk_depend_of])
print "C depends on", ",".join([x[0].get_name() for x in host_C.chk_depend_of])
print "D depends on", ",".join([x[0].get_name() for x in host_D.chk_depend_of])
self.assertEqual([], host_A.act_depend_of)
self.assertIn(host_A, [x[0] for x in host_B.act_depend_of])
self.assertIn(host_A, [x[0] for x in host_C.act_depend_of])
self.assertIn(host_B, [x[0] for x in host_C.act_depend_of])
self.assertIn(host_C, [x[0] for x in host_D.act_depend_of])
# and through inherits_parent....
#self.assertTrue(host_A in [x[0] for x in host_D.act_depend_of])
#self.assertTrue(host_B in [x[0] for x in host_D.act_depend_of])
    # Now test an in-service service_dep definition. It is easier to use than creating a full new object
def test_in_servicedef_dep(self):
svc_parent = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_parent_svc")
svc_son = self.sched.services.find_srv_by_name_and_hostname("test_host_1", "test_son_svc")
print "DumP", self.conf.servicedependencies
# the most important: test_parent is in the chk_depend_of-list of test_son
print "Dep: ", svc_son.act_depend_of
self.assertEqual([x[1] for x in svc_son.act_depend_of if x[0] is svc_parent], [['u', 'c', 'w']] )
def test_host_non_inherits_dependencies(self):
#
# A <------ B <--
# ^ \NOT/--- C <-- D
# |---------------------
#
host_A = self.sched.hosts.find_by_name("test_host_A")
host_B = self.sched.hosts.find_by_name("test_host_B")
host_C = self.sched.hosts.find_by_name("test_host_C")
host_D = self.sched.hosts.find_by_name("test_host_D")
host_E = self.sched.hosts.find_by_name("test_host_E")
print "A depends on", ",".join([x[0].get_name() for x in host_A.chk_depend_of])
print "B depends on", ",".join([x[0].get_name() for x in host_B.chk_depend_of])
print "C depends on", ",".join([x[0].get_name() for x in host_C.chk_depend_of])
print "D depends on", ",".join([x[0].get_name() for x in host_D.chk_depend_of])
print "E depends on", ",".join([x[0].get_name() for x in host_E.chk_depend_of])
host_C.state = 'DOWN'
print "D state", host_D.state
print "E dep", host_E.chk_depend_of
print "I raise?", host_D.do_i_raise_dependency('d', inherit_parents=False)
        # If I ask D for its dependencies, it should raise nothing if we do not want parents.
self.assertFalse(host_D.do_i_raise_dependency('d', inherit_parents=False) )
        # But it should raise a problem (C here) if we ask for its parents
self.assertTrue(host_D.do_i_raise_dependency('d', inherit_parents=True) )
def test_check_dependencies(self):
self.print_header()
now = time.time()
test_host_0 = self.sched.hosts.find_by_name("test_host_0")
test_host_0.checks_in_progress = []
test_host_0.act_depend_of = [] # ignore the router
test_host_0_test_ok_0 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        # The pending state is always different. Let's assume it is OK
test_host_0.state = 'OK'
# Create a fake check already done for service
cs = Check('waitconsume', 'foo', test_host_0_test_ok_0, now)
cs.exit_status = 2
cs.output = 'BAD'
cs.check_time = now
cs.execution_time = now
# Create a fake check for the host (so that it is in checking)
ch = Check('scheduled', 'foo', test_host_0, now)
test_host_0.checks_in_progress.append(ch)
        # This service should have its host dependency
self.assertNotEqual(0, len(test_host_0_test_ok_0.act_depend_of))
        # Ok, we are at attempt 0 (we should have 1 with the OK state, but never mind)
self.assertEqual(0, test_host_0.attempt)
# Add the check to sched queue
self.sched.add(cs)
self.sched.add(ch)
# This should raise a log entry and schedule the host check now
self.sched.consume_results()
        # Take the host check: the one generated by the dependency, not the usual one
c_dep = test_host_0.actions[1]
self.assertTrue(bool(c_dep.dependency_check))
# Hack it to consider it as down and returning critical state
c_dep.status = 'waitconsume'
c_dep.exit_status = 2
c_dep.output = 'BAD'
c_dep.check_time = now
c_dep.execution_time = now
# Add and process result
self.sched.add(c_dep)
self.sched.consume_results()
        # We should not have a new attempt as it was a dependency check.
self.assertEqual(0, test_host_0.attempt)
def test_disabled_host_service_dependencies(self):
self.print_header()
now = time.time()
test_host_0 = self.sched.hosts.find_by_name("test_host_0")
test_host_0.checks_in_progress = []
test_host_0.act_depend_of = [] # ignore the router
test_host_0_test_ok_0_d = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0_disbld_hst_dep")
self.assertEqual(0, len(test_host_0_test_ok_0_d.act_depend_of))
self.assertNotIn(test_host_0_test_ok_0_d, [x[0] for x in test_host_0.act_depend_of_me])
if __name__ == '__main__':
import cProfile
command = """unittest.main()"""
unittest.main()
#cProfile.runctx( command, globals(), locals(), filename="Thruk.profile" )
| agpl-3.0 | -3,730,806,960,310,747,600 | 47.186441 | 130 | 0.615758 | false |
Anonymouslemming/ansible | lib/ansible/modules/windows/win_template.py | 21 | 4837 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_template
version_added: "1.9.2"
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
    the template's machine, C(template_uid) the owner, C(template_path) the
    path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
newline_sequence:
description:
- Specify the newline sequence to use for templating files.
choices: [ '\n', '\r', '\r\n' ]
default: '\r\n'
version_added: '2.4'
block_start_string:
description:
- The string marking the beginning of a block.
default: '{%'
version_added: '2.4'
block_end_string:
description:
- The string marking the end of a block.
default: '%}'
version_added: '2.4'
variable_start_string:
description:
- The string marking the beginning of a print statement.
default: '{{'
version_added: '2.4'
variable_end_string:
description:
- The string marking the end of a print statement.
default: '}}'
version_added: '2.4'
trim_blocks:
description:
- If this is set to True the first newline after a block is removed (block, not variable tag!).
default: "no"
version_added: '2.4'
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
choices: [ "yes", "no" ]
default: "yes"
version_added: '2.4'
notes:
- For other platforms you can use M(template) which uses '\n' as C(newline_sequence).
- Templates are loaded with C(trim_blocks=True).
- Beware fetching files from windows machines when creating templates
because certain tools, such as Powershell ISE, and regedit's export facility
add a Byte Order Mark as the first character of the file, which can cause tracebacks.
- To find Byte Order Marks in files, use C(Format-Hex <file> -Count 16) on Windows, and use C(od -a -t x1 -N 16 <file>) on Linux.
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author: "Jon Hawkesworth (@jhawkesworth)"
'''
EXAMPLES = r'''
- name: Create a file from a Jinja2 template
win_template:
src: /mytemplates/file.conf.j2
dest: C:\temp\file.conf
- name: Create a Unix-style file from a Jinja2 template
win_template:
src: unix/config.conf.j2
dest: C:\share\unix\config.conf
newline_sequence: '\n'
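# The following task is illustrative only and not taken from the upstream
# examples; the file names are hypothetical, and the marker options shown are
# the module's documented variable string parameters.
- name: Create a file from a Jinja2 template that uses custom variable markers
  win_template:
    src: /mytemplates/custom_markers.conf.j2
    dest: C:\temp\custom_markers.conf
    variable_start_string: '[%'
    variable_end_string: '%]'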
'''
| gpl-3.0 | -7,437,082,120,364,231,000 | 40.698276 | 131 | 0.692578 | false |
xcyan/models | object_detection/matchers/bipartite_matcher.py | 21 | 2176 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bipartite matcher implementation."""
import tensorflow as tf
from tensorflow.contrib.image.python.ops import image_ops
from object_detection.core import matcher
class GreedyBipartiteMatcher(matcher.Matcher):
"""Wraps a Tensorflow greedy bipartite matcher."""
  def _match(self, similarity_matrix, num_valid_rows=-1):
    """Greedily bipartite-matches a collection of rows and columns.
TODO: Add num_valid_columns options to match only that many columns with
all the rows.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
num_valid_rows: A scalar or a 1-D tensor with one element describing the
number of valid rows of similarity_matrix to consider for the bipartite
matching. If set to be negative, then all rows from similarity_matrix
are used.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
# Convert similarity matrix to distance matrix as tf.image.bipartite tries
# to find minimum distance matches.
distance_matrix = -1 * similarity_matrix
_, match_results = image_ops.bipartite_match(
distance_matrix, num_valid_rows)
match_results = tf.reshape(match_results, [-1])
match_results = tf.cast(match_results, tf.int32)
return match_results
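# Illustrative usage sketch (not part of the upstream module). It assumes a
# TF1-style graph/session context and a hypothetical [N, M] float32 similarity
# tensor `sim` where higher values mean more similar:
#
#   sim = tf.constant([[0.9, 0.1], [0.2, 0.8]])
#   match_results = GreedyBipartiteMatcher()._match(sim)
#   # match_results[j] is the matched row index for column j, or -1 if unmatched.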
| apache-2.0 | 561,302,654,544,859,260 | 40.056604 | 80 | 0.699449 | false |
unreal666/outwiker | src/test/actions/test_htmlactions.py | 2 | 7738 | # -*- coding: utf-8 -*-
import unittest
from outwiker.pages.html.htmlpage import HtmlPageFactory
from test.basetestcases import BaseOutWikerGUIMixin
from outwiker.actions.polyactionsid import *
class HtmlActionsTest(unittest.TestCase, BaseOutWikerGUIMixin):
"""
    Tests of the actions for an HTML page
"""
def setUp(self):
self.initApplication()
self.wikiroot = self.createWiki()
self._turnSyntax = [
(BOLD_STR_ID, "<b>", "</b>"),
(ITALIC_STR_ID, "<i>", "</i>"),
(BOLD_ITALIC_STR_ID, "<b><i>", "</i></b>"),
(UNDERLINE_STR_ID, "<u>", "</u>"),
(STRIKE_STR_ID, "<strike>", "</strike>"),
(SUBSCRIPT_STR_ID, "<sub>", "</sub>"),
(SUPERSCRIPT_STR_ID, "<sup>", "</sup>"),
(ALIGN_LEFT_STR_ID, '<div align="left">', '</div>'),
(ALIGN_CENTER_STR_ID, '<div align="center">', '</div>'),
(ALIGN_RIGHT_STR_ID, '<div align="right">', '</div>'),
(ALIGN_JUSTIFY_STR_ID, '<div align="justify">', '</div>'),
(HEADING_1_STR_ID, "<h1>", "</h1>"),
(HEADING_2_STR_ID, "<h2>", "</h2>"),
(HEADING_3_STR_ID, "<h3>", "</h3>"),
(HEADING_4_STR_ID, "<h4>", "</h4>"),
(HEADING_5_STR_ID, "<h5>", "</h5>"),
(HEADING_6_STR_ID, "<h6>", "</h6>"),
(PREFORMAT_STR_ID, "<pre>", "</pre>"),
(CODE_STR_ID, "<code>", "</code>"),
(ANCHOR_STR_ID, '<a name="', '"></a>'),
(QUOTE_STR_ID, '<blockquote>', '</blockquote>'),
(IMAGE_STR_ID, '<img src="', '"/>'),
]
self._replaceSyntax = [
(HORLINE_STR_ID, "<hr>"),
]
HtmlPageFactory().create(self.wikiroot, "HTML-страница", [])
HtmlPageFactory().create(self.wikiroot, "temp", [])
        # The page we switch to before changing the content of the main page.
        # We could use None instead of temppage, but then the program would have
        # to delete and recreate the toolbars every time, which is slow.
self.temppage = self.wikiroot["temp"]
self.testpage = self.wikiroot["HTML-страница"]
self.application.wikiroot = self.wikiroot
self.application.selectedPage = self.testpage
def tearDown(self):
self.destroyApplication()
self.destroyWiki(self.wikiroot)
def _getEditor(self):
return self.application.mainWindow.pagePanel.pageView.codeEditor
def testTurnSyntaxEmpty(self):
for syntax in self._turnSyntax:
self.application.selectedPage = self.temppage
self.testpage.content = ""
self.application.selectedPage = self.testpage
self.application.actionController.getAction(syntax[0]).run(None)
self.assertEqual(self._getEditor().GetText(),
syntax[1] + syntax[2])
def testTurnSyntaxSelectedAll(self):
text = "Бла-бла-бла"
for syntax in self._turnSyntax:
self.application.selectedPage = self.temppage
self.testpage.content = text
self.application.selectedPage = self.testpage
self._getEditor().SetSelection(0, len(text))
self.application.actionController.getAction(syntax[0]).run(None)
self.assertEqual(self._getEditor().GetText(),
syntax[1] + "Бла-бла-бла" + syntax[2])
def testTurnSyntaxSelectedPart(self):
text = "Бла-бла-бла"
for syntax in self._turnSyntax:
self.application.selectedPage = self.temppage
self.testpage.content = text
self.application.selectedPage = self.testpage
            self._getEditor().SetSelection(4, 7)
self.application.actionController.getAction(syntax[0]).run(None)
self.assertEqual(self._getEditor().GetText(),
"Бла-{}бла{}-бла".format (syntax[1], syntax[2]))
def testReplaceSyntaxEmpty(self):
for syntax in self._replaceSyntax:
self.application.selectedPage = self.temppage
self.testpage.content = ""
self.application.selectedPage = self.testpage
self.application.actionController.getAction(syntax[0]).run(None)
self.assertEqual(self._getEditor().GetText(), syntax[1])
def testReplaceSyntaxSelectedAll(self):
text = "Бла-бла-бла"
for syntax in self._replaceSyntax:
self.application.selectedPage = self.temppage
self.testpage.content = text
self.application.selectedPage = self.testpage
self._getEditor().SetSelection(0, len(text))
self.application.actionController.getAction(syntax[0]).run(None)
self.assertEqual(self._getEditor().GetText(), syntax[1])
def testReplaceSyntaxSelectedPart(self):
text = "Бла-бла-бла"
for syntax in self._replaceSyntax:
self.application.selectedPage = self.temppage
self.testpage.content = text
self.application.selectedPage = self.testpage
self._getEditor().SetSelection(4, 7)
self.application.actionController.getAction(syntax[0]).run(None)
self.assertEqual(self._getEditor().GetText(),
"Бла-{}-бла".format(syntax[1]))
def testListBulletsEmpty(self):
result = """<ul>
<li></li>
</ul>"""
self.application.selectedPage = self.temppage
self.testpage.content = ""
self.application.selectedPage = self.testpage
self.application.actionController.getAction(LIST_BULLETS_STR_ID).run(None)
self.assertEqual(self._getEditor().GetText(), result)
def testListNumbersEmpty(self):
result = """<ol>
<li></li>
</ol>"""
self.application.selectedPage = self.temppage
self.testpage.content = ""
self.application.selectedPage = self.testpage
self.application.actionController.getAction(LIST_NUMBERS_STR_ID).run(None)
self.assertEqual(self._getEditor().GetText(), result)
def testListBulletsSelectedAll(self):
text = """йцкуйцук
укеуке
ывапвыап
ывапвыапыап
ывапываппа"""
result = """<ul>
<li>йцкуйцук</li>
<li>укеуке</li>
<li>ывапвыап</li>
<li>ывапвыапыап</li>
<li>ывапываппа</li>
</ul>"""
self.application.selectedPage = self.temppage
self.testpage.content = text
self.application.selectedPage = self.testpage
        self._getEditor().SetSelection(0, len(text))
self.application.actionController.getAction(LIST_BULLETS_STR_ID).run(None)
self.assertEqual(self._getEditor().GetText(), result)
def testListNumbersSelectedAll(self):
text = """йцкуйцук
укеуке
ывапвыап
ывапвыапыап
ывапываппа"""
result = """<ol>
<li>йцкуйцук</li>
<li>укеуке</li>
<li>ывапвыап</li>
<li>ывапвыапыап</li>
<li>ывапываппа</li>
</ol>"""
self.application.selectedPage = self.temppage
self.testpage.content = text
self.application.selectedPage = self.testpage
self._getEditor().SetSelection(0, len(text))
self.application.actionController.getAction(LIST_NUMBERS_STR_ID).run(None)
self.assertEqual(self._getEditor().GetText(), result)
| gpl-3.0 | 1,667,000,238,484,813,800 | 33.685714 | 91 | 0.599945 | false |
mozilla/bztools | auto_nag/scripts/tracking.py | 1 | 4049 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
from auto_nag.nag_me import Nag
class Tracking(BzCleaner, Nag):
def __init__(self, channel, untouched):
# since it's used in name() we must have it before to call parent ctor
self.untouched = untouched
super(Tracking, self).__init__()
self.channel = channel
self.assignees = {}
if not self.init_versions():
return
self.version = self.versions[self.channel]
def description(self):
if self.untouched:
return "Bugs which are tracked in {} and untouched in the last 3 days".format(
self.channel
)
return "Bugs which are tracked in {}".format(self.channel)
def name(self):
return "tracking" + ("_untouched" if self.untouched else "")
def template(self):
return "tracking.html"
def nag_template(self):
return "tracking_nag.html"
def has_last_comment_time(self):
return True
def has_default_products(self):
return False
def has_needinfo(self):
return True
def has_assignee(self):
return True
def get_extra_for_template(self):
return {
"channel": "nightly" if self.channel == "central" else self.channel,
"version": self.version,
"untouched": self.untouched,
"next_release": (utils.get_next_release_date() - self.nag_date).days,
}
def get_extra_for_nag_template(self):
return self.get_extra_for_template()
def columns(self):
return ["id", "summary", "needinfos", "assignee", "last_comment"]
def columns_nag(self):
return ["id", "summary", "needinfos", "To", "last_comment"]
def set_people_to_nag(self, bug, buginfo):
priority = self.get_priority(bug)
if not self.filter_bug(priority):
return None
assignee = bug["assigned_to"]
real = bug["assigned_to_detail"]["real_name"]
buginfo["to"] = assignee
buginfo["To"] = real
if not self.add(assignee, buginfo, priority=priority):
self.add_no_manager(buginfo["id"])
return bug
def get_bz_params(self, date):
status = utils.get_flag(self.version, "status", self.channel)
self.tracking = utils.get_flag(self.version, "tracking", self.channel)
tracking_value = (
"+,blocking" if self.channel != "esr" else self.versions["beta"] + "+"
)
fields = [self.tracking]
params = {
"include_fields": fields,
"f1": self.tracking,
"o1": "anywords",
"v1": tracking_value,
"f2": status,
"o2": "nowordssubstr",
"v2": ",".join(["wontfix", "fixed", "disabled", "verified", "unaffected"]),
"f3": self.tracking,
"o3": "changedbefore",
"v3": "-1d",
"n4": 1,
"f4": self.tracking,
"o4": "changedafter",
"v4": "-1d",
}
if self.channel == "central":
tracking = utils.get_flag(self.versions["beta"], "tracking", "beta")
params.update({"f5": tracking, "o5": "nowordssubstr", "v5": "+,blocking"})
elif self.channel != "esr":
approval = utils.get_flag(self.version, "approval", self.channel)
params.update(
{"f5": "flagtypes.name", "o5": "notsubstring", "v5": approval + "?"}
)
if self.untouched:
params.update({"f6": "days_elapsed", "o6": "greaterthan", "v6": 3})
return params
if __name__ == "__main__":
Tracking("beta", False).run()
Tracking("beta", True).run()
Tracking("central", False).run()
Tracking("central", True).run()
Tracking("esr", False).run()
| bsd-3-clause | -7,715,408,698,369,393,000 | 30.88189 | 90 | 0.559891 | false |
kwentz10/Photosynthesis_Optimization_Modeling | uncertain_params_all.py | 1 | 5737 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 12:08:50 2017
@author: Katherine
"""
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 24 09:04:31 2017
@author: Katherine
Draw parameter values from uniform distribution
"""
import numpy as np
def monte_carlo(chl_mean, chl_sd, dia_mean, dia_sd, na_min, na_max, ht_mean, ht_sd):
    #meadow-specific parameter means and standard deviations
temp_mean=[15.+2.,15.,15.-2.]
temp_sd=[2.5,2.5,2.5]
vwc_mean=[0.12,0.15,0.27]
vwc_sd=[0.008,0.013,0.022]
#parameter min and max for all meadows
ekc_lw=80500.0*0.8 #Activation energy for K of CO2 (J mol-1) VARIABLE
ekc_hg=80500.0*1.2 #Activation energy for K of CO2 (J mol-1) VARIABLE
eko_lw=14500.0*0.8 #Activation energy for K of O2 (J mol-1) VARIABLE
eko_hg=14500.0*1.2 #Activation energy for K of O2 (J mol-1) VARIABLE
etau_lw=-29000.0*0.8 #Activation energy for tau (???) (J mol-1) VARIABLE
etau_hg=-29000.0*1.2 #Activation energy for tau (???) (J mol-1) VARIABLE
ev_lw=55000.0*0.8 #Activation energy for carboxylation (J mol-1) VARIABLE
ev_hg=55000.0*1.2 #Activation energy for carboxylation (J mol-1) VARIABLE
ej_lw=55000.0*0.8 #Activation energy for electron transport (J mol-1) VARIABLE
ej_hg=55000.0*1.2 #Activation energy for electron transport (J mol-1) VARIABLE
ra_lw=20.7*0.8 #specific rubisco activity (umol CO2/g Rub s) VARIABLE
ra_hg=20.7*1.2 #specific rubisco activity (umol CO2/g Rub s) VARIABLE
flnr_lw=0.1*0.8 #fraction of leaf nitrogen in rubisco (g N Rub/g N leaf) VARIABLE
flnr_hg=0.1*1.2 #fraction of leaf nitrogen in rubisco (g N Rub/g N leaf) VARIABLE
rh_lw=0.5*0.8 #relative humidity (kPa/kPa) VARIABLE
rh_hg=0.5*1.2 #relative humidity (kPa/kPa) VARIABLE
ko25_lw=30000*0.8 #Michaelis-Menten kinetic coefficient for oxygen at 25 C(Pa) VARIABLE
ko25_hg=30000*1.2 #Michaelis-Menten kinetic coefficient for oxygen at 25 C(Pa) VARIABLE
kc25_lw=30*0.8 #Michaelis-Menten kinetic coefficient for carbon dioxide at 25 C (Pa) VARIABLE
kc25_hg=30*1.2 #Michaelis-Menten kinetic coefficient for carbon dioxide at 25 C (Pa) VARIABLE
g0_lw=0.002*0.8 #Ball-Berry stomatal conductance intercept parameter (mol H2O/m2s) VARIABLE
g0_hg=0.002*1.2 #Ball-Berry stomatal conductance intercept parameter (mol H2O/m2s) VARIABLE
m_lw=9.0*0.8 #ball-berry parameter (unitless) VARIABLE
m_hg=9.0*1.2 #ball-berry parameter (unitless) VARIABLE
u_lw=5.0*0.8 #windspeed (m/s) VARIABLE
u_hg=5.0*1.2 #windspeed (m/s) VARIABLE
qeff_lw=0.32*0.8 #leaf quantum yield, electrons VARIABLE
qeff_hg=0.32*1.2 #leaf quantum yield, electrons VARIABLE
PAR_lw=2000*0.8 #photosynthetic active radiation (umol/m2s) VARIABLE
PAR_hg=2000*1.2 #photosynthetic active radiation (umol/m2s) VARIABLE
jm_lw=2.68*0.8 #slope coefficient VARIABLE
jm_hg=2.68*1.2 #slope coefficient VARIABLE
vwc_min_lw=0.08*0.8 #minimum soil water content for photosynthesis to occur (permanent wilting point) (cm3/cm3) VARIABLE
vwc_min_hg=0.08*1.2 #minimum soil water content for photosynthesis to occur (permanent wilting point) (cm3/cm3) VARIABLE
vwc_max_lw=0.66*0.8 #maximum soil water content where increases in soil water do not affect photosynthesis (field capacity?) (cm3/cm3) VARIABLE
vwc_max_hg=0.66*1.2 #maximum soil water content where increases in soil water do not affect photosynthesis (field capacity?) (cm3/cm3) VARIABLE
    q_lw=0.2*0.8 #parameter for soil water effect on photosynthesis (unitless) VARIABLE
    q_hg=0.2*1.2 #parameter for soil water effect on photosynthesis (unitless) VARIABLE
#make dictionary of parameters with uncertainty
params=[]
for i in range(len(chl_mean)):
meadow_params=[]
for ii in range(30):
d={} #create dictionary
#parameters unique to each meadow
d["chl"]=np.random.normal(chl_mean[i],chl_sd[i],1)
d["na"]=np.random.uniform(na_min[i],na_max[i],1)
d["dia"]=np.random.normal(dia_mean[i],dia_sd[i],1)
if d["dia"]<0.0:
d["dia"]=[dia_mean[i]-dia_sd[i]]
d["ht"]=np.random.normal(ht_mean[i],ht_sd[i],1)
d["temp"]=np.random.normal(temp_mean[i],temp_sd[i],1)
d["vwc"]=np.random.normal(vwc_mean[i],vwc_sd[i],1)
#parameters for all meadows
d["ekc"]=np.random.uniform(ekc_lw,ekc_hg,1)
d["eko"]=np.random.uniform(eko_lw,eko_hg,1)
d["etau"]=np.random.uniform(etau_lw,etau_hg,1)
d["ev"]=np.random.uniform(ev_lw,ev_hg,1)
d["ej"]=np.random.uniform(ej_lw,ej_hg,1)
d["ra"]=np.random.uniform(ra_lw,ra_hg,1)
d["flnr"]=np.random.uniform(flnr_lw,flnr_hg,1)
d["rh"]=np.random.uniform(rh_lw,rh_hg,1)
d["ko25"]=np.random.uniform(ko25_lw,ko25_hg,1)
d["kc25"]=np.random.uniform(kc25_lw,kc25_hg,1)
d["g0"]=np.random.uniform(g0_lw,g0_hg,1)
d["m"]=np.random.uniform(m_lw,m_hg,1)
d["u"]=np.random.uniform(u_lw,u_hg,1)
d["qeff"]=np.random.uniform(qeff_lw,qeff_hg,1)
d["PAR"]=np.random.uniform(PAR_lw,PAR_hg,1)
d["jm"]=np.random.uniform(jm_lw,jm_hg,1)
d["vwc_min"]=np.random.uniform(vwc_min_lw,vwc_min_hg,1)
d["vwc_max"]=np.random.uniform(vwc_max_lw,vwc_max_hg,1)
d["q"]=np.random.uniform(q_lw,q_hg,1)
meadow_params+=[d]
        params+=[meadow_params]
    return params
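# Illustrative call (the argument values below are hypothetical placeholders,
# one entry per meadow, not measured data):
#     params = monte_carlo(chl_mean=[400.], chl_sd=[40.],
#                          dia_mean=[0.04], dia_sd=[0.004],
#                          na_min=[1.0], na_max=[3.0],
#                          ht_mean=[0.1], ht_sd=[0.01])
#     # params[0] is a list of 30 sampled parameter dictionaries for meadow 0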
| mit | 7,917,404,251,319,288,000 | 38.565517 | 147 | 0.622625 | false |
qedi-r/home-assistant | homeassistant/components/websocket_api/http.py | 2 | 7181 | """View to accept incoming websocket connection."""
import asyncio
from contextlib import suppress
import logging
from typing import Optional
from aiohttp import web, WSMsgType
import async_timeout
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from .const import (
MAX_PENDING_MSG,
CANCELLATION_ERRORS,
URL,
ERR_UNKNOWN_ERROR,
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
DATA_CONNECTIONS,
JSON_DUMP,
)
from .auth import AuthPhase, auth_required_message
from .error import Disconnect
from .messages import error_message
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name = "websocketapi"
url = URL
requires_auth = False
async def get(self, request):
"""Handle an incoming websocket connection."""
return await WebSocketHandler(request.app["hass"], request).async_handle()
class WebSocketHandler:
"""Handle an active websocket client connection."""
def __init__(self, hass, request):
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock: Optional[web.WebSocketResponse] = None
self._to_write: asyncio.Queue = asyncio.Queue(maxsize=MAX_PENDING_MSG)
self._handle_task = None
self._writer_task = None
self._logger = logging.getLogger("{}.connection.{}".format(__name__, id(self)))
async def _writer(self):
"""Write outgoing messages."""
# Exceptions if Socket disconnected or cancelled by connection handler
with suppress(RuntimeError, ConnectionResetError, *CANCELLATION_ERRORS):
while not self.wsock.closed:
message = await self._to_write.get()
if message is None:
break
self._logger.debug("Sending %s", message)
if isinstance(message, str):
await self.wsock.send_str(message)
continue
try:
dumped = JSON_DUMP(message)
except (ValueError, TypeError) as err:
self._logger.error(
"Unable to serialize to JSON: %s\n%s", err, message
)
await self.wsock.send_json(
error_message(
message["id"], ERR_UNKNOWN_ERROR, "Invalid JSON in response"
)
)
continue
await self.wsock.send_str(dumped)
@callback
def _send_message(self, message):
"""Send a message to the client.
Closes connection if the client is not reading the messages.
Async friendly.
"""
try:
self._to_write.put_nowait(message)
except asyncio.QueueFull:
self._logger.error(
"Client exceeded max pending messages [2]: %s", MAX_PENDING_MSG
)
self._cancel()
@callback
def _cancel(self):
"""Cancel the connection."""
self._handle_task.cancel()
self._writer_task.cancel()
async def async_handle(self):
"""Handle a websocket response."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
await wsock.prepare(request)
self._logger.debug("Connected")
# Py3.7+
if hasattr(asyncio, "current_task"):
# pylint: disable=no-member
self._handle_task = asyncio.current_task()
else:
self._handle_task = asyncio.Task.current_task()
@callback
def handle_hass_stop(event):
"""Cancel this connection."""
self._cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop
)
self._writer_task = self.hass.async_create_task(self._writer())
auth = AuthPhase(self._logger, self.hass, self._send_message, request)
connection = None
disconnect_warn = None
try:
self._send_message(auth_required_message())
# Auth Phase
try:
with async_timeout.timeout(10):
msg = await wsock.receive()
except asyncio.TimeoutError:
disconnect_warn = "Did not receive auth message within 10 seconds"
raise Disconnect
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
raise Disconnect
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
raise Disconnect
try:
msg_data = msg.json()
except ValueError:
disconnect_warn = "Received invalid JSON."
raise Disconnect
self._logger.debug("Received %s", msg_data)
connection = await auth.async_handle(msg_data)
self.hass.data[DATA_CONNECTIONS] = (
self.hass.data.get(DATA_CONNECTIONS, 0) + 1
)
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_CONNECTED
)
# Command phase
while not wsock.closed:
msg = await wsock.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
break
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
break
try:
msg_data = msg.json()
except ValueError:
disconnect_warn = "Received invalid JSON."
break
self._logger.debug("Received %s", msg_data)
connection.async_handle(msg_data)
except asyncio.CancelledError:
self._logger.info("Connection closed by client")
except Disconnect:
pass
except Exception: # pylint: disable=broad-except
self._logger.exception("Unexpected error inside websocket API")
finally:
unsub_stop()
if connection is not None:
connection.async_close()
try:
self._to_write.put_nowait(None)
# Make sure all error messages are written before closing
await self._writer_task
except asyncio.QueueFull:
self._writer_task.cancel()
await wsock.close()
if disconnect_warn is None:
self._logger.debug("Disconnected")
else:
self._logger.warning("Disconnected: %s", disconnect_warn)
if connection is not None:
self.hass.data[DATA_CONNECTIONS] -= 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_DISCONNECTED
)
return wsock
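# Illustrative wiring sketch (not part of this module): during component setup
# the websocket_api integration registers this view roughly as follows:
#
#     hass.http.register_view(WebsocketAPIView)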
| apache-2.0 | 8,870,288,057,287,233,000 | 30.774336 | 88 | 0.56176 | false |
sunlightlabs/tcamp | tcamp/reg/migrations/0007_auto.py | 1 | 12144 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Ticket', fields ['email']
db.create_index(u'reg_ticket', ['email'])
def backwards(self, orm):
# Removing index on 'Ticket', fields ['email']
db.delete_index(u'reg_ticket', ['email'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'reg.couponcode': {
'Meta': {'object_name': 'CouponCode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'discount': ('django_extras.db.models.fields.PercentField', [], {'default': '100'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_tickets': ('django.db.models.fields.IntegerField', [], {})
},
u'reg.sale': {
'Meta': {'object_name': 'Sale'},
'address1': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'address2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'coupon_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reg.CouponCode']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
'first_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'reg.ticket': {
'Meta': {'object_name': 'Ticket'},
'ambassador_program': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '12'}),
'attend_day1': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'attend_day2': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'barcode': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '36', 'blank': 'True'}),
'checked_in': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'diet_allergies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_allergies_desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'diet_gluten_free': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_other_desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'diet_vegan': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diet_vegetarian': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'db_index': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'lobby_day': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reg.Sale']", 'null': 'True'}),
'subscribe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '127', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reg.TicketType']"}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'reg.tickettype': {
'Meta': {'ordering': "['position']", 'object_name': 'TicketType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sked.Event']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tickets': ('django.db.models.fields.PositiveIntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'onsite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
u'sked.event': {
'Meta': {'ordering': "('-start_date',)", 'object_name': 'Event'},
'_description_rendered': ('django.db.models.fields.TextField', [], {}),
'_overview_rendered': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sked_events'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'description_markup_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'event'", 'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'overview': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'overview_markup_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'registration_is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registration_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'session_label': ('django.db.models.fields.CharField', [], {'default': "'session'", 'max_length': '64'}),
'session_length': ('timedelta.fields.TimedeltaField', [], {}),
'session_submission_is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['reg'] | bsd-3-clause | -563,507,370,118,593,860 | 78.379085 | 187 | 0.548748 | false |
timuralp/glacier-tests | glaciertests/test_glacier.py | 2 | 3998 | from boto.glacier.exceptions import UnexpectedHTTPResponseError as GlacierError
import json
import nose
from nose.tools import eq_
from nose.tools import ok_
import dateutil.parser
from glaciertests.util import Util
from glaciertests.util import GlacierTestsConfig
def test_vault_list_all():
conn = GlacierTestsConfig().connection()
result = conn.list_vaults()
ok_('RequestId' in result)
ok_('VaultList' in result)
ok_('Marker' in result)
def test_vault_create():
conn = GlacierTestsConfig().connection()
vault = Util.get_new_vault()
ok_('Location' in vault)
ok_('RequestId' in vault)
vault_name = vault['Location'].split('/')[-1]
_check_in_listing([vault_name])
def test_vault_delete():
conn = GlacierTestsConfig().connection()
vault = Util.get_new_vault()
ok_('Location' in vault)
ok_('RequestId' in vault)
vault_name = vault['Location'].split('/')[-1]
_check_in_listing([vault_name])
conn.delete_vault(vault_name)
_check_not_in_listing([vault_name])
def test_vault_describe_does_not_exist():
conn = GlacierTestsConfig().connection()
try:
conn.describe_vault(GlacierTestsConfig().prefix() + '-doesnotexist')
except GlacierError as e:
eq_(e.code, 'ResourceNotFoundException')
body = json.loads(e.body)
eq_(body['type'], 'client')
def test_archive_create():
conn = GlacierTestsConfig().connection()
vault = Util.get_new_vault()
vault_name = vault['Location'].split('/')[-1]
archive = Util.upload_archive(vault_name, b"hello", None)
ok_('ArchiveId' in archive)
def test_archive_delete():
conn = GlacierTestsConfig().connection()
vault = Util.get_new_vault()
vault_name = vault['Location'].split('/')[-1]
archive = Util.upload_archive(vault_name, b"hello", None)
ok_('ArchiveId' in archive)
archive_id = archive['ArchiveId']
conn.delete_archive(vault_name, archive_id)
def test_create_inventory_job():
vault = Util.get_new_vault()
vault_name = vault['Location'].split('/')[-1]
description = "test archive"
conn = GlacierTestsConfig().connection()
job_data = {'Type': 'inventory-retrieval'}
job = conn.initiate_job(vault_name, job_data)
ok_('JobId' in job)
description = conn.describe_job(vault_name, job['JobId'])
date = dateutil.parser.parse(description['CompletionDate'])
date = dateutil.parser.parse(description['CreationDate'])
eq_(description['StatusCode'], 'Succeeded')
eq_(description['Action'], 'inventory-retrieval')
def test_create_archive_retrieval_job():
vault = Util.get_new_vault()
vault_name = vault['Location'].split('/')[-1]
description = "test archive"
archive = _setup_test_archive(vault_name, description)
conn = GlacierTestsConfig().connection()
job_data = {'Type': 'archive-retrieval',
'ArchiveId': archive}
job = conn.initiate_job(vault_name, job_data)
ok_('JobId' in job)
description = conn.describe_job(vault_name, job['JobId'])
date = dateutil.parser.parse(description['CompletionDate'])
date = dateutil.parser.parse(description['CreationDate'])
eq_(description['RetrievalByteRange'], '0-4')
eq_(description['StatusCode'], 'Succeeded')
eq_(description['Completed'], True)
eq_(description['ArchiveId'], archive)
eq_(description['Action'], 'archive-retrieval')
def _get_vault_names():
conn = GlacierTestsConfig().connection()
result = conn.list_vaults()
return [x['VaultName'] for x in result['VaultList']]
def _check_not_in_listing(names):
vault_names = _get_vault_names()
eq_(len(set(vault_names).intersection(set(names))), 0)
def _check_in_listing(expected_names):
vault_names = _get_vault_names()
ok_(set(vault_names).issuperset(set(expected_names)))
def _setup_test_archive(vault, description=None):
archive = Util.upload_archive(vault, b"hello", None)
ok_('ArchiveId' in archive)
return archive['ArchiveId']
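# Running these tests is tooling-dependent; an illustrative invocation with the
# nose runner imported above would be:
#
#     nosetests glaciertests/test_glacier.py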
| mit | -1,349,212,336,169,829,600 | 31.504065 | 79 | 0.670335 | false |
cedadev/ndg_xacml | ndg/xacml/utils/factory.py | 2 | 4928 | """
Class Factory
NERC DataGrid project
"""
__author__ = "Philip Kershaw"
__date__ = "15/02/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
import traceback
import logging, os, sys
log = logging.getLogger(__name__)
def importModuleObject(moduleName, objectName=None, objectType=None):
'''Import from a string module name and object name. Object can be
any entity contained in a module
@param moduleName: Name of module containing the class
@type moduleName: str
@param objectName: Name of the class to import. If none is given, the
class name will be assumed to be the last component of modulePath
@type objectName: str
@rtype: class object
@return: imported class'''
if objectName is None:
if ':' in moduleName:
# Support Paste style import syntax with rhs of colon denoting
# module content to import
_moduleName, objectName = moduleName.rsplit(':', 1)
if '.' in objectName:
objectName = objectName.split('.')
else:
_moduleName, objectName = moduleName.rsplit('.', 1)
else:
_moduleName = moduleName
if isinstance(objectName, basestring):
objectName = [objectName]
module = __import__(_moduleName, globals(), locals(), [])
components = _moduleName.split('.')
try:
for component in components[1:]:
module = getattr(module, component)
except AttributeError:
raise AttributeError("Error importing %r: %s" %
(objectName[0], traceback.format_exc()))
importedObject = module
for i in objectName:
importedObject = getattr(importedObject, i)
# Check class inherits from a base class
if objectType and not issubclass(importedObject, objectType):
raise TypeError("Specified class %r must be derived from %r; got %r" %
(objectName, objectType, importedObject))
return importedObject
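# Illustrative usage (a stdlib class is used purely as an example):
#
#     cls = importModuleObject('logging.handlers:RotatingFileHandler')
#     # or, equivalently, the dotted form:
#     cls = importModuleObject('logging.handlers.RotatingFileHandler')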
def callModuleObject(moduleName, objectName=None, moduleFilePath=None,
objectType=None, objectArgs=None, objectProperties=None):
'''
Create and return an instance of the specified class or invoke callable
@param moduleName: Name of module containing the class
@type moduleName: str
@param objectName: Name of the class to instantiate. May be None in
which case, the class name is parsed from the moduleName last element
@type objectName: str
@param moduleFilePath: Path to the module - if unset, assume module on
system path already
@type moduleFilePath: str
@param objectProperties: dict of properties to use when instantiating the
class
@type objectProperties: dict
@param objectType: expected type for the object to instantiate - to
enforce use of specific interfaces
@type objectType: object
@return: object - instance of the class specified
'''
# ensure that properties is a dict - NB, it may be passed in as a null
# value which can override the default val
if not objectProperties:
objectProperties = {}
if not objectArgs:
objectArgs = ()
# variable to store original state of the system path
sysPathBak = None
try:
try:
# Module file path may be None if the new module to be loaded
# can be found in the existing system path
if moduleFilePath:
if not os.path.exists(moduleFilePath):
raise IOError("Module file path '%s' doesn't exist" %
moduleFilePath)
# Temporarily extend system path ready for import
sysPathBak = sys.path
sys.path.append(moduleFilePath)
# Import module name specified in properties file
importedObject = importModuleObject(moduleName,
objectName=objectName,
objectType=objectType)
finally:
# revert back to original sys path, if necessary
# NB, python requires the use of a try/finally OR a try/except
# block - not both combined
if sysPathBak:
sys.path = sysPathBak
except Exception, e:
log.debug('%r module import raised %r type exception: %s',
moduleName, e.__class__, traceback.format_exc())
raise
# Instantiate class
if objectArgs:
obj = importedObject(*objectArgs, **objectProperties)
else:
obj = importedObject(**objectProperties)
return obj
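# Illustrative call (a stdlib class is used purely as an example; the arguments
# are hypothetical):
#
#     handler = callModuleObject('logging.handlers',
#                                objectName='RotatingFileHandler',
#                                objectArgs=('/tmp/example.log',),
#                                objectProperties={'maxBytes': 1024})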
| bsd-3-clause | -9,119,949,805,445,766,000 | 36.333333 | 78 | 0.608969 | false |
sergio-teruel/bank-payment | __unported__/bank_statement_instant_voucher/model/account_voucher_instant.py | 13 | 13555 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 - 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
class instant_voucher(orm.TransientModel):
_name = 'account.voucher.instant'
_description = 'Instant Voucher'
def cancel(self, cr, uid, ids, context=None):
"""
Delete the voucher and close window
"""
assert len(ids) == 1, "Will only take one resource id"
instant = self.browse(cr, uid, ids[0], context=context)
if instant.voucher_id:
self.pool.get('account.voucher').cancel_voucher(
cr, uid, [instant.voucher_id.id], context=context)
self.pool.get('account.voucher').unlink(
cr, uid, [instant.voucher_id.id], context=context)
return {'type': 'ir.actions.act_window_close'}
def get_voucher_defaults(self, cr, uid, vals, context=None):
"""
Gather conditional defaults based on given key, value pairs
:param vals: dictionary of key, value pairs
:returns: dictionary of default values for fields not in vals
"""
values_pool = self.pool.get('ir.values')
voucher_pool = self.pool.get('account.voucher')
res = {}
for (key, val) in vals.iteritems():
if val and voucher_pool._all_columns[key].column.change_default:
for default in values_pool.get_defaults(
cr, uid, 'account.voucher', '%s=%s' % (key, val)):
if default[1] not in vals:
res[default[1]] = default[2]
return res
def create_voucher(self, cr, uid, ids, context=None):
"""
Create a fully fledged voucher counterpart for the
statement line. User only needs to process taxes and may
adapt cost/income account.
"""
assert len(ids) == 1, "Will only take one resource id"
voucher_pool = self.pool.get('account.voucher')
period_pool = self.pool.get('account.period')
instant = self.browse(cr, uid, ids[0], context=context)
line = instant.statement_line_id
voucher_type = line.amount < 0 and 'purchase' or 'sale'
journal_ids = self.pool.get('account.journal').search(
cr, uid, [('company_id', '=', line.company_id.id),
('type', '=', voucher_type)])
if not journal_ids:
            raise orm.except_orm(
                _('Error'),
                _('No %s journal defined') % voucher_type)
journal = self.pool.get('account.journal').browse(
cr, uid, journal_ids[0], context=context)
if journal.type in ('sale', 'sale_refund'):
line_account_id = (
journal.default_credit_account_id and
journal.default_credit_account_id.id or False
)
elif journal.type in ('purchase', 'expense', 'purchase_refund'):
line_account_id = (
journal.default_debit_account_id and
journal.default_debit_account_id.id or False
)
vals = {
'name': (_('Voucher for statement line %s.%s') %
(line.statement_id.name, line.name)),
'reference': line.ref or False,
'company_id': line.company_id.id,
'partner_id': instant.partner_id.id,
'date': line.date or False,
'account_id': line.account_id.id,
'type': voucher_type,
'line_ids': [(0, 0, {'amount': abs(line.amount),
'account_id': line_account_id,
'type': line.amount < 0 and 'dr' or 'cr',
'name': line.ref or False,
})],
'amount': line.amount and abs(line.amount) or False,
'journal_id': journal_ids[0],
}
if vals['date']:
period_ids = period_pool.find(
cr, uid, vals['date'], context=context
)
if period_ids:
vals['period_id'] = period_ids[0]
vals.update(self.get_voucher_defaults(cr, uid, vals, context=context))
voucher_id = voucher_pool.create(
cr, uid, vals, context=context)
self.write(
cr, uid, ids[0],
{'voucher_id': voucher_id,
'state': 'ready',
'type': voucher_type,
}, context=context)
return {
'name': self._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': self._name,
'domain': [],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': ids[0],
'nodestroy': False,
}
def dummy(self, cr, uid, ids, context=None):
return {
'name': self._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': self._name,
'domain': [],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': ids[0],
'nodestroy': False,
}
def default_get(self, cr, uid, fields_list, context=None):
"""
Gather sane default values from the originating statement line
"""
res = super(instant_voucher, self).default_get(
cr, uid, fields_list, context=context)
if 'statement_line_id' in fields_list:
res['statement_line_id'] = (
context.get('active_id') or
context.get('active_ids') and context.get('active_ids')[0])
if not res['statement_line_id']:
raise orm.except_orm(
_('Error'),
_('Cannot determine statement line'))
line = self.pool.get('account.bank.statement.line').browse(
cr, uid, res['statement_line_id'], context=context)
if 'balance' in fields_list:
res['balance'] = line.amount
if 'ref' in fields_list:
res['ref'] = line.ref
if 'partner_id' in fields_list:
if line.partner_id:
res['partner_id'] = line.partner_id.id
return res
def _get_balance(self, cr, uid, ids, field_name, args, context=None):
"""
Compute the expected residual
TODO: currency conversion
"""
res = {}
for instant in self.browse(cr, uid, ids, context=context):
if instant.voucher_id and instant.voucher_id.state == 'posted':
amount = instant.statement_line_id.amount
counteramount = 0.0
statement_account_id = instant.statement_line_id.account_id.id
for line in instant.voucher_id.move_ids:
if line.account_id.id == statement_account_id:
counteramount = line.debit - line.credit
else:
amount = abs(instant.statement_line_id.amount)
counteramount = abs(instant.voucher_id
and instant.voucher_id.amount or 0.0)
res[instant.id] = amount - counteramount
return res
def confirm(self, cr, uid, ids, context=None):
"""
Post the voucher if necessary
Post the voucher's move lines if necessary
Sanity checks on currency and residual = 0.0
If the account_banking module is installed, perform matching
        and reconciliation. If not, the user is left to OpenERP's
        manual reconciliation.
"""
assert len(ids) == 1, "Will only take one resource id"
statement_line_obj = self.pool.get('account.bank.statement.line')
voucher_obj = self.pool.get('account.voucher')
move_obj = self.pool.get('account.move')
instant = self.browse(cr, uid, ids[0], context=context)
statement_line = instant.statement_line_id
voucher_currency = (instant.voucher_id.currency_id and
instant.voucher_id.currency_id or
instant.voucher_id.company_id.currency_id)
if (statement_line.statement_id.currency.id != voucher_currency.id):
raise orm.except_orm(
_("Error"),
_("Currency on the bank statement line needs to be the "
"same as on the voucher. Currency conversion is not yet "
"supported."))
if instant.voucher_id.state != 'posted':
voucher_obj.proforma_voucher(
cr, uid, [instant.voucher_id.id], context=context)
instant.refresh()
if instant.voucher_id.state != 'posted':
raise orm.except_orm(
_("Error"),
_("The voucher could not be posted."))
if instant.voucher_id.move_id.state != 'posted':
move_obj.post(
cr, uid, [instant.voucher_id.move_id.id], context=context)
instant.refresh()
if instant.voucher_id.move_id.state != 'posted':
raise orm.except_orm(
_("Error"),
_("The voucher's move line could not be posted."))
if not self.pool.get('res.currency').is_zero(
cr, uid, voucher_currency, instant.balance):
raise orm.except_orm(
_("Error"),
_("The amount on the bank statement line needs to be the "
"same as on the voucher. Write-off is not yet "
"supported."))
# Banking Addons integration:
# Gather the info needed to match the bank statement line
# and trigger its posting and reconciliation.
if 'import_transaction_id' in statement_line_obj._columns:
if instant.statement_line_id.state == 'confirmed':
raise orm.except_orm(
_("Error"),
_("Cannot match a confirmed statement line"))
if not statement_line.import_transaction_id:
statement_line_obj.create_instant_transaction(
cr, uid, statement_line.id, context=context)
statement_line.refresh()
for line in instant.voucher_id.move_ids:
if line.account_id.id == statement_line.account_id.id:
self.pool.get('banking.import.transaction').write(
cr,
uid,
statement_line.import_transaction_id.id,
{
'move_line_id': line.id,
'move_line_ids': [(6, 0, [line.id])],
'match_type': 'move',
'invoice_id': False,
'invoice_ids': [(6, 0, [])],
},
context=context
)
statement_line_obj.confirm(
cr, uid, [statement_line.id], context=context
)
break
return {'type': 'ir.actions.act_window_close'}
_columns = {
'balance': fields.function(
_get_balance,
type='float',
digits_compute=dp.get_precision('Account'),
string="Balance",),
'partner_id': fields.many2one(
'res.partner',
'Partner',
required=True),
'statement_line_id': fields.many2one(
'account.bank.statement.line',
'Bank statement line',
readonly=True),
'ref': fields.related(
'statement_line_id', 'ref',
type="char", size="48",
readonly=True,
string="Reference"),
'voucher_id': fields.many2one(
'account.voucher',
'Voucher',
readonly=True),
'state': fields.selection(
[('init', 'init'),
('ready', 'ready'),
('confirm', 'confirm')],
'State'),
'type': fields.selection(
[('sale', 'Sale'),
('purchase', 'Purchase')],
'Voucher type'),
}
_defaults = {'state': 'init'}
| agpl-3.0 | -8,139,579,470,698,536,000 | 41.492163 | 78 | 0.516857 | false |
BrunoCaimar/ArcREST | src/arcrest/common/geometry.py | 6 | 23140 | from __future__ import absolute_import
from __future__ import print_function
import json
try:
import arcpy
arcpyFound = True
except:
arcpyFound = False
import types
from .._abstract import abstract
#----------------------------------------------------------------------
def _date_handler(obj):
if isinstance(obj, datetime.datetime):
return local_time_to_online(obj)
else:
return obj
########################################################################
class SpatialReference(abstract.AbstractGeometry):
""" creates a spatial reference instance """
_wkid = None
_wkt = None
#----------------------------------------------------------------------
def __init__(self, wkid=None,wkt=None):
"""Constructor"""
self._wkid = wkid
self._wkt = wkt
#----------------------------------------------------------------------
@property
def wkid(self):
""" get/set the wkid """
return self._wkid
@wkid.setter
def wkid(self, wkid):
""" get/set the wkid """
self._wkid = wkid
#----------------------------------------------------------------------
@property
def wkt(self):
""" get/set the wkt """
return self._wkt
@wkt.setter
def wkt(self, wkt):
""" get/set the wkt """
self._wkt = wkt
@property
def asDictionary(self):
"""returns the wkid id for use in json calls"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def value(self):
"""returns the wkid id for use in json calls"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
########################################################################
class Point(abstract.AbstractGeometry):
""" Point Geometry
Inputs:
coord - list of [X,Y] pair or arcpy.Point Object
           wkid - well-known id of the spatial reference
z - is the Z coordinate value
m - m value
"""
_x = None
_y = None
_z = None
_m = None
_wkid = None
_wkt = None
_json = None
_geom = None
_dict = None
#----------------------------------------------------------------------
def __init__(self, coord, wkid=None, wkt=None, z=None, m=None):
"""Constructor"""
if isinstance(coord, list):
self._x = float(coord[0])
self._y = float(coord[1])
elif arcpyFound and isinstance(coord, arcpy.Geometry):
self._x = coord.centroid.X
self._y = coord.centroid.Y
self._z = coord.centroid.Z
self._m = coord.centroid.M
self._geom = coord.centroid
self._wkid = wkid
self._wkt = wkt
if not z is None:
self._z = float(z)
if not m is None:
self._m = m
#----------------------------------------------------------------------
def __str__(self):
""" returns the object as a string """
return json.dumps(self.asDictionary,
default=_date_handler)
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryPoint"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=_date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Point as an ESRI arcpy.Point object """
if arcpyFound == False:
raise Exception("ArcPy is required to use this function")
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
#
template = {"x" : self._x,
"y" : self._y,
"spatialReference" : self.spatialReference
}
if not self._z is None:
template['z'] = self._z
if not self._m is None:
            template['m'] = self._m
return template
#----------------------------------------------------------------------
@property
def asList(self):
""" returns a Point value as a list of [x,y,<z>,<m>] """
base = [self._x, self._y]
if not self._z is None:
base.append(self._z)
elif not self._m is None:
base.append(self._m)
return base
#----------------------------------------------------------------------
@property
def X(self):
""" gets the X coordinate """
return self._x
#----------------------------------------------------------------------
@X.setter
def X(self, value):
"""sets the X coordinate"""
if isinstance(value, (int, float,
long, types.NoneType)):
self._x = value
#----------------------------------------------------------------------
@property
def Y(self):
""" gets the Y Coordinate """
return self._y
#----------------------------------------------------------------------
@Y.setter
def Y(self, value):
""" sets the Y coordinate """
if isinstance(value, (int, float,
long, types.NoneType)):
self._y = value
#----------------------------------------------------------------------
@property
def Z(self):
""" gets the Z Coordinate """
return self._z
#----------------------------------------------------------------------
@Z.setter
def Z(self, value):
""" sets the Z coordinate """
if isinstance(value, (int, float,
long, types.NoneType)):
self._z = value
#----------------------------------------------------------------------
@property
def wkid(self):
""" gets the wkid """
return self._wkid
#----------------------------------------------------------------------
@wkid.setter
def wkid(self, value):
""" sets the wkid """
if isinstance(value, (int,
long)):
self._wkid = value
#----------------------------------------------------------------------
@property
def wkt(self):
""" get/set the wkt """
return self._wkt
@wkt.setter
def wkt(self, wkt):
""" get/set the wkt """
self._wkt = wkt
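# Usage sketch (illustrative only; wkid 4326 is just an example spatial
# reference, and dictionary key order in the JSON output may vary):
#
#   pt = Point(coord=[-77.0, 38.9], wkid=4326)
#   pt.asList        # -> [-77.0, 38.9]
#   pt.asDictionary  # -> {'x': -77.0, 'y': 38.9,
#                    #     'spatialReference': {'wkid': 4326}}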
########################################################################
class MultiPoint(abstract.AbstractGeometry):
""" Implements the ArcGIS JSON MultiPoint Geometry Object """
_geom = None
_json = None
_dict = None
_wkid = None
_wkt = None
_points = None
_hasZ = False
_hasM = False
#----------------------------------------------------------------------
def __init__(self, points, wkid=None, wkt=None, hasZ=False, hasM=False):
"""Constructor"""
if isinstance(points, list):
self._points = points
elif arcpyFound and isinstance(points, arcpy.Geometry):
self._points = self.__geomToPointList(points)
self._wkid = wkid
self._wkt = wkt
self._hasZ = hasZ
self._hasM = hasM
#----------------------------------------------------------------------
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if arcpyFound and isinstance(geom, arcpy.Multipoint):
feature_geom = []
fPart = []
for part in geom:
fPart = []
for pnt in part:
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=geom.spatialReference.factoryCode,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryMultipoint"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=_date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Point as an ESRI arcpy.MultiPoint object """
if arcpyFound == False:
raise Exception("ArcPy is required to use this function")
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
#
value = self._dict
if value is None:
template = {
"hasM" : self._hasM,
"hasZ" : self._hasZ,
"points" : [],
"spatialReference" : self.spatialReference
}
for pt in self._points:
template['points'].append(pt.asList)
self._dict = template
return self._dict
########################################################################
class Polyline(abstract.AbstractGeometry):
""" Implements the ArcGIS REST API Polyline Object
Inputs:
paths - list - list of lists of Point objects
           wkid - integer - well-known spatial reference id
hasZ - boolean -
hasM - boolean -
"""
_paths = None
_wkid = None
_wkt = None
_json = None
_dict = None
_geom = None
_hasZ = None
_hasM = None
#----------------------------------------------------------------------
def __init__(self, paths, wkid=None,wkt=None, hasZ=False, hasM=False):
"""Constructor"""
if isinstance(paths, list):
self._paths = paths
elif arcpyFound and isinstance(paths, arcpy.Geometry):
self._paths = self.__geomToPointList(paths)
self._wkid = wkid
self._wkt = wkt
self._hasM = hasM
self._hasZ = hasZ
#----------------------------------------------------------------------
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if arcpyFound and isinstance(geom, arcpy.Polyline):
feature_geom = []
fPart = []
wkt = None
wkid = None
for part in geom:
fPart = []
for pnt in part:
if geom.spatialReference is None:
if self._wkid is None and self._wkt is not None:
wkt = self._wkt
else:
wkid = self._wkid
else:
wkid = geom.spatialReference.factoryCode
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=wkid,
wkt=wkt,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryPolyline"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=_date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Polyline as an ESRI arcpy.Polyline object """
if arcpyFound == False:
raise Exception("ArcPy is required to use this function")
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
value = self._dict
if value is None:
template = {
"hasM" : self._hasM,
"hasZ" : self._hasZ,
"paths" : [],
"spatialReference" : self.spatialReference
}
for part in self._paths:
lpart = []
for pt in part:
lpart.append(pt.asList)
template['paths'].append(lpart)
del lpart
self._dict = template
return self._dict
########################################################################
class Polygon(abstract.AbstractGeometry):
""" Implements the ArcGIS REST JSON for Polygon Object """
_rings = None
_wkid = None
_wkt = None
_json = None
_dict = None
_geom = None
_hasZ = None
_hasM = None
#----------------------------------------------------------------------
def __init__(self, rings, wkid=None,wkt=None, hasZ=False, hasM=False):
"""Constructor"""
if isinstance(rings, list):
self._rings = rings
elif arcpyFound and isinstance(rings, arcpy.Geometry):
self._rings = self.__geomToPointList(rings)
## self._json = rings.JSON
## self._dict = _unicode_convert(json.loads(self._json))
self._wkid = wkid
self._wkt = wkt
self._hasM = hasM
self._hasZ = hasZ
#----------------------------------------------------------------------
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
sr = geom.spatialReference
wkid = None
wkt = None
if sr is None:
if self._wkid is None and self._wkt is not None:
wkt = self._wkt
else:
wkid = self._wkid
else:
wkid = sr.factoryCode
g = json.loads(geom.JSON)
top = []
for gring in g['rings']:
ring = []
for g in gring:
ring.append(Point(coord=g, wkid=wkid, wkt=wkt, z=None, m=None))
top.append(ring)
return top
#if isinstance(geom, arcpy.Polygon):
#feature_geom = []
#fPart = []
#for part in geom:
#fPart = []
#for pnt in part:
#if geom.spatialReference is None:
#wkid = self._wkid
#else:
#wkid = geom.spatialReference.factoryCode
#fPart.append(Point(coord=[pnt.X, pnt.Y],
#wkid=wkid,
#z=pnt.Z, m=pnt.M))
#feature_geom.append(fPart)
#return feature_geom
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryPolygon"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=_date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Polyline as an ESRI arcpy.Polyline object """
if arcpyFound == False:
raise Exception("ArcPy is required to use this function")
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
value = self._dict
if value is None:
template = {
"hasM" : self._hasM,
"hasZ" : self._hasZ,
"rings" : [],
"spatialReference" : self.spatialReference
}
for part in self._rings:
lpart = []
for pt in part:
if isinstance(pt, list):
lpart.append(pt)
elif isinstance(pt, Point):
lpart.append(pt.asList)
template['rings'].append(lpart)
del lpart
self._dict = template
return self._dict
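# Usage sketch (illustrative only; coordinates and wkid are arbitrary):
#
#   ring = [[Point([0.0, 0.0], wkid=4326), Point([0.0, 1.0], wkid=4326),
#            Point([1.0, 1.0], wkid=4326), Point([0.0, 0.0], wkid=4326)]]
#   poly = Polygon(rings=ring, wkid=4326)
#   poly.asDictionary  # -> {'hasM': False, 'hasZ': False,
#                      #     'rings': [[[0.0, 0.0], [0.0, 1.0],
#                      #                [1.0, 1.0], [0.0, 0.0]]],
#                      #     'spatialReference': {'wkid': 4326}}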
########################################################################
class Envelope(abstract.AbstractGeometry):
"""
An envelope is a rectangle defined by a range of values for each
coordinate and attribute. It also has a spatialReference field.
The fields for the z and m ranges are optional.
"""
_json = None
_dict = None
_geom = None
_xmin = None
_ymin = None
_zmin = None
_mmin = None
_xmax = None
_ymax = None
_zmax = None
_mmax = None
_wkid = None
_wkt = None
#----------------------------------------------------------------------
def __init__(self, xmin, ymin, xmax, ymax, wkid=None, wkt=None,
zmin=None, zmax=None, mmin=None, mmax=None):
"""Constructor"""
self._xmin = xmin
self._ymin = ymin
self._zmin = zmin
self._mmin = mmin
self._xmax = xmax
self._ymax = ymax
self._zmax = zmax
self._mmax = mmax
self._wkid = wkid
self._wkt = wkt
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
if self._wkid == None and self._wkt is not None:
return {"wkt": self._wkt}
else:
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryEnvelope"
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the envelope as a dictionary """
template = {
"xmin" : self._xmin,
"ymin" : self._ymin,
"xmax" : self._xmax,
"ymax" : self._ymax,
"spatialReference" : self.spatialReference
}
if self._zmax is not None and \
self._zmin is not None:
template['zmin'] = self._zmin
template['zmax'] = self._zmax
if self._mmin is not None and \
self._mmax is not None:
template['mmax'] = self._mmax
template['mmin'] = self._mmin
return template
#----------------------------------------------------------------------
@property
def value(self):
""" returns the envelope as a dictionary """
template = {
"xmin" : self._xmin,
"ymin" : self._ymin,
"xmax" : self._xmax,
"ymax" : self._ymax,
"spatialReference" : self.spatialReference
}
if self._zmax is not None and \
self._zmin is not None:
template['zmin'] = self._zmin
template['zmax'] = self._zmax
if self._mmin is not None and \
self._mmax is not None:
template['mmax'] = self._mmax
template['mmin'] = self._mmin
return template
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return self.asJSON
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=_date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Envelope as an ESRI arcpy.Polygon object """
env = self.asDictionary
ring = [[
            Point([env['xmin'], env['ymin']], wkid=self._wkid),
            Point([env['xmax'], env['ymin']], wkid=self._wkid),
            Point([env['xmax'], env['ymax']], wkid=self._wkid),
            Point([env['xmin'], env['ymax']], wkid=self._wkid)
]]
return Polygon(rings=ring,
wkid=self._wkid,
                       wkt=self._wkt,
hasZ=False,
hasM=False).asArcPyObject | apache-2.0 | 2,313,178,454,659,710,500 | 35.5 | 79 | 0.413656 | false |
crypt3lx2k/Imageboard-Image-Scraper | iwi/web/WebCache.py | 3 | 6171 | import socket
import threading
import time
import zlib
import httplib
import urllib2
import urlparse
try:
import cPickle as pickle
except ImportError:
import pickle
from . import UniformRetryStrategy
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
__all__ = ['WebCache']
class WebCache (object):
"""
Allows for thread-safe cached downloads, honors last-modified.
WebCache can also write and read the cache to and from disk.
"""
# default retry parameters
retry_times = 3
retry_lower = 5
retry_upper = 15
# default user string
user_string = "Mozilla/5.0"
def __init__ (self, cache_file=None, sleeper=None):
"""
        Initializes an instance from an optional cache_file and an
        optional sleeper.
The cache_file parameter may either be a filename or an open file-like
object.
If the cache_file parameter is not given the cache is initialized to be
empty.
The sleeper parameter must be a function that takes the number of
seconds to sleep (as a floating point number).
If the sleeper parameter is not given it is initialized as time.sleep.
"""
if cache_file is None:
self.cache = {}
else:
self.load(cache_file)
        if sleeper is None:
            sleeper = time.sleep
        self.sleeper = sleeper
self.cache_lock = threading.Lock()
self.set_online_mode()
def download (self, url, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bypass_cache=False):
"""
        Downloads the contents from the URL. If something goes wrong, it
        registers the exception with the retry strategy and asks it for a
        sleep time before trying again.
"""
retry = 0.0
retrier = UniformRetryStrategy (
self.retry_times,
self.retry_lower,
self.retry_upper
)
while retry is not None:
if retry:
logger.debug('sleeping on %s for %s seconds', url, retry)
self.sleeper(retry)
try:
return self.downloader(url, timeout=timeout, bypass_cache=bypass_cache)
except Exception as e:
logger.debug('got on %s exception %s', url, e)
retrier.register_error(e)
retry = retrier.seconds()
return ''
def download_offline (self, url, timeout=None, bypass_cache=False):
"""
Simulates downloading contents from URL while only looking it up in the
cache.
"""
contents = None
key = self.url_to_key(url)
if bypass_cache:
raise ValueError ('Cache bypass doesn\'t make sense in offline mode.')
if self.has_key(key):
_, contents = self.get_values(key)
return zlib.decompress(contents)
raise urllib2.URLError(OSError('not in cache'))
def download_online (self, url, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bypass_cache=False):
"""
Downloads contents from the URL, using the internal cache if applicable.
"""
contents = None
key = self.url_to_key(url)
request = urllib2.Request(url)
request.add_header('User-agent', self.user_string)
if not bypass_cache and self.has_key(key):
lastmodified, contents = self.get_values(key)
request.add_header('if-modified-since', lastmodified)
try:
connection = urllib2.urlopen(request, timeout=timeout)
lastmodified = connection.headers.get('last-modified')
contents = connection.read()
connection.close()
except urllib2.HTTPError as e:
if e.code == 304:
logger.debug (
'cache hit %r not modified since %s',
key, lastmodified
)
return zlib.decompress(contents)
raise
if not bypass_cache:
self.set_values(key, lastmodified, zlib.compress(contents))
return contents
def dump (self, outfile):
"""
Writes internal cache to outfile.
outfile may be a filename or an open file-like object.
"""
if isinstance(outfile, str):
outfile = open(outfile, 'wb')
pickle.dump(self.cache, outfile, protocol=-1)
def get_values (self, key):
"""
Returns the values referred to by key in a thread-safe manner.
"""
with self.cache_lock:
logger.debug('getting %r from cache', key)
return self.cache[key]
def has_key (self, key):
"""
        Returns True if the cache contains an entry for key.
"""
logger.debug('looking for %r in cache', key)
return key in self.cache
def keys (self):
"""
Makes a copy of the list of keys and returns it.
"""
return self.cache.keys()
def load (self, infile):
"""
Loads internal cache from infile.
infile may be a filename or an open file-like object.
"""
try:
if isinstance(infile, str):
infile = open(infile, 'rb')
self.cache = pickle.load(infile)
except IOError:
self.cache = {}
def url_to_key (self, url):
"""
Takes an url and returns a key for use in the cache.
"""
return urlparse.urlparse(url).path
def remove_key (self, key):
"""
Removes an entry from the cache, not thread-safe.
"""
del self.cache[key]
def set_offline_mode (self):
"""
Sets offline mode for the webcache.
"""
self.downloader = self.download_offline
def set_online_mode (self):
"""
Sets online mode for the webcache.
"""
self.downloader = self.download_online
def set_values (self, key, *args):
"""
Sets values in a thread-safe manner.
"""
with self.cache_lock:
logger.debug('storing %r in cache', key)
self.cache[key] = args
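# Minimal usage sketch (illustrative only; the URL and file name are
# hypothetical, and an online download obviously needs network access):
#
#   cache = WebCache(cache_file='threads.cache')
#   data = cache.download('http://example.com/res/123.json')
#   cache.dump('threads.cache')      # persist the compressed cache to disk
#
# In offline mode the same call is answered purely from the pickled cache:
#
#   cache.set_offline_mode()
#   data = cache.download('http://example.com/res/123.json')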
| mit | -3,845,465,332,784,672,000 | 27.437788 | 96 | 0.573327 | false |
AAROC/invenio | invenio/legacy/miscutil/dbdump.py | 6 | 16241 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2012, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio DB dumper.
"""
import os
import re
import time
from invenio.config import CFG_LOGDIR, CFG_PATH_MYSQL, CFG_PATH_GZIP, \
CFG_DATABASE_HOST, \
CFG_DATABASE_USER, \
CFG_DATABASE_PASS, \
CFG_DATABASE_NAME, \
CFG_DATABASE_PORT, \
CFG_DATABASE_SLAVE
from invenio.legacy.dbquery import get_connection_for_dump_on_slave, run_sql, \
get_table_names as db_get_table_names
from invenio.legacy.bibsched.bibtask import task_init, \
write_message, \
task_set_option, \
task_get_option, \
task_update_progress, \
task_get_task_param, \
task_low_level_submission
from invenio.utils.shell import run_shell_command, escape_shell_arg
from invenio.celery.utils import (
enable_queue,
get_queues,
suspend_queues,
)
def get_table_names(value):
"""
    Get the names of the tables matching the given regular expression
    @param value: regular expression string
@return: list of strings
"""
rex = re.compile(value)
return [row[0] for row in db_get_table_names() if rex.search(row[0])]
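# Illustrative example (table names are hypothetical): with tables such as
# idxWORD01F, rnkMETHOD and bibrec present in the database,
#
#   get_table_names('^(idx|rnk)')
#
# returns only the idx* and rnk* table names; this is how the --ignore-tables
# regular expression is expanded before the dump.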
def _delete_old_dumps(dirname, filename, number_to_keep):
"""
Look for files in DIRNAME directory starting with FILENAME
pattern. Delete up to NUMBER_TO_KEEP files (when sorted
alphabetically, which is equal to sorted by date). Useful to
prune old dump files.
"""
files = [x for x in os.listdir(dirname) if x.startswith(filename)]
files.sort()
for afile in files[:-number_to_keep]:
write_message("... deleting %s" % dirname + os.sep + afile)
os.remove(dirname + os.sep + afile)
def check_slave_is_up(connection=None):
"""Raise an StandardError in case the slave is not correctly up."""
if connection is None:
connection = get_connection_for_dump_on_slave()
# FIXME compatibility with postgresql
res = run_sql("SHOW SLAVE STATUS", with_dict=True, connection=connection)
if res[0]['Slave_IO_Running'] != 'Yes':
raise StandardError("Slave_IO_Running is not set to 'Yes'")
if res[0]['Slave_SQL_Running'] != 'Yes':
raise StandardError("Slave_SQL_Running is not set to 'Yes'")
def check_slave_is_down(connection=None):
"""Raise an StandardError in case the slave is not correctly down."""
if connection is None:
connection = get_connection_for_dump_on_slave()
# FIXME compatibility with postgresql
res = run_sql("SHOW SLAVE STATUS", with_dict=True, connection=connection)
if res[0]['Slave_SQL_Running'] != 'No':
raise StandardError("Slave_SQL_Running is not set to 'No'")
def detach_slave(connection=None):
"""Detach the slave."""
if connection is None:
connection = get_connection_for_dump_on_slave()
# FIXME compatibility with postgresql
run_sql("STOP SLAVE SQL_THREAD", connection=connection)
check_slave_is_down(connection)
def attach_slave(connection=None):
"""Attach the slave."""
if connection is None:
connection = get_connection_for_dump_on_slave()
# FIXME compatibility with postgresql
run_sql("START SLAVE", connection=connection)
check_slave_is_up(connection)
def check_slave_is_in_consistent_state(connection=None):
"""
    Check if the slave is already aware that the dbdump task is running.
    Since dbdump is a monotask, this guarantees that no other task is
    currently running and that it is hence safe to detach the slave and
    start the actual dump.
"""
if connection is None:
connection = get_connection_for_dump_on_slave()
i = 0
## Let's take the current status of dbdump (e.g. RUNNING, ABOUT TO STOP, etc.)...
current_status = run_sql("""SELECT status FROM "schTASK" WHERE id=%s""", (task_get_task_param('task_id'), ))[0][0]
while True:
if i == 10:
## Timeout!!
raise StandardError("The slave seems not to pick up with the master")
## ...and let's see if it matches with what the slave sees.
if run_sql("""SELECT status FROM "schTASK" WHERE id=%s AND status=%s""", (task_get_task_param('task_id'), current_status), connection=connection):
## Bingo!
return
time.sleep(3)
i += 1
def dump_database(dump_path, host=CFG_DATABASE_HOST, port=CFG_DATABASE_PORT, \
user=CFG_DATABASE_USER, passw=CFG_DATABASE_PASS, \
name=CFG_DATABASE_NAME, params=None, compress=False, \
ignore_tables=None):
"""
Dump Invenio database into SQL file located at DUMP_PATH.
    Runs mysqldump against the given host configuration with the given
    user credentials.
Optional mysqldump parameters can also be passed. Otherwise, a default
set of parameters will be used.
@param dump_path: path on the filesystem to save the dump to.
@type dump_path: string
@param host: hostname of mysql database node to connect to.
@type host: string
@param port: port of mysql database node to connect to
@type port: string
@param user: username to connect with
@type user: string
@param passw: password to connect to with
@type passw: string
@param name: name of mysql database node to dump
@type name: string
@param params: command line parameters to pass to mysqldump. Optional.
@type params: string
@param compress: should the dump be compressed through gzip?
@type compress: bool
@param ignore_tables: list of tables to ignore in the dump
@type ignore: list of string
"""
write_message("... writing %s" % (dump_path,))
partial_dump_path = dump_path + ".part"
# Is mysqldump installed or in the right path?
cmd_prefix = CFG_PATH_MYSQL + 'dump'
if not os.path.exists(cmd_prefix):
raise StandardError("%s is not installed." % (cmd_prefix))
if not params:
# No parameters set, lets use the default ones.
params = " --skip-opt --add-drop-table --add-locks --create-options" \
" --quick --extended-insert --set-charset --disable-keys" \
" --lock-tables=false --max_allowed_packet=2G "
if ignore_tables:
params += " ".join([escape_shell_arg("--ignore-table=%s.%s" % (CFG_DATABASE_NAME, table)) for table in ignore_tables])
dump_cmd = "%s %s " \
" --host=%s --port=%s --user=%s --password=%s %s" % \
(cmd_prefix, \
params, \
escape_shell_arg(host), \
escape_shell_arg(str(port)), \
escape_shell_arg(user), \
escape_shell_arg(passw), \
escape_shell_arg(name))
if compress:
dump_cmd = "%s | %s -cf; exit ${PIPESTATUS[0]}" % \
(dump_cmd, \
CFG_PATH_GZIP)
dump_cmd = "bash -c %s" % (escape_shell_arg(dump_cmd),)
write_message(dump_cmd, verbose=2)
exit_code, stdout, stderr = run_shell_command(dump_cmd, None, partial_dump_path)
if exit_code:
raise StandardError("ERROR: mysqldump exit code is %s. stderr: %s stdout: %s" % \
(repr(exit_code), \
repr(stderr), \
repr(stdout)))
else:
os.rename(partial_dump_path, dump_path)
write_message("... completed writing %s" % (dump_path,))
def _dbdump_elaborate_submit_param(key, value, dummyopts, dummyargs):
"""
Elaborate task submission parameter. See bibtask's
task_submit_elaborate_specific_parameter_fnc for help.
"""
if key in ('-n', '--number'):
try:
task_set_option('number', int(value))
except ValueError:
raise StandardError("ERROR: Number '%s' is not integer." % (value,))
elif key in ('-o', '--output'):
if os.path.isdir(value):
task_set_option('output', value)
else:
raise StandardError("ERROR: Output '%s' is not a directory." % \
(value,))
elif key in ('--params',):
task_set_option('params', value)
elif key in ('--compress',):
if not CFG_PATH_GZIP or (CFG_PATH_GZIP and not os.path.exists(CFG_PATH_GZIP)):
raise StandardError("ERROR: No valid gzip path is defined.")
task_set_option('compress', True)
elif key in ('-S', '--slave'):
if value:
task_set_option('slave', value)
else:
if not CFG_DATABASE_SLAVE:
raise StandardError("ERROR: No slave defined.")
task_set_option('slave', CFG_DATABASE_SLAVE)
elif key in ('--dump-on-slave-helper', ):
task_set_option('dump_on_slave_helper_mode', True)
elif key in ('--ignore-tables',):
try:
re.compile(value)
task_set_option("ignore_tables", value)
except re.error:
            raise StandardError("ERROR: Passed string: '%s' is not a valid regular expression." % value)
elif key in ('--disable-workers', ):
task_set_option('disable_workers', True)
else:
return False
return True
def _dbdump_run_task_core():
"""
Run DB dumper core stuff.
Note: do not use task_can_sleep() stuff here because we don't want
other tasks to interrupt us while we are dumping the DB content.
"""
# read params:
host = CFG_DATABASE_HOST
port = CFG_DATABASE_PORT
connection = None
active_queues = []
try:
if task_get_option('slave') and not task_get_option('dump_on_slave_helper_mode'):
connection = get_connection_for_dump_on_slave()
write_message("Dump on slave requested")
write_message("... checking if slave is well up...")
check_slave_is_up(connection)
write_message("... checking if slave is in consistent state...")
check_slave_is_in_consistent_state(connection)
write_message("... detaching slave database...")
detach_slave(connection)
write_message("... scheduling dump on slave helper...")
helper_arguments = []
if task_get_option("number"):
helper_arguments += ["--number", str(task_get_option("number"))]
if task_get_option("output"):
helper_arguments += ["--output", str(task_get_option("output"))]
if task_get_option("params"):
helper_arguments += ["--params", str(task_get_option("params"))]
if task_get_option("ignore_tables"):
helper_arguments += ["--ignore-tables", str(task_get_option("ignore_tables"))]
if task_get_option("compress"):
helper_arguments += ["--compress"]
if task_get_option("slave"):
helper_arguments += ["--slave", str(task_get_option("slave"))]
helper_arguments += ['-N', 'slavehelper', '--dump-on-slave-helper']
task_id = task_low_level_submission('dbdump', task_get_task_param('user'), '-P4', *helper_arguments)
write_message("Slave scheduled with ID %s" % task_id)
task_update_progress("DONE")
return True
elif task_get_option('dump_on_slave_helper_mode'):
write_message("Dumping on slave mode")
connection = get_connection_for_dump_on_slave()
write_message("... checking if slave is well down...")
check_slave_is_down(connection)
host = CFG_DATABASE_SLAVE
task_update_progress("Reading parameters")
write_message("Reading parameters started")
output_dir = task_get_option('output', CFG_LOGDIR)
output_num = task_get_option('number', 5)
params = task_get_option('params', None)
compress = task_get_option('compress', False)
slave = task_get_option('slave', False)
ignore_tables = task_get_option('ignore_tables', None)
if ignore_tables:
ignore_tables = get_table_names(ignore_tables)
else:
ignore_tables = None
output_file_suffix = task_get_task_param('task_starting_time')
output_file_suffix = output_file_suffix.replace(' ', '_') + '.sql'
if compress:
output_file_suffix = "%s.gz" % (output_file_suffix,)
write_message("Reading parameters ended")
if task_get_option('disable_workers'):
active_queues = get_queues()
if active_queues:
write_message("Suspend workers and wait for any running tasks to complete")
suspend_queues(active_queues)
write_message("Workers suspended")
# make dump:
task_update_progress("Dumping database")
write_message("Database dump started")
if slave:
output_file_prefix = 'slave-%s-dbdump-' % (CFG_DATABASE_NAME,)
else:
output_file_prefix = '%s-dbdump-' % (CFG_DATABASE_NAME,)
output_file = output_file_prefix + output_file_suffix
dump_path = output_dir + os.sep + output_file
dump_database(dump_path, \
host=host,
port=port,
params=params, \
compress=compress, \
ignore_tables=ignore_tables)
write_message("Database dump ended")
finally:
for queue in active_queues:
enable_queue(queue)
if connection and task_get_option('dump_on_slave_helper_mode'):
write_message("Reattaching slave")
attach_slave(connection)
# prune old dump files:
task_update_progress("Pruning old dump files")
write_message("Pruning old dump files started")
_delete_old_dumps(output_dir, output_file_prefix, output_num)
write_message("Pruning old dump files ended")
# we are done:
task_update_progress("Done.")
return True
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='rundbdump',
authorization_msg="DB Dump Task Submission",
help_specific_usage="""\
-o, --output=DIR Output directory. [default=%s]
-n, --number=NUM Keep up to NUM previous dump files. [default=5]
--params=PARAMS Specify your own mysqldump parameters. Optional.
--compress Compress dump directly into gzip.
-S, --slave=HOST Perform the dump from a slave, if no host use CFG_DATABASE_SLAVE.
--ignore-tables=regex Ignore tables matching the given regular expression
--disable-workers Disable any task queue workers while dumping.
Examples:
$ dbdump --ignore-tables '^(idx|rnk)'
$ dbdump -n3 -o/tmp -s1d -L 02:00-04:00
""" % CFG_LOGDIR,
specific_params=("n:o:p:S:",
["number=", "output=", "params=", "slave=",
"compress", 'ignore-tables=',
"dump-on-slave-helper", "disable-workers"]),
task_submit_elaborate_specific_parameter_fnc=_dbdump_elaborate_submit_param,
task_run_fnc=_dbdump_run_task_core)
if __name__ == '__main__':
main()
| gpl-2.0 | 5,608,615,535,621,905,000 | 39.806533 | 154 | 0.595899 | false |
mezz64/home-assistant | homeassistant/components/alarmdecoder/const.py | 15 | 1442 | """Constants for the AlarmDecoder component."""
CONF_ALT_NIGHT_MODE = "alt_night_mode"
CONF_AUTO_BYPASS = "auto_bypass"
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_DEVICE_BAUD = "device_baudrate"
CONF_DEVICE_PATH = "device_path"
CONF_RELAY_ADDR = "zone_relayaddr"
CONF_RELAY_CHAN = "zone_relaychan"
CONF_ZONE_LOOP = "zone_loop"
CONF_ZONE_NAME = "zone_name"
CONF_ZONE_NUMBER = "zone_number"
CONF_ZONE_RFID = "zone_rfid"
CONF_ZONE_TYPE = "zone_type"
DATA_AD = "alarmdecoder"
DATA_REMOVE_STOP_LISTENER = "rm_stop_listener"
DATA_REMOVE_UPDATE_LISTENER = "rm_update_listener"
DATA_RESTART = "restart"
DEFAULT_ALT_NIGHT_MODE = False
DEFAULT_AUTO_BYPASS = False
DEFAULT_CODE_ARM_REQUIRED = True
DEFAULT_DEVICE_BAUD = 115200
DEFAULT_DEVICE_HOST = "alarmdecoder"
DEFAULT_DEVICE_PATH = "/dev/ttyUSB0"
DEFAULT_DEVICE_PORT = 10000
DEFAULT_ZONE_TYPE = "window"
DEFAULT_ARM_OPTIONS = {
CONF_ALT_NIGHT_MODE: DEFAULT_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS: DEFAULT_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED: DEFAULT_CODE_ARM_REQUIRED,
}
DEFAULT_ZONE_OPTIONS = {}
DOMAIN = "alarmdecoder"
OPTIONS_ARM = "arm_options"
OPTIONS_ZONES = "zone_options"
PROTOCOL_SERIAL = "serial"
PROTOCOL_SOCKET = "socket"
SIGNAL_PANEL_MESSAGE = "alarmdecoder.panel_message"
SIGNAL_REL_MESSAGE = "alarmdecoder.rel_message"
SIGNAL_RFX_MESSAGE = "alarmdecoder.rfx_message"
SIGNAL_ZONE_FAULT = "alarmdecoder.zone_fault"
SIGNAL_ZONE_RESTORE = "alarmdecoder.zone_restore"
| apache-2.0 | 7,394,525,429,119,247,000 | 28.428571 | 54 | 0.744105 | false |
numenta/htmresearch | htmresearch/frameworks/union_temporal_pooling/activation/excite_functions/excite_functions_all.py | 9 | 3762 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy
import matplotlib.pyplot as plt
from excite_function_base import ExciteFunctionBase
class LogisticExciteFunction(ExciteFunctionBase):
"""
Implementation of a logistic activation function for activation updating.
Specifically, the function has the following form:
f(x) = (maxValue - minValue) / (1 + exp(-steepness * (x - xMidpoint) ) ) + minValue
Note: The excitation rate is linear. The activation function is
logistic.
"""
def __init__(self, xMidpoint=5, minValue=10, maxValue=20, steepness=1):
"""
@param xMidpoint: Controls where function output is half of 'maxValue,'
i.e. f(xMidpoint) = maxValue / 2
@param minValue: Minimum value of the function
@param maxValue: Controls the maximum value of the function's range
@param steepness: Controls the steepness of the "middle" part of the
curve where output values begin changing rapidly.
Must be a non-zero value.
"""
assert steepness != 0
self._xMidpoint = xMidpoint
self._maxValue = maxValue
self._minValue = minValue
self._steepness = steepness
def excite(self, currentActivation, inputs):
"""
Increases current activation by amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation += self._minValue + (self._maxValue - self._minValue) / (
1 + numpy.exp(-self._steepness * (inputs - self._xMidpoint)))
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
plt.title('Sigmoid Activation Function')
class FixedExciteFunction(ExciteFunctionBase):
"""
Implementation of a simple fixed excite function
The function reset the activation level to a fixed amount
"""
def __init__(self, targetExcLevel=10.0):
"""
"""
self._targetExcLevel = targetExcLevel
def excite(self, currentActivation, inputs):
"""
Increases current activation by a fixed amount.
@param currentActivation (numpy array) Current activation levels for each cell
@param inputs (numpy array) inputs for each cell
"""
currentActivation += self._targetExcLevel
return currentActivation
def plot(self):
"""
plot the activation function
"""
plt.ion()
plt.show()
x = numpy.linspace(0, 15, 100)
y = numpy.zeros(x.shape)
y = self.excite(y, x)
plt.plot(x, y)
plt.xlabel('Input')
plt.ylabel('Persistence')
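# Small runnable sketch (added for illustration, not part of the original
# module): exercises both excite functions numerically without opening any
# plot windows. With the default parameters, an input equal to xMidpoint (5)
# makes the logistic function add minValue + (maxValue - minValue) / 2 = 15.
if __name__ == '__main__':
  test_inputs = numpy.array([0.0, 5.0, 10.0])
  logistic = LogisticExciteFunction()
  fixed = FixedExciteFunction()
  print 'logistic:', logistic.excite(numpy.zeros(3), test_inputs)
  print 'fixed:   ', fixed.excite(numpy.zeros(3), test_inputs)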
| agpl-3.0 | 4,409,350,432,068,811,300 | 30.613445 | 86 | 0.652578 | false |
ApoorvaJ/BlenderSFM | osmpmvs/__init__.py | 1 | 4313 | import logging
import sys, os, getopt, tempfile, subprocess, shutil
# service function: get path of an executable (.exe suffix is added if we are on Windows)
def getExecPath(dir, fileName):
if sys.platform == "win32": fileName = "%s.exe" % fileName
return os.path.join(dir, fileName)
distrPath = os.path.dirname( os.path.abspath(sys.argv[0]) )
pmvsExecutable = getExecPath(distrPath, "software/pmvs/bin/pmvs2")
bundlerBinPath = ''
if sys.platform == "win32": bundlerBinPath = os.path.join(distrPath, "software/bundler/bin/")
else: bundlerBinPath = os.path.join(distrPath, "software/bundler/bin/")
bundler2PmvsExecutable = getExecPath(bundlerBinPath, "Bundle2PMVS")
RadialUndistordExecutable = getExecPath(bundlerBinPath, "RadialUndistort")
Bundle2VisExecutable = getExecPath(bundlerBinPath, "Bundle2Vis")
bundlerListFileName = "list.txt"
commandLineLongFlags = ["bundlerOutputPath="]
class OsmPmvs():
currentDir = ""
workDir = ""
# value of command line argument --bundlerOutputPath=<..>
bundleOutArg = ""
def __init__(self):
self.parseCommandLineFlags()
# save current directory (i.e. from where RunBundler.py is called)
self.currentDir = os.getcwd()
        # use the bundler output path as the working directory
        self.workDir = self.bundleOutArg
        logging.info("Working directory: " + self.workDir)
if not (os.path.isdir(self.bundleOutArg) or os.path.isfile(self.bundleOutArg)):
            raise Exception("'%s' is neither a directory nor a file name" % self.bundleOutArg)
def parseCommandLineFlags(self):
try:
opts, args = getopt.getopt(sys.argv[1:], "", commandLineLongFlags)
except getopt.GetoptError:
self.printHelpExit()
for opt,val in opts:
if opt=="--bundlerOutputPath":
self.bundleOutArg = val
elif opt=="--help":
self.printHelpExit()
if self.bundleOutArg=="": self.printHelpExit()
def doBundle2PMVS(self):
# just run Bundle2PMVS here
logging.info("\nPerforming Bundler2PMVS conversion...")
os.chdir(self.workDir)
os.mkdir("pmvs")
# Create directory structure
os.mkdir("pmvs/txt")
os.mkdir("pmvs/visualize")
os.mkdir("pmvs/models")
#$BASE_PATH/bin32/Bundle2PMVS.exe list.txt bundle/bundle.out
print "Running Bundle2PMVS to generate geometry and converted camera file"
subprocess.call([bundler2PmvsExecutable, "list.txt", "bundle/bundle.out"])
# Apply radial undistortion to the images
print "Running RadialUndistort to undistort input images"
subprocess.call([RadialUndistordExecutable, "list.txt", "bundle/bundle.out", "pmvs"])
print "Running Bundle2Vis to generate vis.dat"
subprocess.call([Bundle2VisExecutable, "pmvs/bundle.rd.out", "pmvs/vis.dat"])
os.chdir(os.path.join(self.workDir,"pmvs"))
#Rename all the files to the correct name
undistortTextFile = open("list.rd.txt", "r")
imagesStrings = undistortTextFile.readlines()
print "Move files in the correct directory"
cpt = 0
for imageString in imagesStrings:
image = imageString.split(".")
# sh => mv pmvs/et001.rd.jpg pmvs/visualize/00000000.jpg
shutil.copy(image[0]+".rd.jpg", "visualize/%08d.jpg"%cpt)
# sh => mv pmvs/00000000.txt pmvs/txt/
shutil.copy("%08d.txt"%cpt, "txt/%08d.txt"%cpt)
os.remove(image[0]+".rd.jpg")
os.remove("%08d.txt"%cpt)
cpt+=1
undistortTextFile.close()
logging.info("Finished!")
def doPMVS(self):
print "Run PMVS2 : %s " % pmvsExecutable
subprocess.call([pmvsExecutable, "./", "pmvs_options.txt"])
def printHelpExit(self):
self.printHelp()
sys.exit(2)
def openResult(self):
if sys.platform == "win32": subprocess.call(["explorer", self.workDir])
else: print "See the results in the '%s' directory" % self.workDir
def printHelp(self):
print "Error"
helpFile = open(os.path.join(distrPath, "osmpmvs/help.txt"), "r")
print helpFile.read()
helpFile.close()
| gpl-3.0 | 3,143,918,996,373,519,400 | 34.941667 | 93 | 0.632275 | false |
littlstar/chromium.src | ppapi/generators/idl_c_proto.py | 15 | 24493 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C style prototypes and definitions """
import glob
import os
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_parser import ParseFiles
Option('cgen_debug', 'Debug generate.')
class CGenError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
def CommentLines(lines, tabs=0):
# Generate a C style comment block by prepending the block with '<tab>/*'
# and adding a '<tab> *' per line.
tab = ' ' * tabs
out = '%s/*' % tab + ('\n%s *' % tab).join(lines)
# Add a terminating ' */' unless the last line is blank which would mean it
# already has ' *'
if not lines[-1]:
out += '/\n'
else:
out += ' */\n'
return out
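# Illustrative example (not from the original file): calling
#
#   CommentLines(['First line.', 'Second line.'])
#
# returns (with a trailing newline)
#
#   /*First line.
#    *Second line. */
#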
def Comment(node, prefix=None, tabs=0):
# Generate a comment block from the provided Comment node.
comment = node.GetName()
lines = comment.split('\n')
# If an option prefix is provided, then prepend that to the comment
# for this node.
if prefix:
prefix_lines = prefix.split('\n')
# If both the prefix and comment start with a blank line ('*') remove
# the extra one.
if prefix_lines[0] == '*' and lines[0] == '*':
lines = prefix_lines + lines[1:]
else:
      lines = prefix_lines + lines
return CommentLines(lines, tabs)
def GetNodeComments(node, tabs=0):
# Generate a comment block joining all comment nodes which are children of
# the provided node.
comment_txt = ''
for doc in node.GetListOf('Comment'):
comment_txt += Comment(doc, tabs=tabs)
return comment_txt
class CGen(object):
# TypeMap
#
# TypeMap modifies how an object is stored or passed, for example pointers
# are passed as 'const' if they are 'in' parameters, and structures are
  # preceded by the keyword 'struct' as well as using a pointer.
#
TypeMap = {
'Array': {
'in': 'const %s',
'inout': '%s',
'out': '%s*',
'store': '%s',
'return': '%s',
'ref': '%s*'
},
'Callspec': {
'in': '%s',
'inout': '%s',
'out': '%s',
'store': '%s',
'return': '%s'
},
'Enum': {
'in': '%s',
'inout': '%s*',
'out': '%s*',
'store': '%s',
'return': '%s'
},
'Interface': {
'in': 'const %s*',
'inout': '%s*',
'out': '%s**',
'return': '%s*',
'store': '%s*'
},
'Struct': {
'in': 'const %s*',
'inout': '%s*',
'out': '%s*',
'return': ' %s*',
'store': '%s',
'ref': '%s*'
},
'blob_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': '%s',
'store': '%s'
},
'mem_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': '%s',
'store': '%s'
},
'mem_ptr_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': '%s',
'store': '%s'
},
'str_t': {
'in': 'const %s',
'inout': '%s',
'out': '%s',
'return': 'const %s',
'store': '%s'
},
'cstr_t': {
'in': '%s',
'inout': '%s*',
'out': '%s*',
'return': '%s',
'store': '%s'
},
'TypeValue': {
'in': '%s',
'inout': '%s*',
'out': '%s*',
'return': '%s',
'store': '%s'
},
}
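  # Illustrative example (hypothetical type name): for a parameter whose root
  # type resolves to 'Struct', a type name such as 'struct PP_Point' becomes
  # 'const struct PP_Point*' when used as an 'in' parameter and
  # 'struct PP_Point*' when used as an 'out' parameter.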
#
# RemapName
#
  # A dictionary of PPAPI types that are converted to language specific
  # types before being returned by the C generator
#
RemapName = {
'blob_t': 'void**',
'float_t': 'float',
'double_t': 'double',
'handle_t': 'int',
'mem_t': 'void*',
'mem_ptr_t': 'void**',
'str_t': 'char*',
'cstr_t': 'const char*',
'interface_t' : 'const void*'
}
def __init__(self):
self.dbg_depth = 0
#
# Debug Logging functions
#
def Log(self, txt):
if not GetOption('cgen_debug'): return
tabs = ' ' * self.dbg_depth
print '%s%s' % (tabs, txt)
def LogEnter(self, txt):
if txt: self.Log(txt)
self.dbg_depth += 1
def LogExit(self, txt):
self.dbg_depth -= 1
if txt: self.Log(txt)
def GetDefine(self, name, value):
out = '#define %s %s' % (name, value)
if len(out) > 80:
out = '#define %s \\\n %s' % (name, value)
return '%s\n' % out
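  # Illustrative example (hypothetical macro): GetDefine('FOO', '1') returns
  # '#define FOO 1\n'; definitions longer than 80 characters are split after
  # the macro name with a trailing backslash and the value continued on the
  # next line.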
#
# Interface strings
#
def GetMacroHelper(self, node):
macro = node.GetProperty('macro')
if macro: return macro
name = node.GetName()
name = name.upper()
return "%s_INTERFACE" % name
def GetInterfaceMacro(self, node, version = None):
name = self.GetMacroHelper(node)
if version is None:
return name
return '%s_%s' % (name, str(version).replace('.', '_'))
def GetInterfaceString(self, node, version = None):
# If an interface name is specified, use that
name = node.GetProperty('iname')
if not name:
# Otherwise, the interface name is the object's name
# With '_Dev' replaced by '(Dev)' if it's a Dev interface.
name = node.GetName()
if name.endswith('_Dev'):
name = '%s(Dev)' % name[:-4]
if version is None:
return name
return "%s;%s" % (name, version)
#
# Return the array specification of the object.
#
def GetArraySpec(self, node):
assert(node.cls == 'Array')
fixed = node.GetProperty('FIXED')
if fixed:
return '[%s]' % fixed
else:
return '[]'
#
# GetTypeName
#
# For any valid 'typed' object such as Member or Typedef
# the typenode object contains the typename
#
# For a given node return the type name by passing mode.
#
def GetTypeName(self, node, release, prefix=''):
self.LogEnter('GetTypeName of %s rel=%s' % (node, release))
# For Members, Params, and Typedefs get the type it refers to otherwise
# the node in question is it's own type (struct, union etc...)
if node.IsA('Member', 'Param', 'Typedef'):
typeref = node.GetType(release)
else:
typeref = node
if typeref is None:
node.Error('No type at release %s.' % release)
raise CGenError('No type for %s' % node)
# If the type is a (BuiltIn) Type then return it's name
# remapping as needed
if typeref.IsA('Type'):
name = CGen.RemapName.get(typeref.GetName(), None)
if name is None: name = typeref.GetName()
name = '%s%s' % (prefix, name)
# For Interfaces, use the name + version
elif typeref.IsA('Interface'):
rel = typeref.first_release[release]
name = 'struct %s%s' % (prefix, self.GetStructName(typeref, rel, True))
    # For structures, precede with 'struct' or 'union' as appropriate
elif typeref.IsA('Struct'):
if typeref.GetProperty('union'):
name = 'union %s%s' % (prefix, typeref.GetName())
else:
name = 'struct %s%s' % (prefix, typeref.GetName())
# If it's an enum, or typedef then return the Enum's name
elif typeref.IsA('Enum', 'Typedef'):
if not typeref.LastRelease(release):
first = node.first_release[release]
ver = '_' + node.GetVersion(first).replace('.','_')
else:
ver = ''
      # The enum may have skipped having a typedef; we need to prefix it with 'enum'.
if typeref.GetProperty('notypedef'):
name = 'enum %s%s%s' % (prefix, typeref.GetName(), ver)
else:
name = '%s%s%s' % (prefix, typeref.GetName(), ver)
else:
raise RuntimeError('Getting name of non-type %s.' % node)
self.LogExit('GetTypeName %s is %s' % (node, name))
return name
#
# GetRootType
#
# For a given node return basic type of that object. This is
# either a 'Type', 'Callspec', or 'Array'
#
def GetRootTypeMode(self, node, release, mode):
self.LogEnter('GetRootType of %s' % node)
# If it has an array spec, then treat it as an array regardless of type
if node.GetOneOf('Array'):
rootType = 'Array'
# Or if it has a callspec, treat it as a function
elif node.GetOneOf('Callspec'):
rootType, mode = self.GetRootTypeMode(node.GetType(release), release,
'return')
# If it's a plain typedef, try that object's root type
elif node.IsA('Member', 'Param', 'Typedef'):
rootType, mode = self.GetRootTypeMode(node.GetType(release),
release, mode)
# If it's an Enum, then it's normal passing rules
elif node.IsA('Enum'):
rootType = node.cls
# If it's an Interface or Struct, we may be passing by value
elif node.IsA('Interface', 'Struct'):
if mode == 'return':
if node.GetProperty('returnByValue'):
rootType = 'TypeValue'
else:
rootType = node.cls
else:
if node.GetProperty('passByValue'):
rootType = 'TypeValue'
else:
rootType = node.cls
    # If it's a Basic Type, check if it's a special type
elif node.IsA('Type'):
if node.GetName() in CGen.TypeMap:
rootType = node.GetName()
else:
rootType = 'TypeValue'
else:
raise RuntimeError('Getting root type of non-type %s.' % node)
self.LogExit('RootType is "%s"' % rootType)
return rootType, mode
def GetTypeByMode(self, node, release, mode):
self.LogEnter('GetTypeByMode of %s mode=%s release=%s' %
(node, mode, release))
name = self.GetTypeName(node, release)
ntype, mode = self.GetRootTypeMode(node, release, mode)
out = CGen.TypeMap[ntype][mode] % name
self.LogExit('GetTypeByMode %s = %s' % (node, out))
return out
# Get the passing mode of the object (in, out, inout).
def GetParamMode(self, node):
self.Log('GetParamMode for %s' % node)
if node.GetProperty('in'): return 'in'
if node.GetProperty('out'): return 'out'
if node.GetProperty('inout'): return 'inout'
return 'return'
#
# GetComponents
#
# Returns the signature components of an object as a tuple of
# (rtype, name, arrays, callspec) where:
# rtype - The store or return type of the object.
# name - The name of the object.
# arrays - A list of array dimensions as [] or [<fixed_num>].
# args - None if not a function, otherwise a list of parameters.
#
def GetComponents(self, node, release, mode):
self.LogEnter('GetComponents mode %s for %s %s' % (mode, node, release))
# Generate passing type by modifying root type
rtype = self.GetTypeByMode(node, release, mode)
if node.IsA('Enum', 'Interface', 'Struct'):
rname = node.GetName()
else:
rname = node.GetType(release).GetName()
if rname in CGen.RemapName:
rname = CGen.RemapName[rname]
if '%' in rtype:
rtype = rtype % rname
name = node.GetName()
arrayspec = [self.GetArraySpec(array) for array in node.GetListOf('Array')]
callnode = node.GetOneOf('Callspec')
if callnode:
callspec = []
for param in callnode.GetListOf('Param'):
if not param.IsRelease(release):
continue
mode = self.GetParamMode(param)
ptype, pname, parray, pspec = self.GetComponents(param, release, mode)
callspec.append((ptype, pname, parray, pspec))
else:
callspec = None
self.LogExit('GetComponents: %s, %s, %s, %s' %
(rtype, name, arrayspec, callspec))
return (rtype, name, arrayspec, callspec)
def Compose(self, rtype, name, arrayspec, callspec, prefix, func_as_ptr,
include_name, unsized_as_ptr):
self.LogEnter('Compose: %s %s' % (rtype, name))
arrayspec = ''.join(arrayspec)
# Switch unsized array to a ptr. NOTE: Only last element can be unsized.
if unsized_as_ptr and arrayspec[-2:] == '[]':
prefix += '*'
arrayspec=arrayspec[:-2]
if not include_name:
name = prefix + arrayspec
else:
name = prefix + name + arrayspec
if callspec is None:
out = '%s %s' % (rtype, name)
else:
params = []
for ptype, pname, parray, pspec in callspec:
params.append(self.Compose(ptype, pname, parray, pspec, '', True,
include_name=True,
unsized_as_ptr=unsized_as_ptr))
if func_as_ptr:
name = '(*%s)' % name
if not params:
params = ['void']
out = '%s %s(%s)' % (rtype, name, ', '.join(params))
self.LogExit('Exit Compose: %s' % out)
return out
#
# GetSignature
#
# Returns the 'C' style signature of the object
# prefix - A prefix for the object's name
# func_as_ptr - Formats a function as a function pointer
# include_name - If true, include member name in the signature.
# If false, leave it out. In any case, prefix is always
# included.
# include_version - if True, include version in the member name
#
def GetSignature(self, node, release, mode, prefix='', func_as_ptr=True,
include_name=True, include_version=False):
self.LogEnter('GetSignature %s %s as func=%s' %
(node, mode, func_as_ptr))
rtype, name, arrayspec, callspec = self.GetComponents(node, release, mode)
if include_version:
name = self.GetStructName(node, release, True)
# If not a callspec (such as a struct) use a ptr instead of []
unsized_as_ptr = not callspec
out = self.Compose(rtype, name, arrayspec, callspec, prefix,
func_as_ptr, include_name, unsized_as_ptr)
self.LogExit('Exit GetSignature: %s' % out)
return out
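  #
  # Illustrative note (added): for an IDL member along the lines of
  #   int32_t DoSomething([in] PP_Resource res);
  # GetSignature() composes roughly
  #   int32_t (*DoSomething)(PP_Resource res)
  # i.e. the return type from GetTypeByMode(), the (optionally
  # function-pointer) name, and one composed signature per parameter.
  # The exact output depends on the release and on each parameter's mode.
  #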
# Define a Typedef.
def DefineTypedef(self, node, releases, prefix='', comment=False):
__pychecker__ = 'unusednames=comment'
build_list = node.GetUniqueReleases(releases)
out = 'typedef %s;\n' % self.GetSignature(node, build_list[-1], 'return',
prefix, True,
include_version=False)
# Version mangle any other versions
for index, rel in enumerate(build_list[:-1]):
out += '\n'
out += 'typedef %s;\n' % self.GetSignature(node, rel, 'return',
prefix, True,
include_version=True)
self.Log('DefineTypedef: %s' % out)
return out
# Define an Enum.
def DefineEnum(self, node, releases, prefix='', comment=False):
__pychecker__ = 'unusednames=comment,releases'
self.LogEnter('DefineEnum %s' % node)
name = '%s%s' % (prefix, node.GetName())
notypedef = node.GetProperty('notypedef')
unnamed = node.GetProperty('unnamed')
if unnamed:
out = 'enum {'
elif notypedef:
out = 'enum %s {' % name
else:
out = 'typedef enum {'
enumlist = []
for child in node.GetListOf('EnumItem'):
value = child.GetProperty('VALUE')
comment_txt = GetNodeComments(child, tabs=1)
if value:
item_txt = '%s%s = %s' % (prefix, child.GetName(), value)
else:
item_txt = '%s%s' % (prefix, child.GetName())
enumlist.append('%s %s' % (comment_txt, item_txt))
self.LogExit('Exit DefineEnum')
if unnamed or notypedef:
out = '%s\n%s\n};\n' % (out, ',\n'.join(enumlist))
else:
out = '%s\n%s\n} %s;\n' % (out, ',\n'.join(enumlist), name)
return out
def DefineMember(self, node, releases, prefix='', comment=False):
__pychecker__ = 'unusednames=prefix,comment'
release = releases[0]
self.LogEnter('DefineMember %s' % node)
if node.GetProperty('ref'):
out = '%s;' % self.GetSignature(node, release, 'ref', '', True)
else:
out = '%s;' % self.GetSignature(node, release, 'store', '', True)
self.LogExit('Exit DefineMember')
return out
def GetStructName(self, node, release, include_version=False):
suffix = ''
if include_version:
ver_num = node.GetVersion(release)
suffix = ('_%s' % ver_num).replace('.', '_')
return node.GetName() + suffix
def DefineStructInternals(self, node, release,
include_version=False, comment=True):
channel = node.GetProperty('FILE').release_map.GetChannel(release)
if channel == 'dev':
channel_comment = ' /* dev */'
else:
channel_comment = ''
out = ''
if node.GetProperty('union'):
out += 'union %s {%s\n' % (
self.GetStructName(node, release, include_version), channel_comment)
else:
out += 'struct %s {%s\n' % (
self.GetStructName(node, release, include_version), channel_comment)
channel = node.GetProperty('FILE').release_map.GetChannel(release)
# Generate Member Functions
members = []
for child in node.GetListOf('Member'):
if channel == 'stable' and child.NodeIsDevOnly():
continue
member = self.Define(child, [release], tabs=1, comment=comment)
if not member:
continue
members.append(member)
out += '%s\n};\n' % '\n'.join(members)
return out
def DefineStruct(self, node, releases, prefix='', comment=False):
__pychecker__ = 'unusednames=comment,prefix'
self.LogEnter('DefineStruct %s' % node)
out = ''
build_list = node.GetUniqueReleases(releases)
newest_stable = None
newest_dev = None
for rel in build_list:
channel = node.GetProperty('FILE').release_map.GetChannel(rel)
if channel == 'stable':
newest_stable = rel
if channel == 'dev':
newest_dev = rel
last_rel = build_list[-1]
# TODO(noelallen) : Bug 157017 finish multiversion support
if node.IsA('Struct'):
if len(build_list) != 1:
node.Error('Can not support multiple versions of node.')
assert len(build_list) == 1
# Build the most recent one versioned, with comments
out = self.DefineStructInternals(node, last_rel,
include_version=False, comment=True)
if node.IsA('Interface'):
# Build the most recent one versioned, with comments
out = self.DefineStructInternals(node, last_rel,
include_version=True, comment=True)
if last_rel == newest_stable:
# Define an unversioned typedef for the most recent version
out += '\ntypedef struct %s %s;\n' % (
self.GetStructName(node, last_rel, include_version=True),
self.GetStructName(node, last_rel, include_version=False))
# Build the rest without comments and with the version number appended
for rel in build_list[0:-1]:
channel = node.GetProperty('FILE').release_map.GetChannel(rel)
        # Skip dev channel interface versions that are not the newest
        # version and do not have an equivalent stable version.
if channel == 'dev' and rel != newest_dev:
if not node.DevInterfaceMatchesStable(rel):
continue
out += '\n' + self.DefineStructInternals(node, rel,
include_version=True,
comment=False)
if rel == newest_stable:
# Define an unversioned typedef for the most recent version
out += '\ntypedef struct %s %s;\n' % (
self.GetStructName(node, rel, include_version=True),
self.GetStructName(node, rel, include_version=False))
self.LogExit('Exit DefineStruct')
return out
#
# Copyright and Comment
#
# Generate a comment or copyright block
#
def Copyright(self, node, cpp_style=False):
lines = node.GetName().split('\n')
if cpp_style:
return '//' + '\n//'.join(filter(lambda f: f != '', lines)) + '\n'
return CommentLines(lines)
def Indent(self, data, tabs=0):
"""Handles indentation and 80-column line wrapping."""
tab = ' ' * tabs
lines = []
for line in data.split('\n'):
# Add indentation
line = tab + line
space_break = line.rfind(' ', 0, 80)
if len(line) <= 80 or 'http://' in line:
# Ignore normal line and URLs permitted by the style guide.
lines.append(line.rstrip())
elif not '(' in line and space_break >= 0:
# Break long typedefs on nearest space.
lines.append(line[0:space_break])
lines.append(' ' + line[space_break + 1:])
else:
left = line.rfind('(') + 1
args = line[left:].split(',')
orig_args = args
orig_left = left
# Try to split on '(arg1)' or '(arg1, arg2)', not '()'
while args[0][0] == ')':
left = line.rfind('(', 0, left - 1) + 1
if left == 0: # No more parens, take the original option
args = orig_args
left = orig_left
break
args = line[left:].split(',')
line_max = 0
for arg in args:
if len(arg) > line_max: line_max = len(arg)
if left + line_max >= 80:
indent = '%s ' % tab
args = (',\n%s' % indent).join([arg.strip() for arg in args])
lines.append('%s\n%s%s' % (line[:left], indent, args))
else:
indent = ' ' * (left - 1)
args = (',\n%s' % indent).join(args)
lines.append('%s%s' % (line[:left], args))
return '\n'.join(lines)
# Define a top level object.
def Define(self, node, releases, tabs=0, prefix='', comment=False):
    # If this request does not match a unique release, or if the release is
    # not available (possibly deprecated), then skip it.
unique = node.GetUniqueReleases(releases)
if not unique or not node.InReleases(releases):
return ''
self.LogEnter('Define %s tab=%d prefix="%s"' % (node,tabs,prefix))
declmap = dict({
'Enum': CGen.DefineEnum,
'Function': CGen.DefineMember,
'Interface': CGen.DefineStruct,
'Member': CGen.DefineMember,
'Struct': CGen.DefineStruct,
'Typedef': CGen.DefineTypedef
})
out = ''
func = declmap.get(node.cls, None)
if not func:
ErrOut.Log('Failed to define %s named %s' % (node.cls, node.GetName()))
define_txt = func(self, node, releases, prefix=prefix, comment=comment)
comment_txt = GetNodeComments(node, tabs=0)
if comment_txt and comment:
out += comment_txt
out += define_txt
indented_out = self.Indent(out, tabs)
self.LogExit('Exit Define')
return indented_out
# Clean a string representing an object definition and return the string
# as a single space-delimited set of tokens.
def CleanString(instr):
instr = instr.strip()
instr = instr.split()
return ' '.join(instr)
# Test a file by comparing all of its objects with their comments.
def TestFile(filenode):
cgen = CGen()
errors = 0
for node in filenode.GetChildren()[2:]:
instr = node.GetOneOf('Comment')
if not instr: continue
instr.Dump()
instr = CleanString(instr.GetName())
outstr = cgen.Define(node, releases=['M14'])
if GetOption('verbose'):
print outstr + '\n'
outstr = CleanString(outstr)
if instr != outstr:
ErrOut.Log('Failed match of\n>>%s<<\nto:\n>>%s<<\nFor:\n' %
(instr, outstr))
node.Dump(1, comments=True)
errors += 1
return errors
# Build and resolve the AST, then compare each file individually.
def TestFiles(filenames):
if not filenames:
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_cgen', '*.idl')
filenames = glob.glob(idldir)
filenames = sorted(filenames)
ast = ParseFiles(filenames)
total_errs = 0
for filenode in ast.GetListOf('File'):
errs = TestFile(filenode)
if errs:
ErrOut.Log('%s test failed with %d error(s).' %
(filenode.GetName(), errs))
total_errs += errs
if total_errs:
ErrOut.Log('Failed generator test.')
else:
InfoOut.Log('Passed generator test.')
return total_errs
def main(args):
filenames = ParseOptions(args)
if GetOption('test'):
return TestFiles(filenames)
ast = ParseFiles(filenames)
cgen = CGen()
for f in ast.GetListOf('File'):
if f.GetProperty('ERRORS') > 0:
print 'Skipping %s' % f.GetName()
continue
for node in f.GetChildren()[2:]:
print cgen.Define(node, ast.releases, comment=True, prefix='tst_')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | -3,180,145,218,188,916,700 | 30.121982 | 79 | 0.580697 | false |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Foursquare/Users/Badges.py | 5 | 3462 | # -*- coding: utf-8 -*-
###############################################################################
#
# Badges
# Returns badges for a given user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Badges(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Badges Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Badges, self).__init__(temboo_session, '/Library/Foursquare/Users/Badges')
def new_input_set(self):
return BadgesInputSet()
def _make_result_set(self, result, path):
return BadgesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return BadgesChoreographyExecution(session, exec_id, path)
class BadgesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Badges
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_OauthToken(self, value):
"""
Set the value of the OauthToken input for this Choreo. ((required, string) The Foursquare API OAuth token string.)
"""
super(BadgesInputSet, self)._set_input('OauthToken', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
super(BadgesInputSet, self)._set_input('ResponseFormat', value)
def set_UserID(self, value):
"""
Set the value of the UserID input for this Choreo. ((optional, string) Identity of the user to get badges for. Defaults to "self" to get lists of the acting user.)
"""
super(BadgesInputSet, self)._set_input('UserID', value)
class BadgesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Badges Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
return self._output.get('Response', None)
class BadgesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return BadgesResultSet(response, path)
| gpl-2.0 | 5,529,902,878,224,082,000 | 36.630435 | 171 | 0.667244 | false |
ganga-devs/ganga | ganga/GangaTest/Lib/TestRobot/ProxyChecker.py | 1 | 1115 | #Check for grid proxy
from GangaRobot.Framework.Action import IAction
from GangaCore.Utility.logging import getLogger
from GangaCore.Utility.Config import getConfig
import os, datetime, time
from GangaRobot.Framework.exceptions import *
#from GangaCore.GPIDev.Credentials_old.GridProxy import GridProxy
from GangaCore.GPIDev.Credentials_old import getCredential
logger = getLogger()
class ProxyChecker(IAction):
"""
ProxyChecker IAction implementation
    Checks that the grid proxy is valid long enough to run the tests.
"""
def execute(self, runid):
# check for grid proxy
GPcred = getCredential(name = 'GridProxy', create = 'False')
timeleft = float(GPcred.timeleft("seconds"))
        # Get the maximum time the tests are allowed to take
config = getConfig('TestRobot')
MaxTestTime = config['JobTimeOut']
if ( timeleft < MaxTestTime ):
raise GangaRobotBreakError("Grid Proxy valid for %8.0f seconds but %d might be required to finish testing. Breaking." % (timeleft, MaxTestTime), Exception)
| gpl-2.0 | -7,439,472,121,618,250,000 | 31.794118 | 167 | 0.685202 | false |
saurabh6790/trufil_lib | core/doctype/profile/test_profile.py | 34 | 3620 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import webnotes, unittest
from webnotes.model.utils import delete_doc, LinkExistsError
class TestProfile(unittest.TestCase):
def test_delete(self):
self.assertRaises(LinkExistsError, delete_doc, "Role", "_Test Role 2")
webnotes.conn.sql("""delete from tabUserRole where role='_Test Role 2'""")
delete_doc("Role","_Test Role 2")
profile = webnotes.bean(copy=test_records[1])
profile.doc.email = "[email protected]"
profile.insert()
webnotes.bean({"doctype": "ToDo", "description": "_Test"}).insert()
delete_doc("Profile", "[email protected]")
self.assertTrue(not webnotes.conn.sql("""select * from `tabToDo` where owner=%s""",
"[email protected]"))
webnotes.bean({"doctype": "Role", "role_name": "_Test Role 2"}).insert()
def test_get_value(self):
self.assertEquals(webnotes.conn.get_value("Profile", "[email protected]"), "[email protected]")
self.assertEquals(webnotes.conn.get_value("Profile", {"email":"[email protected]"}), "[email protected]")
self.assertEquals(webnotes.conn.get_value("Profile", {"email":"[email protected]"}, "email"), "[email protected]")
self.assertEquals(webnotes.conn.get_value("Profile", {"email":"[email protected]"}, ["first_name", "email"]),
("_Test", "[email protected]"))
self.assertEquals(webnotes.conn.get_value("Profile",
{"email":"[email protected]", "first_name": "_Test"},
["first_name", "email"]),
("_Test", "[email protected]"))
test_profile = webnotes.conn.sql("select * from tabProfile where name='[email protected]'",
as_dict=True)[0]
self.assertEquals(webnotes.conn.get_value("Profile", {"email":"[email protected]"}, "*", as_dict=True),
test_profile)
self.assertEquals(webnotes.conn.get_value("Profile", "[email protected]"), None)
webnotes.conn.set_value("Control Panel", "Control Panel", "_test", "_test_val")
self.assertEquals(webnotes.conn.get_value("Control Panel", None, "_test"), "_test_val")
self.assertEquals(webnotes.conn.get_value("Control Panel", "Control Panel", "_test"), "_test_val")
def test_doclist(self):
p_meta = webnotes.get_doctype("Profile")
self.assertEquals(len(p_meta.get({"doctype": "DocField", "parent": "Profile", "fieldname": "first_name"})), 1)
self.assertEquals(len(p_meta.get({"doctype": "DocField", "parent": "Profile", "fieldname": "^first"})), 1)
self.assertEquals(len(p_meta.get({"fieldname": ["!=", "first_name"]})), len(p_meta) - 1)
self.assertEquals(len(p_meta.get({"fieldname": ["in", ["first_name", "last_name"]]})), 2)
self.assertEquals(len(p_meta.get({"fieldname": ["not in", ["first_name", "last_name"]]})), len(p_meta) - 2)
test_records = [[{
"doctype":"Profile",
"email": "[email protected]",
"first_name": "_Test",
"new_password": "testpassword",
"enabled": 1
}, {
"doctype":"UserRole",
"parentfield":"user_roles",
"role": "_Test Role"
}, {
"doctype":"UserRole",
"parentfield":"user_roles",
"role": "System Manager"
}],
[{
"doctype":"Profile",
"email": "[email protected]",
"first_name": "_Test1",
"new_password": "testpassword"
}],
[{
"doctype":"Profile",
"email": "[email protected]",
"first_name": "_Test2",
"new_password": "testpassword"
}],
[{
"doctype":"Profile",
"email": "[email protected]",
"first_name": "_Test",
"new_password": "testpassword",
"enabled": 1
}, {
"doctype":"UserRole",
"parentfield":"user_roles",
"role": "_Test Role 2"
}, {
"doctype":"UserRole",
"parentfield":"user_roles",
"role": "System Manager"
}],
] | mit | 8,994,529,740,686,218,000 | 34.851485 | 114 | 0.648066 | false |
mozilla-services/tecken | tecken/download/forms.py | 1 | 2855 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import re
import dateutil.parser
from django import forms
from django.utils import timezone
class DownloadForm(forms.Form):
code_file = forms.CharField(required=False)
code_id = forms.CharField(required=False)
class DownloadsMissingForm(forms.Form):
symbol = forms.CharField(required=False)
debugid = forms.CharField(required=False)
filename = forms.CharField(required=False)
code_file = forms.CharField(required=False)
code_id = forms.CharField(required=False)
modified_at = forms.CharField(required=False)
sort = forms.CharField(required=False)
reverse = forms.CharField(required=False)
def _clean_dates(self, values):
"""Return a list of either a date or None.
Each one can have an operator.
"""
if not values:
return []
dates = []
operators = re.compile("<=|>=|<|>|=")
for block in [x.strip() for x in values.split(",") if x.strip()]:
if operators.findall(block):
(operator,) = operators.findall(block)
else:
operator = "="
rest = operators.sub("", block).strip()
if rest.lower() in ("null", "incomplete"):
date_obj = None
elif rest.lower() == "today":
date_obj = timezone.now().replace(hour=0, minute=0, second=0)
elif rest.lower() == "yesterday":
date_obj = timezone.now().replace(hour=0, minute=0, second=0)
date_obj -= datetime.timedelta(days=1)
else:
try:
date_obj = dateutil.parser.parse(rest)
except ValueError:
raise forms.ValidationError(f"Unable to parse {rest!r}")
if timezone.is_naive(date_obj):
date_obj = timezone.make_aware(date_obj)
dates.append((operator, date_obj.date()))
return dates
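    # Illustrative examples (added comments, not from the original source):
    # a filter string such as ">=2019-01-01, <today" yields
    # [(">=", date(2019, 1, 1)), ("<", <today's date>)], while "incomplete"
    # or "null" yields [("=", None)].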
def clean_modified_at(self):
return self._clean_dates(self.cleaned_data["modified_at"])
def clean_sort(self):
value = self.cleaned_data["sort"]
if value and value not in ["modified_at", "created_at"]:
raise forms.ValidationError(f"Invalid sort '{value}'")
return value
def clean_reverse(self):
value = self.cleaned_data["reverse"]
if value:
return value == "true"
def clean(self):
cleaned = super().clean()
if cleaned.get("sort"):
cleaned["order_by"] = {
"sort": cleaned.pop("sort"),
"reverse": cleaned.pop("reverse"),
}
return cleaned
| mpl-2.0 | -133,640,022,271,394,500 | 32.988095 | 77 | 0.577233 | false |
etaos/etaos | kernel/python/lib/cpu.py | 1 | 4278 | #
# ETA/OS - CPU class
# Copyright (C) 2017 Michel Megens <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
## @addtogroup python-cpu
# @{
## @package cpu
# @brief Python CPU API.
#
# <b>USAGE</b>
#
# import cpu
__name__ = "cpu"
## Write \p value to \p pin.
# @param pin GPIO pin number.
# @param value Boolean value.
# @return None
def write(pin, value):
"""__NATIVE__
#if defined(CONFIG_GPIO) || defined(CONFIG_GPIO_MODULE)
PmReturn_t retval = PM_RET_OK;
pPmObj_t pin, value;
int32_t _pin;
bool _value;
if (NATIVE_GET_NUM_ARGS() != 2) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
pin = NATIVE_GET_LOCAL(0);
value = NATIVE_GET_LOCAL(1);
if(OBJ_GET_TYPE(pin) != OBJ_TYPE_INT ||
OBJ_GET_TYPE(value) != OBJ_TYPE_BOOL) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
_pin = ((pPmInt_t)pin)->val;
_value = ((pPmBoolean_t)value)->val != 0;
pm_cpu_pin_write(_pin, _value);
return retval;
#else
    return PM_RET_OK;
#endif
"""
pass
## Set \p pin as output.
# @param pin GPIO pin number.
# @param value Boolean value.
# @return None
def set_output(pin, value):
"""__NATIVE__
#if defined(CONFIG_GPIO) || defined(CONFIG_GPIO_MODULE)
PmReturn_t retval = PM_RET_OK;
pPmObj_t pin, value;
int32_t _pin;
bool _value;
if (NATIVE_GET_NUM_ARGS() != 2) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
pin = NATIVE_GET_LOCAL(0);
value = NATIVE_GET_LOCAL(1);
if(OBJ_GET_TYPE(pin) != OBJ_TYPE_INT ||
OBJ_GET_TYPE(value) != OBJ_TYPE_BOOL) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
_pin = ((pPmInt_t)pin)->val;
_value = ((pPmBoolean_t)value)->val != 0;
pm_cpu_direction_output(_pin, _value);
return retval;
#else
    return PM_RET_OK;
#endif
"""
pass
## Set \p pin as input.
# @param pin GPIO pin number.
# @return None
def set_input(pin):
"""__NATIVE__
#if defined(CONFIG_GPIO) || defined(CONFIG_GPIO_MODULE)
PmReturn_t retval = PM_RET_OK;
pPmObj_t pin;
int32_t _pin;
if (NATIVE_GET_NUM_ARGS() != 1) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
pin = NATIVE_GET_LOCAL(0);
if(OBJ_GET_TYPE(pin) != OBJ_TYPE_INT) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
_pin = ((pPmInt_t)pin)->val;
pm_cpu_direction_input(_pin);
return retval;
#else
    return PM_RET_OK;
#endif
"""
pass
## Read from GPIO pin \p pin.
# @param pin GPIO pin number.
# @return Boolean value of \p pin.
def read(pin):
"""__NATIVE__
#if defined(CONFIG_GPIO) || defined(CONFIG_GPIO_MODULE)
PmReturn_t retval = PM_RET_OK;
pPmObj_t pin;
int32_t _pin;
pPmObj_t rv;
if (NATIVE_GET_NUM_ARGS() != 1) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
pin = NATIVE_GET_LOCAL(0);
if(OBJ_GET_TYPE(pin) != OBJ_TYPE_INT) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
_pin = ((pPmInt_t)pin)->val;
rv = pm_cpu_pin_read(_pin) ? PM_TRUE : PM_FALSE;
NATIVE_SET_TOS(rv);
return retval;
#else
    return PM_RET_OK;
#endif
"""
pass
## Read from an analog port.
# @param pin Analog pin number.
# @return The value read from \p pin.
def analog_read(pin):
"""__NATIVE__
#if defined(CONFIG_ANALOG) || defined(CONFIG_ANALOG_MODULE)
PmReturn_t retval = PM_RET_OK;
pPmObj_t pin;
int32_t _pin;
pPmObj_t rv;
float value;
if (NATIVE_GET_NUM_ARGS() != 1) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
pin = NATIVE_GET_LOCAL(0);
if(OBJ_GET_TYPE(pin) != OBJ_TYPE_INT) {
PM_RAISE(retval, PM_RET_EX_TYPE);
return retval;
}
_pin = ((pPmInt_t)pin)->val;
value = pm_cpu_analog_pin_read(_pin);
retval = float_new(value, &rv);
NATIVE_SET_TOS(rv);
return retval;
#else
    return PM_RET_OK;
#endif
"""
pass
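# A minimal usage sketch (added for illustration; not part of the original
# module). The pin numbers below are arbitrary examples, not ETA/OS defaults.
def _example():
    set_output(13, True)      # configure pin 13 as an output, initially high
    write(13, False)          # then drive it low
    set_input(12)             # configure pin 12 as an input
    level = read(12)          # sample its digital level
    voltage = analog_read(0)  # read analog channel 0
    return level, voltage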
## @}
| lgpl-3.0 | -47,768,275,232,880,920 | 19.567308 | 77 | 0.649603 | false |
dstroppa/openstack-smartos-nova-grizzly | nova/tests/api/openstack/compute/contrib/test_admin_actions.py | 2 | 15194 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack import compute
from nova.api.openstack.compute.contrib import admin_actions
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
INSTANCE = {
"id": 1,
"name": "fake",
"display_name": "test_server",
"uuid": "abcd",
"user_id": 'fake_user_id',
"tenant_id": 'fake_tenant_id',
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"security_groups": [{"id": 1, "name": "test"}],
"progress": 0,
"image_ref": 'http://foo.com/123',
"fixed_ips": [],
"instance_type": {"flavorid": '124'},
}
def fake_compute_api(*args, **kwargs):
return True
def fake_compute_api_raises_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
def fake_compute_api_get(self, context, instance_id):
return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE,
'task_state': None}
class AdminActionsTest(test.TestCase):
_actions = ('pause', 'unpause', 'suspend', 'resume', 'migrate',
'resetNetwork', 'injectNetworkInfo', 'lock', 'unlock')
_methods = ('pause', 'unpause', 'suspend', 'resume', 'resize',
'reset_network', 'inject_network_info', 'lock', 'unlock')
_actions_that_check_state = (
# action, method
('pause', 'pause'),
('unpause', 'unpause'),
('suspend', 'suspend'),
('resume', 'resume'),
('migrate', 'resize'))
def setUp(self):
super(AdminActionsTest, self).setUp()
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.UUID = uuid.uuid4()
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Admin_actions'])
def test_admin_api_actions(self):
app = fakes.wsgi_app(init_only=('servers',))
for _action in self._actions:
req = webob.Request.blank('/v2/fake/servers/%s/action' %
self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({_action: None})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 202)
def test_admin_api_actions_raise_conflict_on_invalid_state(self):
app = fakes.wsgi_app(init_only=('servers',))
for _action, _method in self._actions_that_check_state:
self.stubs.Set(compute_api.API, _method,
fake_compute_api_raises_invalid_state)
req = webob.Request.blank('/v2/fake/servers/%s/action' %
self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({_action: None})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 409)
self.assertIn("Cannot \'%(_action)s\' while instance" % locals(),
res.body)
def test_migrate_live_enabled(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt, init_only=('servers',))
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'os-migrateLive': {
'host': 'hostname',
'block_migration': False,
'disk_over_commit': False,
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
def fake_scheduler_api_live_migration(self, context, dest,
block_migration=False,
disk_over_commit=False, instance=None,
instance_id=None, topic=None):
return None
self.stubs.Set(compute_api.API, 'update', fake_update)
self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
'live_migration',
fake_scheduler_api_live_migration)
res = req.get_response(app)
self.assertEqual(res.status_int, 202)
def test_migrate_live_missing_dict_param(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt, init_only=('servers',))
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'os-migrateLive': {
'dummy': 'hostname',
'block_migration': False,
'disk_over_commit': False,
}
})
req.content_type = 'application/json'
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
def test_migrate_live_compute_service_unavailable(self):
ctxt = context.get_admin_context()
ctxt.user_id = 'fake'
ctxt.project_id = 'fake'
ctxt.is_admin = True
app = fakes.wsgi_app(fake_auth_context=ctxt, init_only=('servers',))
req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
req.method = 'POST'
req.body = jsonutils.dumps({
'os-migrateLive': {
'host': 'hostname',
'block_migration': False,
'disk_over_commit': False,
}
})
req.content_type = 'application/json'
def fake_update(inst, context, instance,
task_state, expected_task_state):
return None
def fake_scheduler_api_live_migration(context, dest,
block_migration=False,
disk_over_commit=False, instance=None,
instance_id=None, topic=None):
raise exception.ComputeServiceUnavailable(host='host')
self.stubs.Set(compute_api.API, 'update', fake_update)
self.stubs.Set(scheduler_rpcapi.SchedulerAPI,
'live_migration',
fake_scheduler_api_live_migration)
res = req.get_response(app)
self.assertEqual(res.status_int, 400)
self.assertIn(
unicode(exception.ComputeServiceUnavailable(host='host')),
res.body)
class CreateBackupTests(test.TestCase):
def setUp(self):
super(CreateBackupTests, self).setUp()
self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
self.backup_stubs = fakes.stub_out_compute_api_backup(self.stubs)
self.app = compute.APIRouter(init_only=('servers',))
self.uuid = uuid.uuid4()
def _get_request(self, body):
url = '/fake/servers/%s/action' % self.uuid
req = fakes.HTTPRequest.blank(url)
req.method = 'POST'
req.content_type = 'application/json'
req.body = jsonutils.dumps(body)
return req
def test_create_backup_with_metadata(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': {'123': 'asdf'},
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 202)
self.assertTrue(response.headers['Location'])
def test_create_backup_with_too_much_metadata(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
'metadata': {'123': 'asdf'},
},
}
for num in range(CONF.quota_metadata_items + 1):
body['createBackup']['metadata']['foo%i' % num] = "bar"
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 413)
def test_create_backup_no_name(self):
# Name is required for backups.
body = {
'createBackup': {
'backup_type': 'daily',
'rotation': 1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_no_rotation(self):
# Rotation is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_negative_rotation(self):
"""Rotation must be greater than or equal to zero
for backup requests
"""
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': -1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_no_backup_type(self):
# Backup Type (daily or weekly) is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
'rotation': 1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_bad_entity(self):
body = {'createBackup': 'go'}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 400)
def test_create_backup_rotation_is_zero(self):
# The happy path for creating backups if rotation is zero.
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 0,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 202)
self.assertFalse('Location' in response.headers)
def test_create_backup_rotation_is_positive(self):
# The happy path for creating backups if rotation is positive.
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 202)
self.assertTrue(response.headers['Location'])
def test_create_backup_raises_conflict_on_invalid_state(self):
body = {
'createBackup': {
'name': 'Backup 1',
'backup_type': 'daily',
'rotation': 1,
},
}
self.stubs.Set(compute_api.API, 'backup',
fake_compute_api_raises_invalid_state)
request = self._get_request(body)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 409)
class ResetStateTests(test.TestCase):
def setUp(self):
super(ResetStateTests, self).setUp()
self.exists = True
self.kwargs = None
self.uuid = uuid.uuid4()
def fake_get(inst, context, instance_id):
if self.exists:
return dict(id=1, uuid=instance_id, vm_state=vm_states.ACTIVE)
raise exception.InstanceNotFound(instance_id=instance_id)
def fake_update(inst, context, instance, **kwargs):
self.kwargs = kwargs
self.stubs.Set(compute_api.API, 'get', fake_get)
self.stubs.Set(compute_api.API, 'update', fake_update)
self.admin_api = admin_actions.AdminActionsController()
url = '/fake/servers/%s/action' % self.uuid
self.request = fakes.HTTPRequest.blank(url)
def test_no_state(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.admin_api._reset_state,
self.request, 'inst_id',
{"os-resetState": None})
def test_bad_state(self):
self.assertRaises(webob.exc.HTTPBadRequest,
self.admin_api._reset_state,
self.request, 'inst_id',
{"os-resetState": {"state": "spam"}})
def test_no_instance(self):
self.exists = False
self.assertRaises(webob.exc.HTTPNotFound,
self.admin_api._reset_state,
self.request, 'inst_id',
{"os-resetState": {"state": "active"}})
def test_reset_active(self):
body = {"os-resetState": {"state": "active"}}
result = self.admin_api._reset_state(self.request, 'inst_id', body)
self.assertEqual(result.status_int, 202)
self.assertEqual(self.kwargs, dict(vm_state=vm_states.ACTIVE,
task_state=None))
def test_reset_error(self):
body = {"os-resetState": {"state": "error"}}
result = self.admin_api._reset_state(self.request, 'inst_id', body)
self.assertEqual(result.status_int, 202)
self.assertEqual(self.kwargs, dict(vm_state=vm_states.ERROR,
task_state=None))
| apache-2.0 | 1,699,468,721,495,718,400 | 34.171296 | 78 | 0.55206 | false |
jinq0123/grpc_cb_core | conanfile.py | 1 | 1244 | from conans import ConanFile, CMake
class GrpccbConan(ConanFile):
name = "grpc_cb_core"
version = "0.2"
license = "Apache-2.0"
url = "https://github.com/jinq0123/grpc_cb_core"
description = "C++ gRPC core library with callback interface."
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False]}
default_options = "shared=False"
requires = "grpc/1.17.2@inexorgame/stable",
generators = "cmake", "Premake" # A custom generator: PremakeGen/0.1@memsharded/testing
build_requires = "PremakeGen/0.1@memsharded/testing"
exports_sources = "src*", "include*", "CMakeLists.txt"
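    # Illustrative local usage (added comment; the user/channel below are
    # assumptions, not part of the original recipe):
    #   conan create . jinq0123/stable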
def build(self):
cmake = CMake(self)
self.run('cmake %s %s' % (self.source_folder, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def package(self):
self.copy("include/*")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.dylib*", dst="lib", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = ["grpc_cb_core"]
| apache-2.0 | 4,314,942,800,006,423,000 | 36.69697 | 92 | 0.610129 | false |
roman-v-l/AIND | AIND-Planning/aimacode/probability.py | 2 | 22703 | """Probability models. (Chapter 13-15)
"""
from .utils import (
product, argmax, element_wise_product, matrix_multiplication,
vector_to_diagonal, vector_add, scalar_vector_product, inverse_matrix,
weighted_sample_with_replacement, isclose, probability, normalize
)
from .logic import extend
import random
from collections import defaultdict
from functools import reduce
# ______________________________________________________________________________
def DTAgentProgram(belief_state):
"A decision-theoretic agent. [Figure 13.1]"
def program(percept):
belief_state.observe(program.action, percept)
program.action = argmax(belief_state.actions(),
key=belief_state.expected_outcome_utility)
return program.action
program.action = None
return program
# ______________________________________________________________________________
class ProbDist:
"""A discrete probability distribution. You name the random variable
in the constructor, then assign and query probability of values.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
0.25
>>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500})
>>> P['lo'], P['med'], P['hi']
(0.125, 0.375, 0.5)
"""
def __init__(self, varname='?', freqs=None):
"""If freqs is given, it is a dictionary of value: frequency pairs,
and the ProbDist then is normalized."""
self.prob = {}
self.varname = varname
self.values = []
if freqs:
for (v, p) in freqs.items():
self[v] = p
self.normalize()
def __getitem__(self, val):
"Given a value, return P(value)."
try:
return self.prob[val]
except KeyError:
return 0
def __setitem__(self, val, p):
"Set P(val) = p."
if val not in self.values:
self.values.append(val)
self.prob[val] = p
def normalize(self):
"""Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0."""
total = sum(self.prob.values())
if not isclose(total, 1.0):
for val in self.prob:
self.prob[val] /= total
return self
def show_approx(self, numfmt='%.3g'):
"""Show the probabilities rounded and sorted by key, for the
sake of portable doctests."""
return ', '.join([('%s: ' + numfmt) % (v, p)
for (v, p) in sorted(self.prob.items())])
def __repr__(self):
return "P(%s)" % self.varname
class JointProbDist(ProbDist):
"""A discrete probability distribute over a set of variables.
>>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25
>>> P[1, 1]
0.25
>>> P[dict(X=0, Y=1)] = 0.5
>>> P[dict(X=0, Y=1)]
0.5"""
def __init__(self, variables):
self.prob = {}
self.variables = variables
self.vals = defaultdict(list)
def __getitem__(self, values):
"Given a tuple or dict of values, return P(values)."
values = event_values(values, self.variables)
return ProbDist.__getitem__(self, values)
def __setitem__(self, values, p):
"""Set P(values) = p. Values can be a tuple or a dict; it must
have a value for each of the variables in the joint. Also keep track
of the values we have seen so far for each variable."""
values = event_values(values, self.variables)
self.prob[values] = p
for var, val in zip(self.variables, values):
if val not in self.vals[var]:
self.vals[var].append(val)
def values(self, var):
"Return the set of possible values for a variable."
return self.vals[var]
def __repr__(self):
return "P(%s)" % self.variables
def event_values(event, variables):
"""Return a tuple of the values of variables in event.
>>> event_values ({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
(8, 10)
>>> event_values ((1, 2), ['C', 'A'])
(1, 2)
"""
if isinstance(event, tuple) and len(event) == len(variables):
return event
else:
return tuple([event[var] for var in variables])
# ______________________________________________________________________________
def enumerate_joint_ask(X, e, P):
"""Return a probability distribution over the values of the variable X,
given the {var:val} observations e, in the JointProbDist P. [Section 13.3]
>>> P = JointProbDist(['X', 'Y'])
>>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125
>>> enumerate_joint_ask('X', dict(Y=1), P).show_approx()
'0: 0.667, 1: 0.167, 2: 0.167'
"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X) # probability distribution for X, initially empty
Y = [v for v in P.variables if v != X and v not in e] # hidden variables.
for xi in P.values(X):
Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
return Q.normalize()
def enumerate_joint(variables, e, P):
"""Return the sum of those entries in P consistent with e,
provided variables is P's remaining variables (the ones not in e)."""
if not variables:
return P[e]
Y, rest = variables[0], variables[1:]
return sum([enumerate_joint(rest, extend(e, Y, y), P)
for y in P.values(Y)])
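# A minimal end-to-end sketch (added for illustration; not part of the original
# AIMA module): the toothache/cavity joint distribution queried with
# enumerate_joint_ask. Here P(Cavity=True | Toothache=True) = 0.12/0.20 = 0.6.
def _example_full_joint():
    P = JointProbDist(['Toothache', 'Cavity'])
    P[True, True] = 0.12
    P[True, False] = 0.08
    P[False, True] = 0.08
    P[False, False] = 0.72
    return enumerate_joint_ask('Cavity', {'Toothache': True}, P)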
# ______________________________________________________________________________
class BayesNet:
"Bayesian network containing only boolean-variable nodes."
def __init__(self, node_specs=[]):
"nodes must be ordered with parents before children."
self.nodes = []
self.variables = []
for node_spec in node_specs:
self.add(node_spec)
def add(self, node_spec):
"""Add a node to the net. Its parents must already be in the
net, and its variable must not."""
node = BayesNode(*node_spec)
assert node.variable not in self.variables
assert all((parent in self.variables) for parent in node.parents)
self.nodes.append(node)
self.variables.append(node.variable)
for parent in node.parents:
self.variable_node(parent).children.append(node)
def variable_node(self, var):
"""Return the node for the variable named var.
>>> burglary.variable_node('Burglary').variable
'Burglary'"""
for n in self.nodes:
if n.variable == var:
return n
raise Exception("No such variable: %s" % var)
def variable_values(self, var):
"Return the domain of var."
return [True, False]
def __repr__(self):
return 'BayesNet(%r)' % self.nodes
class BayesNode:
"""A conditional probability distribution for a boolean variable,
P(X | parents). Part of a BayesNet."""
def __init__(self, X, parents, cpt):
"""X is a variable name, and parents a sequence of variable
names or a space-separated string. cpt, the conditional
probability table, takes one of these forms:
* A number, the unconditional probability P(X=true). You can
use this form when there are no parents.
* A dict {v: p, ...}, the conditional probability distribution
P(X=true | parent=v) = p. When there's just one parent.
* A dict {(v1, v2, ...): p, ...}, the distribution P(X=true |
parent1=v1, parent2=v2, ...) = p. Each key must have as many
values as there are parents. You can use this form always;
the first two are just conveniences.
In all cases the probability of X being false is left implicit,
since it follows from P(X=true).
>>> X = BayesNode('X', '', 0.2)
>>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7})
>>> Z = BayesNode('Z', 'P Q',
... {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7})
"""
if isinstance(parents, str):
parents = parents.split()
# We store the table always in the third form above.
if isinstance(cpt, (float, int)): # no parents, 0-tuple
cpt = {(): cpt}
elif isinstance(cpt, dict):
# one parent, 1-tuple
if cpt and isinstance(list(cpt.keys())[0], bool):
cpt = {(v,): p for v, p in cpt.items()}
assert isinstance(cpt, dict)
for vs, p in cpt.items():
assert isinstance(vs, tuple) and len(vs) == len(parents)
assert all(isinstance(v, bool) for v in vs)
assert 0 <= p <= 1
self.variable = X
self.parents = parents
self.cpt = cpt
self.children = []
def p(self, value, event):
"""Return the conditional probability
P(X=value | parents=parent_values), where parent_values
are the values of parents in event. (event must assign each
parent a value.)
>>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625})
>>> bn.p(False, {'Burglary': False, 'Earthquake': True})
0.375"""
assert isinstance(value, bool)
ptrue = self.cpt[event_values(event, self.parents)]
return ptrue if value else 1 - ptrue
def sample(self, event):
"""Sample from the distribution for this variable conditioned
on event's values for parent_variables. That is, return True/False
at random according with the conditional probability given the
parents."""
return probability(self.p(True, event))
def __repr__(self):
return repr((self.variable, ' '.join(self.parents)))
# Burglary example [Figure 14.2]
T, F = True, False
burglary = BayesNet([
('Burglary', '', 0.001),
('Earthquake', '', 0.002),
('Alarm', 'Burglary Earthquake',
{(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
])
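# Illustrative sketch (added): reading a single CPT entry from the burglary
# network defined above. With Burglary=True and Earthquake=False this returns
# the 0.94 entry of the Alarm node's CPT.
def _example_burglary_cpt():
    alarm = burglary.variable_node('Alarm')
    return alarm.p(True, {'Burglary': True, 'Earthquake': False})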
# ______________________________________________________________________________
def enumeration_ask(X, e, bn):
"""Return the conditional probability distribution of variable X
given evidence e, from BayesNet bn. [Figure 14.9]
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X)
for xi in bn.variable_values(X):
Q[xi] = enumerate_all(bn.variables, extend(e, X, xi), bn)
return Q.normalize()
def enumerate_all(variables, e, bn):
"""Return the sum of those entries in P(variables | e{others})
consistent with e, where P is the joint distribution represented
by bn, and e{others} means e restricted to bn's other variables
(the ones other than variables). Parents must precede children in variables."""
if not variables:
return 1.0
Y, rest = variables[0], variables[1:]
Ynode = bn.variable_node(Y)
if Y in e:
return Ynode.p(e[Y], e) * enumerate_all(rest, e, bn)
else:
return sum(Ynode.p(y, e) * enumerate_all(rest, extend(e, Y, y), bn)
for y in bn.variable_values(Y))
# ______________________________________________________________________________
def elimination_ask(X, e, bn):
"""Compute bn's P(X|e) by variable elimination. [Figure 14.11]
>>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'"""
assert X not in e, "Query variable must be distinct from evidence"
factors = []
for var in reversed(bn.variables):
factors.append(make_factor(var, e, bn))
if is_hidden(var, X, e):
factors = sum_out(var, factors, bn)
return pointwise_product(factors, bn).normalize()
def is_hidden(var, X, e):
"Is var a hidden variable when querying P(X|e)?"
return var != X and var not in e
def make_factor(var, e, bn):
"""Return the factor for var in bn's joint distribution given e.
That is, bn's full joint distribution, projected to accord with e,
is the pointwise product of these factors for bn's variables."""
node = bn.variable_node(var)
variables = [X for X in [var] + node.parents if X not in e]
cpt = {event_values(e1, variables): node.p(e1[var], e1)
for e1 in all_events(variables, bn, e)}
return Factor(variables, cpt)
def pointwise_product(factors, bn):
return reduce(lambda f, g: f.pointwise_product(g, bn), factors)
def sum_out(var, factors, bn):
"Eliminate var from all factors by summing over its values."
result, var_factors = [], []
for f in factors:
(var_factors if var in f.variables else result).append(f)
result.append(pointwise_product(var_factors, bn).sum_out(var, bn))
return result
class Factor:
"A factor in a joint distribution."
def __init__(self, variables, cpt):
self.variables = variables
self.cpt = cpt
def pointwise_product(self, other, bn):
"Multiply two factors, combining their variables."
variables = list(set(self.variables) | set(other.variables))
cpt = {event_values(e, variables): self.p(e) * other.p(e)
for e in all_events(variables, bn, {})}
return Factor(variables, cpt)
def sum_out(self, var, bn):
"Make a factor eliminating var by summing over its values."
variables = [X for X in self.variables if X != var]
cpt = {event_values(e, variables): sum(self.p(extend(e, var, val))
for val in bn.variable_values(var))
for e in all_events(variables, bn, {})}
return Factor(variables, cpt)
def normalize(self):
"Return my probabilities; must be down to one variable."
assert len(self.variables) == 1
return ProbDist(self.variables[0],
{k: v for ((k,), v) in self.cpt.items()})
def p(self, e):
"Look up my value tabulated for e."
return self.cpt[event_values(e, self.variables)]
def all_events(variables, bn, e):
"Yield every way of extending e with values for all variables."
if not variables:
yield e
else:
X, rest = variables[0], variables[1:]
for e1 in all_events(rest, bn, e):
for x in bn.variable_values(X):
yield extend(e1, X, x)
# ______________________________________________________________________________
# [Figure 14.12a]: sprinkler network
sprinkler = BayesNet([
('Cloudy', '', 0.5),
('Sprinkler', 'Cloudy', {T: 0.10, F: 0.50}),
('Rain', 'Cloudy', {T: 0.80, F: 0.20}),
('WetGrass', 'Sprinkler Rain',
{(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})])
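# Illustrative usage sketch (added; not part of the original file): exact
# inference on the sprinkler network with variable elimination. The call
# returns a ProbDist over {True, False}; no particular numbers are asserted.
def _example_sprinkler_query():
    return elimination_ask('Rain', dict(Cloudy=T, WetGrass=T), sprinkler)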
# ______________________________________________________________________________
def prior_sample(bn):
"""Randomly sample from bn's full joint distribution. The result
is a {variable: value} dict. [Figure 14.13]"""
event = {}
for node in bn.nodes:
event[node.variable] = node.sample(event)
return event
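# Rough Monte Carlo sketch (added for illustration): estimate P(Rain=True) in
# the sprinkler network by drawing prior samples and counting. The estimate
# is noisy and depends on the random seed.
def _example_prior_sampling(n=1000):
    hits = sum(prior_sample(sprinkler)['Rain'] for _ in range(n))
    return hits / n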
# _________________________________________________________________________
def rejection_sampling(X, e, bn, N):
"""Estimate the probability distribution of variable X given
evidence e in BayesNet bn, using N samples. [Figure 14.14]
Raises a ZeroDivisionError if all the N samples are rejected,
i.e., inconsistent with e.
>>> random.seed(47)
>>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T),
... burglary, 10000).show_approx()
'False: 0.7, True: 0.3'
"""
counts = {x: 0 for x in bn.variable_values(X)} # bold N in [Figure 14.14]
for j in range(N):
sample = prior_sample(bn) # boldface x in [Figure 14.14]
if consistent_with(sample, e):
counts[sample[X]] += 1
return ProbDist(X, counts)
def consistent_with(event, evidence):
"Is event consistent with the given evidence?"
return all(evidence.get(k, v) == v
for k, v in event.items())
# _________________________________________________________________________
def likelihood_weighting(X, e, bn, N):
"""Estimate the probability distribution of variable X given
evidence e in BayesNet bn. [Figure 14.15]
>>> random.seed(1017)
>>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T),
... burglary, 10000).show_approx()
'False: 0.702, True: 0.298'
"""
W = {x: 0 for x in bn.variable_values(X)}
for j in range(N):
sample, weight = weighted_sample(bn, e) # boldface x, w in [Figure 14.15]
W[sample[X]] += weight
return ProbDist(X, W)
def weighted_sample(bn, e):
"""Sample an event from bn that's consistent with the evidence e;
return the event and its weight, the likelihood that the event
accords to the evidence."""
w = 1
event = dict(e) # boldface x in [Figure 14.15]
for node in bn.nodes:
Xi = node.variable
if Xi in e:
w *= node.p(e[Xi], event)
else:
event[Xi] = node.sample(event)
return event, w
# _________________________________________________________________________
def gibbs_ask(X, e, bn, N):
"""[Figure 14.16]"""
assert X not in e, "Query variable must be distinct from evidence"
counts = {x: 0 for x in bn.variable_values(X)} # bold N in [Figure 14.16]
Z = [var for var in bn.variables if var not in e]
state = dict(e) # boldface x in [Figure 14.16]
for Zi in Z:
state[Zi] = random.choice(bn.variable_values(Zi))
for j in range(N):
for Zi in Z:
state[Zi] = markov_blanket_sample(Zi, state, bn)
counts[state[X]] += 1
return ProbDist(X, counts)
def markov_blanket_sample(X, e, bn):
"""Return a sample from P(X | mb) where mb denotes that the
variables in the Markov blanket of X take their values from event
e (which must assign a value to each). The Markov blanket of X is
X's parents, children, and children's parents."""
Xnode = bn.variable_node(X)
Q = ProbDist(X)
for xi in bn.variable_values(X):
ei = extend(e, X, xi)
# [Equation 14.12:]
Q[xi] = Xnode.p(xi, e) * product(Yj.p(ei[Yj.variable], ei)
for Yj in Xnode.children)
# (assuming a Boolean variable here)
return probability(Q.normalize()[True])
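# Illustrative usage sketch (added; not original code): gibbs_ask resamples each
# non-evidence variable from its Markov blanket. On the burglary network used in
# the doctests above, the estimate should approach the exact posterior, roughly
# 'False: 0.716, True: 0.284'.
def _gibbs_demo(N=1000):
    "Approximate P(Burglary | JohnCalls=T, MaryCalls=T) by Gibbs sampling."
    return gibbs_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary, N).show_approx()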
# _________________________________________________________________________
class HiddenMarkovModel:
""" A Hidden markov model which takes Transition model and Sensor model as inputs"""
def __init__(self, transition_model, sensor_model, prior=[0.5, 0.5]):
self.transition_model = transition_model
self.sensor_model = sensor_model
self.prior = prior
def sensor_dist(self, ev):
if ev is True:
return self.sensor_model[0]
else:
return self.sensor_model[1]
def forward(HMM, fv, ev):
prediction = vector_add(scalar_vector_product(fv[0], HMM.transition_model[0]),
scalar_vector_product(fv[1], HMM.transition_model[1]))
sensor_dist = HMM.sensor_dist(ev)
return normalize(element_wise_product(sensor_dist, prediction))
def backward(HMM, b, ev):
sensor_dist = HMM.sensor_dist(ev)
prediction = element_wise_product(sensor_dist, b)
return normalize(vector_add(scalar_vector_product(prediction[0], HMM.transition_model[0]),
scalar_vector_product(prediction[1], HMM.transition_model[1])))
def forward_backward(HMM, ev, prior):
"""[Figure 15.4]
Forward-Backward algorithm for smoothing. Computes posterior probabilities
of a sequence of states given a sequence of observations."""
t = len(ev)
ev.insert(0, None) # to make the code look similar to pseudo code
fv = [[0.0, 0.0] for i in range(len(ev))]
b = [1.0, 1.0]
bv = [b] # we don't need bv; but we will have a list of all backward messages here
sv = [[0, 0] for i in range(len(ev))]
fv[0] = prior
for i in range(1, t + 1):
fv[i] = forward(HMM, fv[i - 1], ev[i])
for i in range(t, -1, -1):
sv[i - 1] = normalize(element_wise_product(fv[i], b))
b = backward(HMM, b, ev[i])
bv.append(b)
sv = sv[::-1]
return sv
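# Illustrative usage sketch (added; not original code): forward_backward takes a
# two-state HiddenMarkovModel and a list of boolean observations. The numbers
# below are the textbook umbrella-world parameters; the evidence sequence is
# just an example.
def _umbrella_smoothing_demo():
    umbrella_transition = [[0.7, 0.3], [0.3, 0.7]]
    umbrella_sensor = [[0.9, 0.2], [0.1, 0.8]]
    hmm = HiddenMarkovModel(umbrella_transition, umbrella_sensor)
    return forward_backward(hmm, [T, T, F, T, T], [0.5, 0.5])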
# _________________________________________________________________________
def fixed_lag_smoothing(e_t, HMM, d, ev, t):
"""[Figure 15.6]
Smoothing algorithm with a fixed time lag of 'd' steps.
Online algorithm that outputs the new smoothed estimate if observation
for new time step is given."""
ev.insert(0, None)
T_model = HMM.transition_model
f = HMM.prior
B = [[1, 0], [0, 1]]
evidence = []
evidence.append(e_t)
O_t = vector_to_diagonal(HMM.sensor_dist(e_t))
if t > d:
f = forward(HMM, f, e_t)
O_tmd = vector_to_diagonal(HMM.sensor_dist(ev[t - d]))
B = matrix_multiplication(inverse_matrix(O_tmd), inverse_matrix(T_model), B, T_model, O_t)
else:
B = matrix_multiplication(B, T_model, O_t)
t = t + 1
if t > d:
# always returns a 1x2 matrix
return [normalize(i) for i in matrix_multiplication([f], B)][0]
else:
return None
# _________________________________________________________________________
def particle_filtering(e, N, HMM):
"""Particle filtering considering two states variables."""
s = []
dist = [0.5, 0.5]
# State Initialization
s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
# Weight Initialization
w = [0 for i in range(N)]
# STEP 1
# Propagate one step using transition model given prior state
dist = vector_add(scalar_vector_product(dist[0], HMM.transition_model[0]),
scalar_vector_product(dist[1], HMM.transition_model[1]))
# Assign state according to probability
s = ['A' if probability(dist[0]) else 'B' for i in range(N)]
w_tot = 0
# Calculate importance weight given evidence e
for i in range(N):
if s[i] == 'A':
# P(U|A)*P(A)
w_i = HMM.sensor_dist(e)[0] * dist[0]
if s[i] == 'B':
# P(U|B)*P(B)
w_i = HMM.sensor_dist(e)[1] * dist[1]
w[i] = w_i
w_tot += w_i
# Normalize all the weights
for i in range(N):
w[i] = w[i] / w_tot
# Limit weights to 4 digits
for i in range(N):
w[i] = float("{0:.4f}".format(w[i]))
# STEP 2
s = weighted_sample_with_replacement(s, w, N)
return s
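# Illustrative note (added): particle_filtering expects the same two-state HMM
# shape as above and returns a list of N resampled states, e.g.
#     hmm = HiddenMarkovModel([[0.7, 0.3], [0.3, 0.7]], [[0.9, 0.2], [0.1, 0.8]])
#     particle_filtering(T, 100, hmm)   # -> a list like ['A', 'A', 'B', ...]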
| mit | -369,382,309,511,127,800 | 33.661069 | 98 | 0.560587 | false |
kylewm/feverdream | silopub/twitter.py | 2 | 12917 | import html
import os
import re
import requests
import sys
import tempfile
import urllib.parse
from flask import Blueprint, current_app, redirect, url_for, request, flash
from flask import session
from requests_oauthlib import OAuth1Session, OAuth1
from silopub import util
from silopub.ext import db
from silopub.models import Account, Twitter
import brevity
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
AUTHENTICATE_URL = 'https://api.twitter.com/oauth/authenticate'
AUTHORIZE_URL = 'https://api.twitter.com/oauth/authorize'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
VERIFY_CREDENTIALS_URL = 'https://api.twitter.com/1.1/account/verify_credentials.json'
CREATE_STATUS_URL = 'https://api.twitter.com/1.1/statuses/update.json'
# CREATE_WITH_MEDIA_URL = 'https://api.twitter.com/1.1/statuses/update_with_media.json'
UPLOAD_MEDIA_URL = 'https://upload.twitter.com/1.1/media/upload.json'
RETWEET_STATUS_URL = 'https://api.twitter.com/1.1/statuses/retweet/{}.json'
FAVE_STATUS_URL = 'https://api.twitter.com/1.1/favorites/create.json'
TWEET_RE = re.compile(r'https?://(?:www\.|mobile\.)?twitter\.com/(\w+)/status(?:es)?/(\w+)')
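# e.g. TWEET_RE.match('https://twitter.com/example/status/1234567890')
# captures ('example', '1234567890'); 'www.' and 'mobile.' prefixes also match.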
SERVICE_NAME = 'twitter'
twitter = Blueprint('twitter', __name__)
@twitter.route('/twitter.com/<username>')
def proxy_homepage(username):
account = Account.query.filter_by(
service=SERVICE_NAME, username=username).first()
params = {
'service_name': 'Twitter',
'service_url': 'https://twitter.com/',
'service_photo': 'https://abs.twimg.com/favicons/favicon.ico',
}
if account:
params.update({
'user_name': '@' + account.username,
'user_url': account.sites[0].url,
'user_photo': (account.user_info or {}).get(
'profile_image_url_https'),
})
else:
params.update({
'user_name': '@' + username,
'user_url': 'https://twitter.com/' + username,
})
return util.render_proxy_homepage(**params)
@twitter.route('/twitter/authorize', methods=['POST'])
def authorize():
try:
callback_uri = url_for('.callback', _external=True)
return redirect(get_authorize_url(callback_uri))
except:
current_app.logger.exception('Starting Twitter authorization')
flash(html.escape(str(sys.exc_info()[0])), 'danger')
return redirect(url_for('views.index'))
@twitter.route('/twitter/callback')
def callback():
try:
callback_uri = url_for('.callback', _external=True)
result = process_callback(callback_uri)
if 'error' in result:
flash(result['error'], category='danger')
return redirect(url_for('views.index'))
account = result['account']
return redirect(url_for('views.setup_account', service=SERVICE_NAME,
user_id=account.user_id))
except:
current_app.logger.exception('During Twitter authorization callback')
flash(html.escape(str(sys.exc_info()[0])), 'danger')
return redirect(url_for('views.index'))
def get_authorize_url(callback_uri, me=None, **kwargs):
session.pop('oauth_token', None)
session.pop('oauth_token_secret', None)
oauth_session = OAuth1Session(
client_key=current_app.config['TWITTER_CLIENT_KEY'],
client_secret=current_app.config['TWITTER_CLIENT_SECRET'],
callback_uri=callback_uri)
r = oauth_session.fetch_request_token(REQUEST_TOKEN_URL)
session['oauth_token'] = r.get('oauth_token')
session['oauth_token_secret'] = r.get('oauth_token_secret')
params = {'force_login': 'true'}
if me:
params['screen_name'] = me.split('/')[-1]
return oauth_session.authorization_url(
AUTHORIZE_URL + '?' + urllib.parse.urlencode(params))
def process_callback(callback_uri):
verifier = request.args.get('oauth_verifier')
if not verifier:
# user declined
return {'error': 'Twitter authorization declined'}
request_token = session.get('oauth_token')
request_token_secret = session.get('oauth_token_secret')
oauth_session = OAuth1Session(
client_key=current_app.config['TWITTER_CLIENT_KEY'],
client_secret=current_app.config['TWITTER_CLIENT_SECRET'],
resource_owner_key=request_token,
resource_owner_secret=request_token_secret,
callback_uri=callback_uri)
oauth_session.parse_authorization_response(request.url)
# get the access token and secret
r = oauth_session.fetch_access_token(ACCESS_TOKEN_URL)
access_token = r.get('oauth_token')
access_token_secret = r.get('oauth_token_secret')
current_app.logger.debug('request token: %s, secret: %s',
request_token, request_token_secret)
current_app.logger.debug('access token: %s, secret: %s',
access_token, access_token_secret)
auth = OAuth1(
client_key=current_app.config['TWITTER_CLIENT_KEY'],
client_secret=current_app.config['TWITTER_CLIENT_SECRET'],
resource_owner_key=access_token,
resource_owner_secret=access_token_secret)
user_info = requests.get(VERIFY_CREDENTIALS_URL, auth=auth).json()
if 'errors' in user_info:
return {'error': 'Error fetching credentials %r'
% user_info.get('errors')}
user_id = user_info.get('id_str')
username = user_info.get('screen_name')
current_app.logger.debug('verified credentials. user_id=%s, username=%s',
user_id, username)
current_app.logger.debug('user_info: %r', user_info)
account = Account.query.filter_by(
service='twitter', user_id=user_id).first()
if not account:
account = Account(service='twitter', user_id=user_id)
db.session.add(account)
account.username = username
account.user_info = user_info
account.token = access_token
account.token_secret = access_token_secret
account.update_sites([Twitter(
url='https://twitter.com/{}'.format(account.username),
domain='twitter.com/{}'.format(account.username),
site_id=account.user_id)])
db.session.commit()
util.set_authed(account.sites)
return {'account': account}
def publish(site):
auth = OAuth1(
client_key=current_app.config['TWITTER_CLIENT_KEY'],
client_secret=current_app.config['TWITTER_CLIENT_SECRET'],
resource_owner_key=site.account.token,
resource_owner_secret=site.account.token_secret)
def interpret_response(result):
if result.status_code // 100 != 2:
return util.wrap_silo_error_response(result)
result_json = result.json()
twitter_url = 'https://twitter.com/{}/status/{}'.format(
result_json.get('user', {}).get('screen_name'),
result_json.get('id_str'))
return util.make_publish_success_response(twitter_url, result_json)
def get_tweet_id(original):
tweet_url = util.posse_post_discovery(original, TWEET_RE)
if tweet_url:
m = TWEET_RE.match(tweet_url)
if m:
return m.group(1), m.group(2)
return None, None
def upload_photo(photo):
current_app.logger.debug('uploading photo, name=%s, type=%s',
photo.filename, photo.content_type)
result = requests.post(UPLOAD_MEDIA_URL, files={
'media': (photo.filename, photo.stream, photo.content_type),
}, auth=auth)
if result.status_code // 100 != 2:
return None, result
result_data = result.json()
current_app.logger.debug('upload result: %s', result_data)
return result_data.get('media_id_string'), None
def upload_video(video, default_content_type='video/mp4'):
# chunked video upload
chunk_files = []
def cleanup():
for f in chunk_files:
os.unlink(f)
chunk_size = 1 << 20
total_size = 0
while True:
chunk = video.read(chunk_size)
if not chunk:
break
total_size += len(chunk)
tempfd, tempfn = tempfile.mkstemp('-%03d-%s' % (
len(chunk_files), video.filename))
with open(tempfn, 'wb') as f:
f.write(chunk)
chunk_files.append(tempfn)
current_app.logger.debug('init upload. type=%s, length=%s',
video.content_type, video.content_length)
result = requests.post(UPLOAD_MEDIA_URL, data={
'command': 'INIT',
'media_type': video.content_type or default_content_type,
'total_bytes': total_size,
}, auth=auth)
current_app.logger.debug('init result: %s %s', result, result.text)
if result.status_code // 100 != 2:
cleanup()
return None, result
result_data = result.json()
media_id = result_data.get('media_id_string')
segment_idx = 0
for chunk_file in chunk_files:
current_app.logger.debug('appending file: %s', chunk_file)
result = requests.post(UPLOAD_MEDIA_URL, data={
'command': 'APPEND',
'media_id': media_id,
'segment_index': segment_idx,
}, files={
'media': open(chunk_file, 'rb'),
}, auth=auth)
current_app.logger.debug(
'append result: %s %s', result, result.text)
if result.status_code // 100 != 2:
cleanup()
return None, result
segment_idx += 1
current_app.logger.debug('finalize uploading video: %s', media_id)
result = requests.post(UPLOAD_MEDIA_URL, data={
'command': 'FINALIZE',
'media_id': media_id,
}, auth=auth)
current_app.logger.debug('finalize result: %s %s', result, result.text)
if result.status_code // 100 != 2:
cleanup()
return None, result
cleanup()
return media_id, None
data = {}
format = brevity.FORMAT_NOTE
content = request.form.get('content[value]') or request.form.get('content')
if 'name' in request.form:
format = brevity.FORMAT_ARTICLE
content = request.form.get('name')
repost_ofs = util.get_possible_array_value(request.form, 'repost-of')
for repost_of in repost_ofs:
_, tweet_id = get_tweet_id(repost_of)
if tweet_id:
return interpret_response(
requests.post(RETWEET_STATUS_URL.format(tweet_id), auth=auth))
else:
if repost_ofs:
content = 'Reposted: {}'.format(repost_ofs[0])
like_ofs = util.get_possible_array_value(request.form, 'like-of')
for like_of in like_ofs:
_, tweet_id = get_tweet_id(like_of)
if tweet_id:
return interpret_response(
requests.post(FAVE_STATUS_URL, data={'id': tweet_id}, auth=auth))
else:
if like_ofs:
content = 'Liked: {}'.format(like_ofs[0])
media_ids = []
for photo in util.get_files_or_urls_as_file_storage(request.files, request.form, 'photo'):
media_id, err = upload_photo(photo)
if err:
return util.wrap_silo_error_response(err)
media_ids.append(media_id)
for video in util.get_files_or_urls_as_file_storage(request.files, request.form, 'video'):
media_id, err = upload_video(video)
if err:
return util.wrap_silo_error_response(err)
media_ids.append(media_id)
in_reply_tos = util.get_possible_array_value(request.form, 'in-reply-to')
for in_reply_to in in_reply_tos:
twitterer, tweet_id = get_tweet_id(in_reply_to)
if tweet_id:
data['in_reply_to_status_id'] = tweet_id
break
else:
if in_reply_tos:
content = 'Re: {}, {}'.format(in_reply_tos[0], content)
location = request.form.get('location')
current_app.logger.debug('received location param: %s', location)
data['lat'], data['long'] = util.parse_geo_uri(location)
permalink_url = request.form.get('url')
if media_ids:
data['media_ids'] = ','.join(media_ids)
if content:
data['status'] = brevity.shorten(content, permalink=permalink_url,
format=format, target_length=280)
# for in-reply-to tweets, leading @mentions will be looked up from the original Tweet, and added to the new Tweet from there.
# https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-update
data['auto_populate_reply_metadata'] = 'true'
data = util.trim_nulls(data)
current_app.logger.debug('publishing with params %s', data)
return interpret_response(
requests.post(CREATE_STATUS_URL, data=data, auth=auth))
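# Illustrative note (added, not part of the module): publish() consumes
# micropub-style form fields. A hypothetical request might carry, for example:
#   content=Hello world
#   in-reply-to=https://twitter.com/example/status/1234567890
#   photo=<uploaded file or URL>
# in which case the handler uploads the media, resolves the reply target via
# TWEET_RE, shortens the text with brevity and posts it to CREATE_STATUS_URL.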
| bsd-2-clause | -8,404,616,224,606,275,000 | 35.905714 | 129 | 0.609662 | false |
jarekwg/pyxero | xero/constants.py | 3 | 2057 | XERO_BASE_URL = "https://api.xero.com"
REQUEST_TOKEN_URL = "/oauth/RequestToken"
AUTHORIZE_URL = "/oauth/Authorize"
ACCESS_TOKEN_URL = "/oauth/AccessToken"
XERO_API_URL = "/api.xro/2.0"
XERO_FILES_URL = "/files.xro/1.0"
XERO_PAYROLL_URL = "/payroll.xro/1.0"
XERO_PROJECTS_URL = "/projects.xro/2.0"
XERO_OAUTH2_AUTHORIZE_URL = "https://login.xero.com/identity/connect/authorize"
XERO_OAUTH2_TOKEN_URL = "https://identity.xero.com/connect/token"
XERO_OAUTH2_CONNECTIONS_URL = "/connections"
class XeroScopes:
# Offline Access
OFFLINE_ACCESS = "offline_access"
# OpenID connection
OPENID = "openid"
PROFILE = "profile"
EMAIL = "email"
# Accounting API
ACCOUNTING_TRANSACTIONS = "accounting.transactions"
ACCOUNTING_TRANSACTIONS_READ = "accounting.transactions.read"
ACCOUNTING_REPORTS_READ = "accounting.reports.read"
ACCOUNTING_JOURNALS_READ = "accounting.journals.read"
ACCOUNTING_SETTINGS = "accounting.settings"
ACCOUNTING_SETTINGS_READ = "accounting.settings.read"
ACCOUNTING_CONTACTS = "accounting.contacts"
ACCOUNTING_CONTACTS_READ = "accounting.contacts.read"
ACCOUNTING_ATTACHMENTS = "accounting.attachments"
ACCOUNTING_ATTACHMENTS_READ = "accounting.attachments.read"
# Payroll API
PAYROLL_EMPLOYEES = "payroll.employees"
PAYROLL_EMPLOYEES_READ = "payroll.employees.read"
PAYROLL_PAYRUNS = "payroll.payruns"
PAYROLL_PAYRUNS_READ = "payroll.payruns.read"
PAYROLL_PAYSLIP = "payroll.payslip"
PAYROLL_PAYSLIP_READ = "payroll.payslip.read"
PAYROLL_TIMESHEETS = "payroll.timesheets"
PAYROLL_TIMESHEETS_READ = "payroll.timesheets.read"
PAYROLL_SETTINGS = "payroll.settings"
PAYROLL_SETTINGS_READ = "payroll.settings.read"
# Files API
FILES = "files"
FILES_READ = "files.read"
    # Assets API
ASSETS = "assets"
ASSETS_READ = "assets.read"
# Projects API
PROJECTS = "projects"
PROJECTS_READ = "projects.read"
# Restricted Scopes
PAYMENTSERVICES = "paymentservices"
BANKFEEDS = "bankfeeds"
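# Illustrative sketch (added; not part of the library): OAuth2 scopes are
# usually requested as a space-separated string, so a client might combine
# these constants along the following lines (the variable name is hypothetical):
#
#     scope = " ".join([
#         XeroScopes.OFFLINE_ACCESS,
#         XeroScopes.ACCOUNTING_TRANSACTIONS,
#         XeroScopes.ACCOUNTING_CONTACTS_READ,
#     ])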
| bsd-3-clause | -6,868,531,020,613,917,000 | 31.650794 | 79 | 0.707827 | false |
Visgean/django-autocomplete-light | autocomplete_light/tests/test_fields.py | 2 | 5744 | from __future__ import unicode_literals
import pytest
import autocomplete_light.shortcuts as autocomplete_light
from django import forms
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from ..example_apps.basic.models import GfkModel
from ..example_apps.security_test.models import Item
class BaseMixin(object):
GOOD_VALUE = 'b'
CLEANED_VALUE = 'b'
BAD_VALUE = 'xx'
EMPTY_VALUE = None
class TestAutocomplete(autocomplete_light.AutocompleteListBase):
choices = ['a', 'b', 'c']
def test_automatic_field_choices(self):
test = self.field_class(self.TestAutocomplete)
self.assertEqual(test.choices, [('a', 'a'), ('b', 'b'), ('c', 'c')])
def test_validate(self):
test = self.field_class(self.TestAutocomplete)
test.validate(self.GOOD_VALUE)
with self.assertRaises(forms.ValidationError):
test.validate(self.BAD_VALUE)
def test_validate_required(self):
test = self.field_class(self.TestAutocomplete, required=True)
with pytest.raises(forms.ValidationError):
test.validate(self.EMPTY_VALUE)
def test_select_choice(self):
class TestForm(forms.Form):
test_field = self.field_class(self.TestAutocomplete)
form = TestForm({'test_field': self.GOOD_VALUE})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['test_field'], self.CLEANED_VALUE)
class ChoiceFieldTestCase(BaseMixin, TestCase):
field_class = autocomplete_light.ChoiceField
class MultipleChoiceFieldTestCase(BaseMixin, TestCase):
field_class = autocomplete_light.MultipleChoiceField
GOOD_VALUE = ['b']
CLEANED_VALUE = ['b']
class ModelChoiceFieldTestCase(BaseMixin, TestCase):
fixtures = ['security_test.json']
field_class = autocomplete_light.ModelChoiceField
GOOD_VALUE = 1
BAD_VALUE = 2
def setUp(self):
self.CLEANED_VALUE = Item.objects.get(pk=self.GOOD_VALUE)
class TestAutocomplete(autocomplete_light.AutocompleteModelBase):
choices = Item.objects.filter(private=False)
def test_automatic_field_choices(self):
test = self.field_class(self.TestAutocomplete, required=True)
self.assertEqual(list(test.choices),
[('', '---------'), (1, 'public'), (3, 'linked')])
class ModelMultipleChoiceFieldTestCase(ModelChoiceFieldTestCase):
field_class = autocomplete_light.ModelMultipleChoiceField
GOOD_VALUE = [1]
BAD_VALUE = [2]
def setUp(self):
self.CLEANED_VALUE = Item.objects.filter(pk=1)
def test_automatic_field_choices(self):
test = self.field_class(self.TestAutocomplete, required=True)
self.assertEqual(list(test.choices),
[(1, 'public'), (3, 'linked')])
def test_select_choice(self):
class TestForm(forms.Form):
test_field = self.field_class(self.TestAutocomplete)
form = TestForm({'test_field': self.GOOD_VALUE})
self.assertTrue(form.is_valid())
self.assertEqual(len(form.cleaned_data['test_field']),
len(self.CLEANED_VALUE))
self.assertEqual(form.cleaned_data['test_field'][0],
self.CLEANED_VALUE[0])
class CustomModelChoiceField(forms.ModelChoiceField):
pass
class CustomModelMultipleChoiceField(forms.ModelMultipleChoiceField):
pass
class CustomAutocompleteModelChoiceField(autocomplete_light.FieldBase,
CustomModelChoiceField):
""" Autocomplete form field which inherently, but not directly, inherits
forms.ModelChoiceField
"""
widget = autocomplete_light.ChoiceWidget
class CustomAutocompleteModelMultipleChoiceField(autocomplete_light.FieldBase,
CustomModelMultipleChoiceField):
""" Autocomplete form field which inherently, but not directly, inherits
forms.ModelMultipleChoiceField
"""
widget = autocomplete_light.MultipleChoiceWidget
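# Usage sketch (illustrative addition): the custom fields above are used exactly
# like the stock autocomplete fields, e.g. in a form, where ItemAutocomplete is
# a hypothetical registered autocomplete class:
#
#     class ItemForm(forms.Form):
#         item = CustomAutocompleteModelChoiceField(ItemAutocomplete)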
class CustomModelFieldTestCase(ModelChoiceFieldTestCase):
""" Regression test for a custom ModelChoiceField
https://github.com/yourlabs/django-autocomplete-light/issues/379
"""
field_class = CustomAutocompleteModelChoiceField
class CustomMultipleModelFieldTestCase(ModelMultipleChoiceFieldTestCase):
""" Regression test for a custom ModelMultipleChoiceField
https://github.com/yourlabs/django-autocomplete-light/issues/379
"""
field_class = CustomAutocompleteModelMultipleChoiceField
class GenericModelChoiceFieldTestCase(BaseMixin, TestCase):
field_class = autocomplete_light.GenericModelChoiceField
fixtures = ['basic_gfk_gmtm.json']
class TestAutocomplete(autocomplete_light.AutocompleteGenericBase):
choices = [GfkModel.objects.all()]
def setUp(self):
self.gfk_ct = ContentType.objects.get_for_model(GfkModel)
self.GOOD_VALUE = '%s-%s' % (self.gfk_ct.pk, 1)
self.BAD_VALUE = '%s-%s' % (self.gfk_ct.pk, 1234)
self.CLEANED_VALUE = GfkModel.objects.get(pk=1)
def test_automatic_field_choices(self):
pass # generic model choice field has no choices
class GenericModelMultipleChoiceFieldTestCase(GenericModelChoiceFieldTestCase):
field_class = autocomplete_light.GenericModelMultipleChoiceField
def setUp(self):
self.gfk_ct = ContentType.objects.get_for_model(GfkModel)
self.GOOD_VALUE = ['%s-%s' % (self.gfk_ct.pk, 1)]
self.BAD_VALUE = ['%s-%s' % (self.gfk_ct.pk, 1234)]
self.CLEANED_VALUE = [GfkModel.objects.get(pk=1)]
def test_automatic_field_choices(self):
pass # generic model choice field has no choices
| mit | -4,094,766,424,217,917,400 | 33.190476 | 81 | 0.685411 | false |
Kaezon/allianceauth | allianceauth/services/modules/teamspeak3/manager.py | 2 | 12647 | import logging
from django.conf import settings
from .util.ts3 import TS3Server, TeamspeakError
from .models import TSgroup
logger = logging.getLogger(__name__)
class Teamspeak3Manager:
def __init__(self):
self._server = None
@property
def server(self):
if self._server is not None and self._server._connected:
return self._server
else:
raise ValueError("Teamspeak not connected")
def connect(self):
self._server = self.__get_created_server()
return self
def disconnect(self):
self._server.disconnect()
self._server = None
def __enter__(self):
logger.debug("Entering with statement, connecting")
self.connect()
return self
def __exit__(self, _type, value, traceback):
logger.debug("Exiting with statement, cleaning up")
self.disconnect()
@staticmethod
def __get_created_server():
server = TS3Server(settings.TEAMSPEAK3_SERVER_IP, settings.TEAMSPEAK3_SERVER_PORT)
server.login(settings.TEAMSPEAK3_SERVERQUERY_USER, settings.TEAMSPEAK3_SERVERQUERY_PASSWORD)
server.use(settings.TEAMSPEAK3_VIRTUAL_SERVER)
logger.debug("Got TS3 server instance based on settings.")
return server
@staticmethod
def __santatize_username(username):
sanatized = username.replace(" ", "_")
sanatized = sanatized.replace("'", "-")
return sanatized
def _get_userid(self, uid):
logger.debug("Looking for uid %s on TS3 server." % uid)
try:
ret = self.server.send_command('customsearch', {'ident': 'sso_uid', 'pattern': uid})
if ret and 'keys' in ret and 'cldbid' in ret['keys']:
logger.debug("Got userid %s for uid %s" % (ret['keys']['cldbid'], uid))
return ret['keys']['cldbid']
except TeamspeakError as e:
if not e.code == '1281':
raise e
return None
def _group_id_by_name(self, groupname):
logger.debug("Looking for group %s on TS3 server." % groupname)
group_cache = self.server.send_command('servergrouplist')
logger.debug("Received group cache from server: %s" % group_cache)
for group in group_cache:
logger.debug("Checking group %s" % group)
if group['keys']['name'] == groupname:
logger.debug("Found group %s, returning id %s" % (groupname, group['keys']['sgid']))
return group['keys']['sgid']
logger.debug("Group %s not found on server." % groupname)
return None
def _create_group(self, groupname):
logger.debug("Creating group %s on TS3 server." % groupname)
sgid = self._group_id_by_name(groupname)
if not sgid:
logger.debug("Group does not yet exist. Proceeding with creation.")
ret = self.server.send_command('servergroupadd', {'name': groupname})
self.__group_cache = None
sgid = ret['keys']['sgid']
self.server.send_command('servergroupaddperm',
{'sgid': sgid, 'permsid': 'i_group_needed_modify_power', 'permvalue': 75,
'permnegated': 0, 'permskip': 0})
self.server.send_command('servergroupaddperm',
{'sgid': sgid, 'permsid': 'i_group_needed_member_add_power', 'permvalue': 100,
'permnegated': 0, 'permskip': 0})
self.server.send_command('servergroupaddperm',
{'sgid': sgid, 'permsid': 'i_group_needed_member_remove_power', 'permvalue': 100,
'permnegated': 0, 'permskip': 0})
logger.info("Created group on TS3 server with name %s and id %s" % (groupname, sgid))
return sgid
def _user_group_list(self, cldbid):
logger.debug("Retrieving group list for user with id %s" % cldbid)
try:
groups = self.server.send_command('servergroupsbyclientid', {'cldbid': cldbid})
except TeamspeakError as e:
if e.code == '1281': # no groups
groups = []
else:
raise e
logger.debug("Retrieved group list: %s" % groups)
outlist = {}
if type(groups) == list:
logger.debug("Recieved multiple groups. Iterating.")
for group in groups:
outlist[group['keys']['name']] = group['keys']['sgid']
elif type(groups) == dict:
logger.debug("Recieved single group.")
outlist[groups['keys']['name']] = groups['keys']['sgid']
logger.debug("Returning name/id pairing: %s" % outlist)
return outlist
def _group_list(self):
logger.debug("Retrieving group list on TS3 server.")
group_cache = self.server.send_command('servergrouplist')
logger.debug("Received group cache from server: %s" % group_cache)
outlist = {}
if group_cache:
for group in group_cache:
logger.debug("Assigning name/id dict: %s = %s" % (group['keys']['name'], group['keys']['sgid']))
outlist[group['keys']['name']] = group['keys']['sgid']
else:
logger.error("Received empty group cache while retrieving group cache from TS3 server. 1024 error.")
logger.debug("Returning name/id pairing: %s" % outlist)
return outlist
def _add_user_to_group(self, uid, groupid):
logger.debug("Adding group id %s to TS3 user id %s" % (groupid, uid))
user_groups = self._user_group_list(uid)
if groupid not in user_groups.values():
logger.debug("User does not have group already. Issuing command to add.")
self.server.send_command('servergroupaddclient',
{'sgid': str(groupid), 'cldbid': uid})
logger.info("Added user id %s to group id %s on TS3 server." % (uid, groupid))
def _remove_user_from_group(self, uid, groupid):
logger.debug("Removing group id %s from TS3 user id %s" % (groupid, uid))
user_groups = self._user_group_list(uid)
if str(groupid) in user_groups.values():
logger.debug("User is in group. Issuing command to remove.")
self.server.send_command('servergroupdelclient',
{'sgid': str(groupid), 'cldbid': uid})
logger.info("Removed user id %s from group id %s on TS3 server." % (uid, groupid))
def _sync_ts_group_db(self):
logger.debug("_sync_ts_group_db function called.")
try:
remote_groups = self._group_list()
local_groups = TSgroup.objects.all()
logger.debug("Comparing remote groups to TSgroup objects: %s" % local_groups)
for key in remote_groups:
logger.debug("Typecasting remote_group value at position %s to int: %s" % (key, remote_groups[key]))
remote_groups[key] = int(remote_groups[key])
for group in local_groups:
logger.debug("Checking local group %s" % group)
if group.ts_group_id not in remote_groups.values():
logger.debug(
"Local group id %s not found on server. Deleting model %s" % (group.ts_group_id, group))
TSgroup.objects.filter(ts_group_id=group.ts_group_id).delete()
for key in remote_groups:
g = TSgroup(ts_group_id=remote_groups[key], ts_group_name=key)
q = TSgroup.objects.filter(ts_group_id=g.ts_group_id)
if not q:
logger.debug("Local group does not exist for TS group %s. Creating TSgroup model %s" % (
remote_groups[key], g))
g.save()
except TeamspeakError as e:
logger.error("Error occured while syncing TS group db: %s" % str(e))
except:
logger.exception("An unhandled exception has occured while syncing TS groups.")
def add_user(self, username):
username_clean = self.__santatize_username(username[:30])
logger.debug("Adding user to TS3 server with cleaned username %s" % username_clean)
server_groups = self._group_list()
if 'Member' not in server_groups:
self._create_group('Member')
alliance_group_id = self._group_id_by_name('Member')
try:
ret = self.server.send_command('tokenadd', {'tokentype': 0, 'tokenid1': alliance_group_id, 'tokenid2': 0,
'tokendescription': username_clean,
'tokencustomset': "ident=sso_uid value=%s" % username_clean})
except TeamspeakError as e:
logger.error("Failed to add teamspeak user %s: %s" % (username, str(e)))
return "",""
try:
token = ret['keys']['token']
logger.info("Created permission token for user %s on TS3 server" % username_clean)
return username_clean, token
except:
logger.exception("Failed to add teamspeak user %s - received response: %s" % (username_clean, ret))
return "", ""
def delete_user(self, uid):
user = self._get_userid(uid)
logger.debug("Deleting user %s with id %s from TS3 server." % (user, uid))
if user:
clients = self.server.send_command('clientlist')
if isinstance(clients, dict):
# Rewrap list
clients = [clients]
for client in clients:
try:
if client['keys']['client_database_id'] == user:
logger.debug("Found user %s on TS3 server - issuing deletion command." % user)
self.server.send_command('clientkick', {'clid': client['keys']['clid'], 'reasonid': 5,
'reasonmsg': 'Auth service deleted'})
except:
logger.exception("Failed to delete user id %s from TS3 - received response %s" % (uid, client))
return False
try:
ret = self.server.send_command('clientdbdelete', {'cldbid': user})
except TeamspeakError as e:
logger.error("Failed to delete teamspeak user %s: %s" % (uid, str(e)))
return False
if ret == '0':
logger.info("Deleted user with id %s from TS3 server." % uid)
return True
else:
logger.exception("Failed to delete user id %s from TS3 - received response %s" % (uid, ret))
return False
else:
logger.warn("User with id %s not found on TS3 server. Assuming succesful deletion." % uid)
return True
def check_user_exists(self, uid):
if self._get_userid(uid):
return True
return False
def generate_new_permissionkey(self, uid, username):
logger.debug("Re-issuing permission key for user id %s" % uid)
self.delete_user(uid)
return self.add_user(username)
def update_groups(self, uid, ts_groups):
logger.debug("Updating uid %s TS3 groups %s" % (uid, ts_groups))
userid = self._get_userid(uid)
addgroups = []
remgroups = []
if userid is not None:
user_ts_groups = self._user_group_list(userid)
logger.debug("User has groups on TS3 server: %s" % user_ts_groups)
for key in user_ts_groups:
user_ts_groups[key] = int(user_ts_groups[key])
for ts_group_key in ts_groups:
logger.debug("Checking if user has group %s on TS3 server." % ts_group_key)
if ts_groups[ts_group_key] not in user_ts_groups.values():
addgroups.append(ts_groups[ts_group_key])
for user_ts_group_key in user_ts_groups:
if user_ts_groups[user_ts_group_key] not in ts_groups.values():
remgroups.append(user_ts_groups[user_ts_group_key])
for g in addgroups:
logger.info("Adding Teamspeak user %s into group %s" % (userid, g))
self._add_user_to_group(userid, g)
for g in remgroups:
logger.info("Removing Teamspeak user %s from group %s" % (userid, g))
self._remove_user_from_group(userid, g)
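# Illustrative usage sketch (added; not part of the module): the manager is a
# context manager, so callers typically do something like the following. The
# username and group id below are hypothetical.
#
#     with Teamspeak3Manager() as ts3:
#         sanitized_name, token = ts3.add_user('Some Pilot')
#         ts3.update_groups(sanitized_name, {'Member': 7})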
| gpl-2.0 | 2,354,705,907,607,022,000 | 44.822464 | 118 | 0.558868 | false |
hvandenb/splunk-elasticsearch | search-elasticsearch/bin/splunklib/searchcommands/__init__.py | 7 | 8457 | # Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
.. topic:: Design Notes
1. Commands are constrained to this ABNF grammar::
command = command-name *[wsp option] *[wsp [dquote] field-name [dquote]]
command-name = alpha *( alpha / digit )
option = option-name [wsp] "=" [wsp] option-value
option-name = alpha *( alpha / digit / "_" )
option-value = word / quoted-string
word = 1*( %01-%08 / %0B / %0C / %0E-1F / %21 / %23-%FF ) ; Any character but DQUOTE and WSP
quoted-string = dquote *( word / wsp / "\" dquote / dquote dquote ) dquote
field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )
   The grammar is constrained to an 8-bit character set. It does not show that
:code:`field-name` values may be comma-separated. This is because Splunk strips
commas from the command line. A search command will never see them.
   2. Commands must be statically configured as follows:
.. code-block:: text
:linenos:
[commandname]
filename = commandname.py
supports_getinfo = true
supports_rawargs = true
No other static configuration is required or expected and may interfere with
command execution.
   3. Commands support dynamic probing for settings.
Splunk probes for settings dynamically when :code:`supports_getinfo=true`.
You must add this line to the commands.conf stanza for each of your search
commands.
4. Commands do not support parsed arguments on the command line.
Splunk parses arguments when :code:`supports_rawargs=false`. The
:code:`SearchCommand` class sets this value unconditionally. You cannot
override it.
**Rationale**
Splunk parses arguments by stripping quotes, nothing more. This may be useful
in some cases, but doesn't work well with our chosen grammar.
5. Commands consume input headers.
An input header is provided by Splunk when :code:`enableheader=true`. The
:class:`SearchCommand` class sets this value unconditionally. You cannot
override it.
6. Commands produce an output messages header.
Splunk expects a command to produce an output messages header when
:code:`outputheader=true`. The :class:`SearchCommand` class sets this value
unconditionally. You cannot override it.
7. Commands support multi-value fields.
Multi-value fields are provided and consumed by Splunk when
:code:`supports_multivalue=true`. This value is fixed. You cannot override
it.
8. This module represents all fields on the output stream in multi-value
format.
Splunk recognizes two kinds of data: :code:`value` and :code:`list(value)`.
The multi-value format represents these data in field pairs. Given field
:code:`name` the multi-value format calls for the creation of this pair of
fields.
================= =========================================================
Field name Field data
================= =========================================================
:code:`name` Value or text from which a list of values was derived.
:code:`__mv_name` Empty, if :code:`field` represents a :code:`value`;
otherwise, an encoded :code:`list(value)`. Values in the
list are wrapped in dollar signs ($) and separated by
semi-colons (;). Dollar signs ($) within a value are
represented by a pair of dollar signs ($$).
================= =========================================================
Serializing data in this format enables streaming and reduces a command's
memory footprint at the cost of one extra byte of data per field per record
and a small amount of extra processing time by the next command in the
pipeline.
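   For example, applying the rules above, a field whose value is the list
   :code:`['a', 'b$c']` is encoded in its :code:`__mv_name` companion field as
   :code:`$a$;$b$$c$`.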
9. A :class:`ReportingCommand` must override :meth:`~ReportingCommand.reduce`
and may override :meth:`~ReportingCommand.map`. Map/reduce commands on the
Splunk processing pipeline are distinguished as this example illustrates.
**Splunk command**
.. code-block:: text
sum total=total_date_hour date_hour
**Map command line**
.. code-block:: text
sum __GETINFO__ __map__ total=total_date_hour date_hour
sum __EXECUTE__ __map__ total=total_date_hour date_hour
**Reduce command line**
.. code-block:: text
sum __GETINFO__ total=total_date_hour date_hour
sum __EXECUTE__ total=total_date_hour date_hour
The :code:`__map__` argument is introduced by
:meth:`ReportingCommand._execute`. Search command authors cannot influence
the contents of the command line in this release.
.. topic:: References
1. `Search command style guide <http://docs.splunk.com/Documentation/Splunk/6.0/Search/Searchcommandstyleguide>`_
2. `Commands.conf.spec <http://docs.splunk.com/Documentation/Splunk/5.0.5/Admin/Commandsconf>`_
"""
from __future__ import absolute_import
import sys
from .decorators import *
from .validators import *
from .generating_command import GeneratingCommand
from .reporting_command import ReportingCommand
from .streaming_command import StreamingCommand
if sys.platform == 'win32':
# Work around the fact that on Windows '\n' is mapped to '\r\n'
# The typical solution is to simply open files in binary mode, but stdout
# is already open, thus this hack
import msvcrt
import os
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
def dispatch(command_class, argv=sys.argv, input_file=sys.stdin, output_file=
sys.stdout, module_name=None):
""" Instantiates and executes a search command class
This function implements a `conditional script stanza <http://goo.gl/OFaox6>`_
based on the value of :code:`module_name`::
if module_name is None or module_name == '__main__':
# execute command
Call this function at module scope with :code:`module_name=__name__`, if you
would like your module to act as either a reusable module or a standalone
program. Otherwise, if you wish this function to unconditionally instantiate
and execute :code:`command_class`, pass :const:`None` as the value of
:code:`module_name`.
:param command_class: Class to instantiate and execute.
:type command_class: :code:`SearchCommand`
:param argv: List of arguments to the command.
:type argv: :code:`list`
:param input_file: File from which the command will read data.
:type input_file: :code:`file`
:param output_file: File to which the command will write data.
:type output_file: :code:`file`
:param module_name: Name of the module calling :code:`dispatch` or :const:`None`.
:type module_name: :code:`str`
:returns: :const:`None`
**Example**
.. code-block:: python
:linenos:
#!/usr/bin/env python
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand, module_name=__name__)
Dispatches the :code:`SomeStreamingCommand`, if and only if
:code:`__name__` is equal to :code:`'__main__'`.
**Example**
.. code-block:: python
:linenos:
from splunklib.searchcommands import dispatch, StreamingCommand, Configuration, Option, validators
@Configuration()
class SomeStreamingCommand(StreamingCommand):
...
def stream(records):
...
dispatch(SomeStreamingCommand)
Unconditionally dispatches :code:`SomeStreamingCommand`.
"""
if module_name is None or module_name == '__main__':
command_class().process(argv, input_file, output_file)
return
| apache-2.0 | 5,980,244,803,440,918,000 | 36.586667 | 115 | 0.65165 | false |
conejoninja/plugin.video.pelisalacarta | servers/filesmonster.py | 16 | 1297 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para uploaz
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[filesmonster.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
# http://uploaz.com/file/
patronvideos = '"filesmonster.com/download(.*?)"'
logger.info("[filemonster.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[filesmonster]"
url = "http://filesmonster.com/download"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'filemonster' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
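# Illustrative note (added): given page HTML containing, for instance,
# href="filesmonster.com/download/123/video.html", find_videos returns
# [["[filesmonster]", "http://filesmonster.com/download/123/video.html", "filesmonster"]]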
| gpl-3.0 | -86,957,100,224,683,800 | 31.4 | 91 | 0.583333 | false |
xuleiboy1234/autoTitle | tensorflow/tensorflow/python/ops/nn_fused_batchnorm_test.py | 4 | 10503 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class BatchNormalizationTest(test.TestCase):
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
y = nn_impl.batch_normalization(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return y.eval()
def _test_inference(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
mean_val = np.random.random_sample(scale_shape).astype(np.float32)
var_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
mean = constant_op.constant(mean_val, name='mean')
var = constant_op.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
data_format=data_format,
is_training=False)
y_val = sess.run(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
data_format)
self.assertAllClose(y_ref, y_val, atol=1e-3)
def _training_ref(self, x, scale, offset, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW']:
raise ValueError('data_format must be NCHW or NHWC, '
'got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
mean, var = nn_impl.moments(x, [0, 1, 2], keep_dims=False)
y = nn_impl.batch_normalization(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
return y.eval(), mean.eval(), var.eval()
def _test_training(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
epsilon = 0.001
y, mean, var = nn_impl.fused_batch_norm(
x,
scale,
offset,
epsilon=epsilon,
data_format=data_format,
is_training=True)
y_val, mean_val, var_val = sess.run([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
data_format)
self.assertAllClose(y_ref, y_val, atol=1e-3)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.nn.fused_batch_norm has Bessel's correction built in.
sample_size = x_val.size / scale_val.size
var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _test_gradient(self,
x_shape,
scale_shape,
use_gpu=True,
data_format='NHWC',
is_training=True):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(np.float32)
scale_val = np.random.random_sample(scale_shape).astype(np.float32)
offset_val = np.random.random_sample(scale_shape).astype(np.float32)
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(np.float32)
pop_var = np.random.random_sample(scale_shape).astype(np.float32)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
is_training=is_training)
err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = gradient_checker.compute_gradient_error(scale, scale_shape, y,
x_shape)
err_offset = gradient_checker.compute_gradient_error(offset, scale_shape,
y, x_shape)
err_tolerance = 1e-3
self.assertLess(err_x, err_tolerance)
self.assertLess(err_scale, err_tolerance)
self.assertLess(err_offset, err_tolerance)
def testInference(self):
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if test.is_gpu_available(cuda_only=True):
self._test_inference(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_inference(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_inference(x_shape, [6], use_gpu=False, data_format='NHWC')
def testTraining(self):
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [1], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [1], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [1], use_gpu=False, data_format='NHWC')
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [2], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [2], use_gpu=False, data_format='NHWC')
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [2], use_gpu=True, data_format='NCHW')
x_shape = [27, 131, 127, 6]
if test.is_gpu_available(cuda_only=True):
self._test_training(x_shape, [131], use_gpu=True, data_format='NCHW')
self._test_training(x_shape, [6], use_gpu=True, data_format='NHWC')
self._test_training(x_shape, [6], use_gpu=False, data_format='NHWC')
def testBatchNormGrad(self):
for is_training in [True, False]:
x_shape = [1, 1, 6, 1]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [1],
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape, [1],
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape, [1],
use_gpu=False,
data_format='NHWC',
is_training=is_training)
x_shape = [1, 1, 6, 2]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [2],
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape, [2],
use_gpu=False,
data_format='NHWC',
is_training=is_training)
x_shape = [1, 2, 1, 6]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [2],
use_gpu=True,
data_format='NCHW',
is_training=is_training)
x_shape = [7, 9, 13, 6]
if test.is_gpu_available(cuda_only=True):
self._test_gradient(
x_shape, [9],
use_gpu=True,
data_format='NCHW',
is_training=is_training)
self._test_gradient(
x_shape, [6],
use_gpu=True,
data_format='NHWC',
is_training=is_training)
self._test_gradient(
x_shape, [6],
use_gpu=False,
data_format='NHWC',
is_training=is_training)
if __name__ == '__main__':
test.main()
| mit | 8,295,654,404,085,367,000 | 38.484962 | 80 | 0.588022 | false |
dekom/threepress-bookworm-read-only | bookworm/gdata/tests/gdata_tests/calendar_test.py | 41 | 39080 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
from gdata import test_data
import gdata.calendar
class CalendarFeedTest(unittest.TestCase):
def setUp(self):
self.calendar_feed = gdata.calendar.CalendarListFeedFromString(
test_data.CALENDAR_FEED)
def testEntryCount(self):
# Assert the number of items in the feed of calendars
self.assertEquals(len(self.calendar_feed.entry),2)
def testToAndFromString(self):
# Assert the appropriate type for each entry
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry),
'Entry must be an instance of CalendarListEntry')
# Regenerate feed from xml text
new_calendar_feed = (
gdata.calendar.CalendarListFeedFromString(str(self.calendar_feed)))
for an_entry in new_calendar_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry),
'Entry in regenerated feed must be an instance of CalendarListEntry')
def testAuthor(self):
"""Tests the existence of a <atom:author> and verifies the name and email"""
# Assert that each element in the feed author list is an atom.Author
for an_author in self.calendar_feed.author:
self.assert_(isinstance(an_author, atom.Author),
"Calendar feed <atom:author> element must be an instance of " +
"atom.Author: %s" % an_author)
# Assert the feed author name is as expected
self.assertEquals(self.calendar_feed.author[0].name.text, 'GData Ops Demo')
# Assert the feed author name is as expected
self.assertEquals(self.calendar_feed.author[0].email.text,
'[email protected]')
# Assert one of the values for an entry author
self.assertEquals(self.calendar_feed.entry[0].author[0].name.text,
'GData Ops Demo')
self.assertEquals(self.calendar_feed.entry[0].author[0].email.text,
'[email protected]')
def testId(self):
"""Tests the existence of a <atom:id> in the feed and entries
and verifies the value"""
# Assert the feed id exists and is an atom.Id
self.assert_(isinstance(self.calendar_feed.id, atom.Id),
"Calendar feed <atom:id> element must be an instance of atom.Id: %s" % (
self.calendar_feed.id))
# Assert the feed id value is as expected
self.assertEquals(self.calendar_feed.id.text,
'http://www.google.com/calendar/feeds/default')
# Assert that each entry has an id which is an atom.Id
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.id, atom.Id),
"Calendar entry <atom:id> element must be an instance of " +
"atom.Id: %s" % an_entry.id)
# Assert one of the values for an id
self.assertEquals(self.calendar_feed.entry[1].id.text,
'http://www.google.com/calendar/feeds/default/' +
'jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com')
def testPublished(self):
"""Tests the existence of a <atom:published> in the entries
and verifies the value"""
# Assert that each entry has a published value which is an atom.Published
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.published, atom.Published),
"Calendar entry <atom:published> element must be an instance of " +
"atom.Published: %s" % an_entry.published)
# Assert one of the values for published is as expected
self.assertEquals(self.calendar_feed.entry[1].published.text,
'2007-03-20T22:48:57.837Z')
def testUpdated(self):
"""Tests the existence of a <atom:updated> in the feed and the entries
and verifies the value"""
# Assert that the feed updated element exists and is an atom.Updated
self.assert_(isinstance(self.calendar_feed.updated, atom.Updated),
"Calendar feed <atom:updated> element must be an instance of " +
"atom.Updated: %s" % self.calendar_feed.updated)
# Assert that each entry has a updated value which is an atom.Updated
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.updated, atom.Updated),
"Calendar entry <atom:updated> element must be an instance of" +
"atom.Updated: %s" % an_entry.updated)
# Assert the feed updated value is as expected
self.assertEquals(self.calendar_feed.updated.text,
'2007-03-20T22:48:57.833Z')
# Assert one of the values for updated
self.assertEquals(self.calendar_feed.entry[0].updated.text,
'2007-03-20T22:48:52.000Z')
def testTitle(self):
"""Tests the existence of a <atom:title> in the feed and the entries and
verifies the value"""
# Assert that the feed title element exists and is an atom.Title
self.assert_(isinstance(self.calendar_feed.title, atom.Title),
"Calendar feed <atom:title> element must be an instance of " +
"atom.Title: %s" % self.calendar_feed.title)
# Assert that each entry has a title value which is an atom.Title
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.title, atom.Title),
"Calendar entry <atom:title> element must be an instance of " +
"atom.Title: %s" % an_entry.title)
# Assert the feed title value is as expected
self.assertEquals(self.calendar_feed.title.text,
'GData Ops Demo\'s Calendar List')
# Assert one of the values for title
self.assertEquals(self.calendar_feed.entry[0].title.text, 'GData Ops Demo')
def testColor(self):
"""Tests the existence of a <gCal:color> and verifies the value"""
# Assert the color is present and is a gdata.calendar.Color
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.color, gdata.calendar.Color),
"Calendar feed <gCal:color> element must be an instance of " +
"gdata.calendar.Color: %s" % an_entry.color)
# Assert the color value is as expected
self.assertEquals(self.calendar_feed.entry[0].color.value, '#2952A3')
def testAccessLevel(self):
"""Tests the existence of a <gCal:accesslevel> element and verifies the
value"""
# Assert the access_level is present and is a gdata.calendar.AccessLevel
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.access_level, gdata.calendar.AccessLevel),
"Calendar feed <gCal:accesslevel> element must be an instance of " +
"gdata.calendar.AccessLevel: %s" % an_entry.access_level)
# Assert the access_level value is as expected
self.assertEquals(self.calendar_feed.entry[0].access_level.value, 'owner')
def testTimezone(self):
"""Tests the existence of a <gCal:timezone> element and verifies the
value"""
# Assert the timezone is present and is a gdata.calendar.Timezone
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.timezone, gdata.calendar.Timezone),
"Calendar feed <gCal:timezone> element must be an instance of " +
"gdata.calendar.Timezone: %s" % an_entry.timezone)
# Assert the timezone value is as expected
self.assertEquals(self.calendar_feed.entry[0].timezone.value,
'America/Los_Angeles')
def testHidden(self):
"""Tests the existence of a <gCal:hidden> element and verifies the
value"""
# Assert the hidden is present and is a gdata.calendar.Hidden
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.hidden, gdata.calendar.Hidden),
"Calendar feed <gCal:hidden> element must be an instance of " +
"gdata.calendar.Hidden: %s" % an_entry.hidden)
# Assert the hidden value is as expected
self.assertEquals(self.calendar_feed.entry[0].hidden.value, 'false')
def testOpenSearch(self):
"""Tests the existence of <openSearch:startIndex>"""
# Assert that the elements exist and are the appropriate type
self.assert_(isinstance(self.calendar_feed.start_index, gdata.StartIndex),
"Calendar feed <openSearch:startIndex> element must be an " +
"instance of gdata.StartIndex: %s" % self.calendar_feed.start_index)
# Assert the values for each openSearch element are as expected
self.assertEquals(self.calendar_feed.start_index.text, '1')
def testGenerator(self):
"""Tests the existence of <atom:generator> and verifies the value"""
# Assert that the element exists and is of the appropriate type
self.assert_(isinstance(self.calendar_feed.generator, atom.Generator),
"Calendar feed <atom:generator> element must be an instance of " +
"atom.Generator: %s" % self.calendar_feed.generator)
# Assert the generator version, uri and text are as expected
self.assertEquals(self.calendar_feed.generator.text, 'Google Calendar')
self.assertEquals(self.calendar_feed.generator.version, '1.0')
self.assertEquals(self.calendar_feed.generator.uri,
'http://www.google.com/calendar')
def testEntryLink(self):
"""Makes sure entry links in the private composite feed are parsed."""
entry = gdata.calendar.CalendarEventEntryFromString(
test_data.RECURRENCE_EXCEPTION_ENTRY)
self.assert_(isinstance(entry.recurrence_exception, list))
self.assert_(isinstance(entry.recurrence_exception[0].entry_link,
gdata.EntryLink))
self.assert_(isinstance(entry.recurrence_exception[0].entry_link.entry,
gdata.calendar.CalendarEventEntry))
self.assertEquals(
entry.recurrence_exception[0].entry_link.entry.author[0].name.text,
'gdata ops')
def testSequence(self):
entry = gdata.calendar.CalendarEventEntry(
sequence=gdata.calendar.Sequence(value='1'))
entry2 = gdata.calendar.CalendarEventEntryFromString(str(entry))
self.assertEqual(entry.sequence.value, entry2.sequence.value)
entry = gdata.calendar.CalendarEventEntryFromString(
'<entry xmlns="%s"><sequence xmlns="%s" value="7" /></entry>' % (
atom.ATOM_NAMESPACE, gdata.calendar.GCAL_NAMESPACE))
self.assertEqual(entry.sequence.value, '7')
def testOriginalEntry(self):
"""Make sure original entry in the private composite feed are parsed."""
entry = gdata.calendar.CalendarEventEntryFromString(
test_data.RECURRENCE_EXCEPTION_ENTRY)
self.assertEquals(
entry.recurrence_exception[0].entry_link.entry.original_event.id,
'i7lgfj69mjqjgnodklif3vbm7g')
class CalendarFeedTestRegenerated(CalendarFeedTest):
def setUp(self):
old_calendar_feed = (
gdata.calendar.CalendarListFeedFromString(test_data.CALENDAR_FEED))
self.calendar_feed = (
gdata.calendar.CalendarListFeedFromString(str(old_calendar_feed)))
tree = ElementTree.fromstring(str(old_calendar_feed))
class CalendarEventFeedTest(unittest.TestCase):
def setUp(self):
self.calendar_event_feed = (
gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_FULL_EVENT_FEED))
def testEntryCount(self):
# Assert the number of items in the feed of events
self.assertEquals(len(self.calendar_event_feed.entry), 11)
def testToAndFromString(self):
# Assert the appropriate type for each entry
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry),
"Entry must be an instance of a CalendarEventEntry")
# Regenerate feed from xml text
new_calendar_event_feed = gdata.calendar.CalendarEventFeedFromString(
str(self.calendar_event_feed))
for an_entry in new_calendar_event_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry),
"Entry in regenerated feed must be an instance of CalendarEventEntry")
def testAuthor(self):
"""Tests the existence of a <atom:author> and verifies the name and email"""
# Assert that each element in the feed author list is an atom.Author
for an_author in self.calendar_event_feed.author:
self.assert_(isinstance(an_author, atom.Author),
"Calendar event feed <atom:author> element must be an instance of " +
"atom.Author: %s" % an_author)
# Assert the feed author name is as expected
self.assertEquals(self.calendar_event_feed.author[0].name.text,
'GData Ops Demo')
# Assert the feed author email is as expected
self.assertEquals(self.calendar_event_feed.author[0].email.text,
'[email protected]')
# Assert one of the values for an entry author
self.assertEquals(self.calendar_event_feed.entry[0].author[0].name.text,
'GData Ops Demo')
self.assertEquals(self.calendar_event_feed.entry[0].author[0].email.text,
'[email protected]')
def testId(self):
"""Tests the existence of a <atom:id> in the feed and entries and
verifies the value"""
# Assert the feed id exists and is an atom.Id
self.assert_(isinstance(self.calendar_event_feed.id, atom.Id),
"Calendar event feed <atom:id> element must be an instance of " +
"atom.Id: %s" % self.calendar_event_feed.id)
# Assert the feed id value is as expected
self.assertEquals(self.calendar_event_feed.id.text,
'http://www.google.com/calendar/feeds/default/private/full')
# Assert that each entry has an id which is an atom.Id
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.id, atom.Id),
"Calendar event entry <atom:id> element must be an " +
"instance of atom.Id: %s" % an_entry.id)
# Assert one of the values for an id
self.assertEquals(self.calendar_event_feed.entry[1].id.text,
'http://www.google.com/calendar/feeds/default/private/full/' +
'2qt3ao5hbaq7m9igr5ak9esjo0')
def testPublished(self):
"""Tests the existence of a <atom:published> in the entries and
verifies the value"""
# Assert that each entry has a published value which is an atom.Published
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.published, atom.Published),
"Calendar event entry <atom:published> element must be an instance " +
"of atom.Published: %s" % an_entry.published)
# Assert one of the values for published is as expected
self.assertEquals(self.calendar_event_feed.entry[1].published.text,
'2007-03-20T21:26:04.000Z')
def testUpdated(self):
"""Tests the existence of a <atom:updated> in the feed and the entries and
verifies the value"""
# Assert that the feed updated element exists and is an atom.Updated
self.assert_(isinstance(self.calendar_event_feed.updated, atom.Updated),
"Calendar feed <atom:updated> element must be an instance of " +
"atom.Updated: %s" % self.calendar_event_feed.updated)
# Assert that each entry has an updated value which is an atom.Updated
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.updated, atom.Updated),
"Calendar event entry <atom:updated> element must be an instance " +
"of atom.Updated: %s" % an_entry.updated)
# Assert the feed updated value is as expected
self.assertEquals(self.calendar_event_feed.updated.text,
'2007-03-20T21:29:57.000Z')
# Assert one of the values for updated
self.assertEquals(self.calendar_event_feed.entry[3].updated.text,
'2007-03-20T21:25:46.000Z')
def testTitle(self):
"""Tests the existence of a <atom:title> in the feed and the entries
and verifies the value"""
# Assert that the feed title element exists and is an atom.Title
self.assert_(isinstance(self.calendar_event_feed.title, atom.Title),
"Calendar feed <atom:title> element must be an instance of " +
"atom.Title: %s" % self.calendar_event_feed.title)
# Assert that each entry has a title value which is an atom.Title
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.title, atom.Title),
"Calendar event entry <atom:title> element must be an instance of " +
"atom.Title: %s" % an_entry.title)
# Assert the feed title value is as expected
self.assertEquals(self.calendar_event_feed.title.text, 'GData Ops Demo')
# Assert one of the values for title
self.assertEquals(self.calendar_event_feed.entry[0].title.text,
'test deleted')
def testPostLink(self):
"""Tests the existence of a <atom:link> with a rel='...#post'
and verifies the value"""
# Assert that each link in the feed is an atom.Link
for a_link in self.calendar_event_feed.link:
self.assert_(isinstance(a_link, atom.Link),
"Calendar event entry <atom:link> element must be an instance of " +
"atom.Link: %s" % a_link)
# Assert post link exists
self.assert_(self.calendar_event_feed.GetPostLink() is not None)
# Assert the post link value is as expected
self.assertEquals(self.calendar_event_feed.GetPostLink().href,
'http://www.google.com/calendar/feeds/default/private/full')
def testEditLink(self):
"""Tests the existence of a <atom:link> with a rel='edit' in each entry
and verifies the value"""
# Assert that each link in the feed is an atom.Link
for a_link in self.calendar_event_feed.link:
self.assert_(isinstance(a_link, atom.Link),
"Calendar event entry <atom:link> element must be an instance of " +
"atom.Link: %s" % a_link)
# Assert edit link exists
for a_entry in self.calendar_event_feed.entry:
self.assert_(a_entry.GetEditLink() is not None)
# Assert the edit link value is as expected
self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().href,
'http://www.google.com/calendar/feeds/default/private/full/o99flmgm' +
'kfkfrr8u745ghr3100/63310109397')
self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().type,
'application/atom+xml')
def testOpenSearch(self):
"""Tests the existence of <openSearch:totalResults>,
<openSearch:startIndex>, <openSearch:itemsPerPage>"""
# Assert that the elements exist and are the appropriate type
self.assert_(isinstance(self.calendar_event_feed.total_results,
gdata.TotalResults),
"Calendar event feed <openSearch:totalResults> element must be an " +
"instance of gdata.TotalResults: %s" % (
self.calendar_event_feed.total_results))
self.assert_(
isinstance(self.calendar_event_feed.start_index, gdata.StartIndex),
"Calendar event feed <openSearch:startIndex> element must be an " +
"instance of gdata.StartIndex: %s" % (
self.calendar_event_feed.start_index))
self.assert_(
isinstance(self.calendar_event_feed.items_per_page, gdata.ItemsPerPage),
"Calendar event feed <openSearch:itemsPerPage> element must be an " +
"instance of gdata.ItemsPerPage: %s" % (
self.calendar_event_feed.items_per_page))
# Assert the values for each openSearch element are as expected
self.assertEquals(self.calendar_event_feed.total_results.text, '10')
self.assertEquals(self.calendar_event_feed.start_index.text, '1')
self.assertEquals(self.calendar_event_feed.items_per_page.text, '25')
def testGenerator(self):
"""Tests the existence of <atom:generator> and verifies the value"""
# Assert that the element exists and is of the appropriate type
self.assert_(isinstance(self.calendar_event_feed.generator, atom.Generator),
"Calendar event feed <atom:generator> element must be an instance " +
"of atom.Generator: %s" % self.calendar_event_feed.generator)
# Assert the generator version, uri and text are as expected
self.assertEquals(self.calendar_event_feed.generator.text,
'Google Calendar')
self.assertEquals(self.calendar_event_feed.generator.version, '1.0')
self.assertEquals(self.calendar_event_feed.generator.uri,
'http://www.google.com/calendar')
def testCategory(self):
"""Tests the existence of <atom:category> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for a_category in self.calendar_event_feed.category:
self.assert_(isinstance(a_category, atom.Category),
"Calendar event feed <atom:category> element must be an instance " +
"of atom.Category: %s" % a_category)
self.assertEquals(a_category.scheme,
'http://schemas.google.com/g/2005#kind')
self.assertEquals(a_category.term,
'http://schemas.google.com/g/2005#event')
for an_event in self.calendar_event_feed.entry:
for a_category in an_event.category:
self.assert_(isinstance(a_category, atom.Category),
"Calendar event feed entry <atom:category> element must be an " +
"instance of atom.Category: %s" % a_category)
self.assertEquals(a_category.scheme,
'http://schemas.google.com/g/2005#kind')
self.assertEquals(a_category.term,
'http://schemas.google.com/g/2005#event')
def testSendEventNotifications(self):
"""Test the existence of <gCal:sendEventNotifications>
and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.send_event_notifications,
gdata.calendar.SendEventNotifications),
("Calendar event feed entry <gCal:sendEventNotifications> element " +
"must be an instance of gdata.calendar.SendEventNotifications: %s") % (
an_event.send_event_notifications,))
# Assert the <gCal:sendEventNotifications> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].send_event_notifications.value,
'false')
self.assertEquals(
self.calendar_event_feed.entry[2].send_event_notifications.value,
'true')
def testQuickAdd(self):
"""Test the existence of <gCal:quickadd>
and verifies the value"""
entry = gdata.calendar.CalendarEventEntry()
entry.quick_add = gdata.calendar.QuickAdd(value='true')
unmarshalled_entry = entry.ToString()
tag = '{%s}quickadd' % (gdata.calendar.GCAL_NAMESPACE)
marshalled_entry = ElementTree.fromstring(unmarshalled_entry).find(tag)
self.assertEquals(marshalled_entry.attrib['value'], 'true')
self.assertEquals(marshalled_entry.tag, tag)
def testEventStatus(self):
"""Test the existence of <gd:eventStatus>
and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.event_status,
gdata.calendar.EventStatus),
("Calendar event feed entry <gd:eventStatus> element " +
"must be an instance of gdata.calendar.EventStatus: %s") % (
an_event.event_status,))
# Assert the <gd:eventStatus> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].event_status.value,
'CANCELED')
self.assertEquals(
self.calendar_event_feed.entry[1].event_status.value,
'CONFIRMED')
def testComments(self):
"""Tests the existence of <atom:comments> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(an_event.comments is None or isinstance(an_event.comments,
gdata.calendar.Comments),
("Calendar event feed entry <gd:comments> element " +
"must be an instance of gdata.calendar.Comments: %s") % (
an_event.comments,))
def testVisibility(self):
"""Test the existence of <gd:visibility> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.visibility,
gdata.calendar.Visibility),
("Calendar event feed entry <gd:visibility> element " +
"must be an instance of gdata.calendar.Visibility: %s") % (
an_event.visibility,))
# Assert the <gd:visibility> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].visibility.value,
'DEFAULT')
self.assertEquals(
self.calendar_event_feed.entry[1].visibility.value,
'PRIVATE')
self.assertEquals(
self.calendar_event_feed.entry[2].visibility.value,
'PUBLIC')
def testTransparency(self):
"""Test the existence of <gd:transparency> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.transparency,
gdata.calendar.Transparency),
("Calendar event feed entry <gd:transparency> element " +
"must be an instance of gdata.calendar.Transparency: %s") % (
an_event.transparency,))
# Assert the <gd:transparency> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].transparency.value,
'OPAQUE')
self.assertEquals(
self.calendar_event_feed.entry[1].transparency.value,
'OPAQUE')
self.assertEquals(
self.calendar_event_feed.entry[2].transparency.value,
'OPAQUE')
# TODO: TEST VALUES OF TRANSPARENCY OTHER THAN OPAQUE
def testWhere(self):
"""Tests the existence of a <gd:where> in the entries
and verifies the value"""
# Assert that each entry has a where value which is an gdata.calendar.Where
for an_entry in self.calendar_event_feed.entry:
for a_where in an_entry.where:
self.assert_(isinstance(a_where, gdata.calendar.Where),
"Calendar event entry <gd:where> element must be an instance of " +
"gdata.calendar.Where: %s" % a_where)
# Assert one of the values for where is as expected
self.assertEquals(self.calendar_event_feed.entry[1].where[0].value_string,
'Dolores Park with Kim')
def testWhenAndReminder(self):
"""Tests the existence of a <gd:when> and <gd:reminder> in the entries
and verifies the values"""
# Assert that each entry's when value is a gdata.calendar.When
# Assert that each reminder is a gdata.calendar.Reminder
for an_entry in self.calendar_event_feed.entry:
for a_when in an_entry.when:
self.assert_(isinstance(a_when, gdata.calendar.When),
"Calendar event entry <gd:when> element must be an instance " +
"of gdata.calendar.When: %s" % a_when)
for a_reminder in a_when.reminder:
self.assert_(isinstance(a_reminder, gdata.calendar.Reminder),
"Calendar event entry <gd:reminder> element must be an " +
"instance of gdata.calendar.Reminder: %s" % a_reminder)
# Assert one of the values for when is as expected
self.assertEquals(self.calendar_event_feed.entry[0].when[0].start_time,
'2007-03-23T12:00:00.000-07:00')
self.assertEquals(self.calendar_event_feed.entry[0].when[0].end_time,
'2007-03-23T13:00:00.000-07:00')
# Assert the reminder child of when is as expected
self.assertEquals(
self.calendar_event_feed.entry[0].when[0].reminder[0].minutes, '10')
self.assertEquals(
self.calendar_event_feed.entry[1].when[0].reminder[0].minutes, '20')
def testBatchRequestParsing(self):
batch_request = gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_BATCH_REQUEST)
self.assertEquals(len(batch_request.entry), 4)
# Iterate over the batch request entries and match the operation with
# the batch id. These values are hard coded to match the test data.
for entry in batch_request.entry:
if entry.batch_id.text == '1':
self.assertEquals(entry.batch_operation.type, 'insert')
if entry.batch_id.text == '2':
self.assertEquals(entry.batch_operation.type, 'query')
if entry.batch_id.text == '3':
self.assertEquals(entry.batch_operation.type, 'update')
self.assertEquals(entry.title.text, 'Event updated via batch')
if entry.batch_id.text == '4':
self.assertEquals(entry.batch_operation.type, 'delete')
self.assertEquals(entry.id.text,
'http://www.google.com/calendar/feeds/default/'
'private/full/d8qbg9egk1n6lhsgq1sjbqffqc')
self.assertEquals(entry.GetEditLink().href,
'http://www.google.com/calendar/feeds/default/'
'private/full/d8qbg9egk1n6lhsgq1sjbqffqc/'
'63326018324')
def testBatchResponseParsing(self):
batch_response = gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_BATCH_RESPONSE)
self.assertEquals(len(batch_response.entry), 4)
for entry in batch_response.entry:
if entry.batch_id.text == '1':
self.assertEquals(entry.batch_operation.type, 'insert')
self.assertEquals(entry.batch_status.code, '201')
self.assertEquals(entry.batch_status.reason, 'Created')
self.assertEquals(entry.id.text, 'http://www.google.com/calendar/'
'feeds/default/private/full/'
'n9ug78gd9tv53ppn4hdjvk68ek')
if entry.batch_id.text == '2':
self.assertEquals(entry.batch_operation.type, 'query')
if entry.batch_id.text == '3':
self.assertEquals(entry.batch_operation.type, 'update')
if entry.batch_id.text == '4':
self.assertEquals(entry.batch_operation.type, 'delete')
self.assertEquals(entry.id.text, 'http://www.google.com/calendar/'
'feeds/default/private/full/'
'd8qbg9egk1n6lhsgq1sjbqffqc')
# TODO add reminder tests for absolute_time and hours/seconds (if possible)
# TODO test recurrence and recurrenceexception
# TODO test originalEvent
class CalendarWebContentTest(unittest.TestCase):
def setUp(self):
self.calendar_event_feed = (
gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_FULL_EVENT_FEED))
def testAddSimpleWebContentEventEntry(self):
"""Verifies that we can add a web content link to an event entry."""
title = "Al Einstein's Birthday!"
href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
type = 'image/jpeg'
url = 'http://gdata.ops.demo.googlepages.com/einstein.jpg'
width = '300'
height = '225'
# Create a web content event
event = gdata.calendar.CalendarEventEntry()
web_content = gdata.calendar.WebContent(url=url, width=width, height=height)
web_content_link = gdata.calendar.WebContentLink(title=title,
href=href, link_type=type, web_content=web_content)
event.link.append(web_content_link)
# Verify the web content link exists and contains the expected data
web_content_link = event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidSimpleWebContent(url, width, height, web_content_element)
def testAddWebContentGadgetEventEntry(self):
"""Verifies that we can add a web content gadget link to an event entry."""
title = "Date and Time Gadget"
href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
url = 'http://google.com/ig/modules/datetime.xml'
type = 'application/x-google-gadgets+xml'
width = '300'
height = '200'
pref_name = 'color'
pref_value = 'green'
# Create a web content event
event = gdata.calendar.CalendarEventEntry()
web_content = gdata.calendar.WebContent(url=url, width=width, height=height)
web_content.gadget_pref.append(
gdata.calendar.WebContentGadgetPref(name=pref_name, value=pref_value))
web_content_link = gdata.calendar.WebContentLink(title=title,
href=href, web_content=web_content, link_type=type)
event.link.append(web_content_link)
# Verify the web content link exists and contains the expected data
web_content_link = event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidWebContentGadget(url, width, height,
pref_name, pref_value, web_content_element)
def testFromXmlToSimpleWebContent(self):
"""Verifies that we can read a web content link from an event entry."""
# Expected values (from test_data.py file)
title = 'World Cup'
href = 'http://www.google.com/calendar/images/google-holiday.gif'
type = 'image/gif'
url = 'http://www.google.com/logos/worldcup06.gif'
width = '276'
height = '120'
# Note: The tenth event entry contains web content
web_content_event = self.calendar_event_feed.entry[9]
# Verify the web content link exists and contains the expected data
web_content_link = web_content_event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidSimpleWebContent(url, width, height, web_content_element)
def testFromXmlToWebContentGadget(self):
"""Verifies that we can read a web content link from an event entry."""
# Expected values (from test_data.py file)
title = 'Date and Time Gadget'
href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
url = 'http://google.com/ig/modules/datetime.xml'
type = 'application/x-google-gadgets+xml'
width = '300'
height = '136'
pref_name = 'color'
pref_value = 'green'
# Note: The eleventh event entry contains web content
web_content_event = self.calendar_event_feed.entry[10]
# Verify the web content link exists and contains the expected data
web_content_link = web_content_event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidWebContentGadget(url, width, height, pref_name,
pref_value, web_content_element)
def assertValidWebContentLink(self, expected_title=None, expected_href=None,
expected_type=None, web_content_link=None):
"""Asserts that the web content link is the correct type and contains the
expected values"""
self.assert_(isinstance(web_content_link, gdata.calendar.WebContentLink),
"Web content link element must be an " +
"instance of gdata.calendar.WebContentLink: %s" % web_content_link)
expected_rel = '%s/%s' % (gdata.calendar.GCAL_NAMESPACE, 'webContent')
self.assertEquals(expected_rel, web_content_link.rel)
self.assertEqual(expected_title, web_content_link.title)
self.assertEqual(expected_href, web_content_link.href)
self.assertEqual(expected_type, web_content_link.type)
def assertValidSimpleWebContent(self, expected_url=None, expected_width=None,
expected_height=None, web_content_element=None):
"""Asserts that the web content element is the correct type and contains
the expected values"""
self.assert_(isinstance(web_content_element, gdata.calendar.WebContent),
"Calendar event entry <gCal:webContent> element must be an " +
"instance of gdata.calendar.WebContent: %s" % web_content_element)
self.assertEquals(expected_width, web_content_element.width)
self.assertEquals(expected_height, web_content_element.height)
self.assertEquals(expected_url, web_content_element.url)
def assertValidWebContentGadget(self, expected_url=None, expected_width=None,
expected_height=None, expected_pref_name=None, expected_pref_value=None,
web_content_element=None):
"""Asserts that the web content element is the correct type and contains
the expected values"""
self.assert_(isinstance(web_content_element, gdata.calendar.WebContent),
"Calendar event entry <gCal:webContent> element must be an " +
"instance of gdata.calendar.WebContent: %s" % web_content_element)
self.assertEquals(expected_width, web_content_element.width)
self.assertEquals(expected_height, web_content_element.height)
self.assertEquals(expected_url, web_content_element.url)
self.assertEquals(expected_pref_name,
web_content_element.gadget_pref[0].name)
self.assertEquals(expected_pref_value,
web_content_element.gadget_pref[0].value)
def testSampleCode(self):
# From http://code.google.com/apis/calendar/gadgets/event/
wc = gdata.calendar.WebContent()
wc.url = 'http://www.thefreedictionary.com/_/WoD/wod-module.xml'
wc.width = '300'
wc.height = '136'
wc.gadget_pref.append(gdata.calendar.WebContentGadgetPref(name='Days', value='1'))
wc.gadget_pref.append(gdata.calendar.WebContentGadgetPref(name='Format', value='0'))
wcl = gdata.calendar.WebContentLink()
wcl.title = 'Word of the Day'
wcl.href = 'http://www.thefreedictionary.com/favicon.ico'
wcl.type = 'application/x-google-gadgets+xml'
wcl.web_content = wc
self.assertEqual(wcl.web_content.url,
'http://www.thefreedictionary.com/_/WoD/wod-module.xml')
self.assertEqual(wcl.type, 'application/x-google-gadgets+xml')
self.assertEqual(wcl.web_content.height, '136')
class ExtendedPropertyTest(unittest.TestCase):
def testExtendedPropertyToAndFromXml(self):
ep = gdata.calendar.ExtendedProperty(name='test')
ep.value = 'val'
xml_string = ep.ToString()
ep2 = gdata.ExtendedPropertyFromString(xml_string)
self.assertEquals(ep.name, ep2.name)
self.assertEquals(ep.value, ep2.value)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 366,560,241,756,866,240 | 42.134658 | 88 | 0.688383 | false |
ekcs/congress | congress/tests/test_utils.py | 1 | 1179 | # Copyright (c) 2014 VMware
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import testtools
import congress.utils as utils
class UtilsTest(testtools.TestCase):
def test_value_to_congress(self):
self.assertEqual("abc", utils.value_to_congress("abc"))
self.assertEqual("True", utils.value_to_congress(True))
self.assertEqual("False", utils.value_to_congress(False))
self.assertEqual(0, utils.value_to_congress(0))
self.assertEqual(1, utils.value_to_congress(1))
self.assertEqual(123, utils.value_to_congress(123))
if sys.version < '3':
self.assertEqual(456.0, utils.value_to_congress(456.0))
| apache-2.0 | -5,578,140,871,274,983,000 | 34.727273 | 69 | 0.715013 | false |
yeti-platform/yeti | plugins/import_methods/pdf.py | 1 | 1151 | from io import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from core.investigation import ImportMethod
class ImportPDF(ImportMethod):
default_values = {
"name": "import_pdf",
"description": "Perform investigation import from a PDF document.",
"acts_on": "application/pdf",
}
def do_import(self, results, filepath):
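# A brief note on the pdfminer pipeline used below: the PDFResourceManager
# caches shared resources such as fonts, the TextConverter device writes the
# text it extracts into the in-memory buffer, and PDFPageInterpreter renders
# one page at a time through that device. The accumulated text is finally
# stored on the investigation as "import_text".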
buff = StringIO()
fp = open(filepath, "rb")
laparams = LAParams()
laparams.all_texts = True
rsrcmgr = PDFResourceManager()
pagenos = set()
page_num = 0
for page in PDFPage.get_pages(fp, pagenos, check_extractable=True):
page_num += 1
device = TextConverter(rsrcmgr, buff, codec="utf-8", laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
interpreter.process_page(page)
buff.write("\n")
results.investigation.update(import_text=buff.getvalue())
fp.close()
buff.close()
| apache-2.0 | 3,848,743,023,659,732,500 | 27.073171 | 83 | 0.648132 | false |
mpimenov/omim | tools/python/testserver.py | 4 | 6611 | """
This is a simple web-server that does very few things. It is necessary for
the downloader tests.
Here is the logic behind the initialization:
Because several instances of the test can run simultaneously on the Build
machine, we have to take this into account and not start another server if
one is already running. However, there is a chance that a server will not
terminate correctly, and will still hold the port, so we will not be able
to initialize another server.
So before initializing the server, we check if any processes are using the port
that we want to use. If we find such a process, we assume that it might be
working, and wait for about 10 seconds for it to start serving. If it does not,
we kill it.
Next, we check the name of our process and see if there are other processes
with the same name. If there are, we assume that they might start serving any
moment. So we iterate over the ones that have PID lower than ours, and wait
for them to start serving. If a process doesn't serve, we kill it.
If we have killed (or someone has) all the processes with PIDs lower than ours,
we try to start serving. If we succeed, we kill all other processes with the
same name as ours. If we don't, someone else will kill us.
"""
from __future__ import print_function
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from ResponseProvider import Payload
from ResponseProvider import ResponseProvider
from ResponseProvider import ResponseProviderMixin
from SiblingKiller import SiblingKiller
from threading import Timer
import os
import socket
import threading
import traceback
import logging
import logging.config
try:
from tornado_handler import MainHandler
USE_TORNADO = True
except:
USE_TORNADO = False
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
PORT = 34568
LIFESPAN = 180.0 # timeout for the self destruction timer - how much time
# passes between the last request and the server killing
# itself
PING_TIMEOUT = 5  # Number of seconds to wait for ping response
class InternalServer(HTTPServer):
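# Lifetime management: __init__ arms a self-destruct timer and every POST
# request re-arms it, so the server dies LIFESPAN seconds after the last
# request. The clients counter (bumped by pings, decremented by kill requests)
# lets the server shut down early once every known client is done with it.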
def kill_me(self):
self.shutdown()
logging.info("The server's life has come to an end, pid: {}".format(os.getpid()))
def reset_selfdestruct_timer(self):
if self.self_destruct_timer:
self.self_destruct_timer.cancel()
self.self_destruct_timer = Timer(LIFESPAN, self.kill_me)
self.self_destruct_timer.start()
def __init__(self, server_address, RequestHandlerClass,
bind_and_activate=True):
HTTPServer.__init__(self, server_address, RequestHandlerClass,
bind_and_activate=bind_and_activate)
self.self_destruct_timer = None
self.clients = 1
self.reset_selfdestruct_timer()
def suicide(self):
self.clients -= 1
if self.clients == 0:
if self.self_destruct_timer is not None:
self.self_destruct_timer.cancel()
quick_and_painless_timer = Timer(0.1, self.kill_me)
quick_and_painless_timer.start()
class TestServer:
def __init__(self):
self.may_serve = False
pid = os.getpid()
logging.info("Init server. Pid: {}".format(pid))
self.server = None
killer = SiblingKiller(PORT, PING_TIMEOUT)
killer.kill_siblings()
if killer.allow_serving():
try:
self.init_server()
logging.info("Started server with pid: {}".format(pid))
self.may_serve = True
except socket.error:
logging.info("Failed to start the server: Port is in use")
except Exception as e:
logging.debug(e)
logging.info("Failed to start serving for unknown reason")
traceback.print_exc()
else:
logging.info("Not allowed to start serving for process: {}".format(pid))
def init_server(self):
if USE_TORNADO:
MainHandler.init_server(PORT, LIFESPAN)
else:
print("""
*************
WARNING: Using Python's built-in BaseHTTPServer!
It is all right if you run the tests on your local machine, but if you are running tests on a server,
please consider installing Tornado. It is a much more powerful web-server. Otherwise you will find
that some of your downloader tests either fail or hang.
do
sudo pip install tornado
or go to http://www.tornadoweb.org/en/stable/ for more detail.
*************
""")
self.server = InternalServer(('localhost', PORT), PostHandler)
def start_serving(self):
if not self.may_serve:
return
if USE_TORNADO:
MainHandler.start_serving()
else:
thread = threading.Thread(target=self.server.serve_forever)
thread.daemon = True
thread.start()
class PostHandler(BaseHTTPRequestHandler, ResponseProviderMixin):
def dispatch_response(self, payload):
self.send_response(payload.response_code())
for h in payload.headers():
self.send_header(h, payload.headers()[h])
self.send_header("Content-Length", payload.length())
self.end_headers()
self.wfile.write(payload.message())
def init_vars(self):
self.response_provider = ResponseProvider(self)
def do_POST(self):
self.init_vars()
self.server.reset_selfdestruct_timer()
headers = self.prepare_headers()
payload = self.response_provider.response_for_url_and_headers(self.path, headers)
if payload.response_code() >= 300:
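# For error responses (3xx and above) the handler echoes the request body
# back to the client instead of the provider's payload.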
length = int(self.headers.getheader('content-length'))
self.dispatch_response(Payload(self.rfile.read(length)))
else:
self.dispatch_response(payload)
def do_GET(self):
headers = self.prepare_headers()
self.init_vars()
self.dispatch_response(self.response_provider.response_for_url_and_headers(self.path, headers))
def prepare_headers(self):
ret = dict()
for h in self.headers:
ret[h] = self.headers.get(h)
return ret
def got_pinged(self):
self.server.clients += 1
def kill(self):
logging.debug("Kill called in testserver")
self.server.suicide()
if __name__ == '__main__':
server = TestServer()
server.start_serving()
| apache-2.0 | -27,296,083,577,853,324 | 29.325688 | 103 | 0.643322 | false |
KarimAllah/celery | celery/tests/test_task/test_task_control.py | 18 | 4711 | from __future__ import absolute_import
from __future__ import with_statement
from functools import wraps
from kombu.pidbox import Mailbox
from celery.app import app_or_default
from celery.task import control
from celery.task import PingTask
from celery.utils import uuid
from celery.tests.utils import unittest
class MockMailbox(Mailbox):
sent = []
def _publish(self, command, *args, **kwargs):
self.__class__.sent.append(command)
def close(self):
pass
def _collect(self, *args, **kwargs):
pass
class Control(control.Control):
Mailbox = MockMailbox
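# The decorator below clears MockMailbox.sent both before and after the wrapped
# test, so each test only observes the control commands it issued itself.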
def with_mock_broadcast(fun):
@wraps(fun)
def _resets(*args, **kwargs):
MockMailbox.sent = []
try:
return fun(*args, **kwargs)
finally:
MockMailbox.sent = []
return _resets
class test_inspect(unittest.TestCase):
def setUp(self):
app = app_or_default()
self.i = Control(app=app).inspect()
def test_prepare_reply(self):
self.assertDictEqual(self.i._prepare([{"w1": {"ok": 1}},
{"w2": {"ok": 1}}]),
{"w1": {"ok": 1}, "w2": {"ok": 1}})
i = control.inspect(destination="w1")
self.assertEqual(i._prepare([{"w1": {"ok": 1}}]),
{"ok": 1})
@with_mock_broadcast
def test_active(self):
self.i.active()
self.assertIn("dump_active", MockMailbox.sent)
@with_mock_broadcast
def test_scheduled(self):
self.i.scheduled()
self.assertIn("dump_schedule", MockMailbox.sent)
@with_mock_broadcast
def test_reserved(self):
self.i.reserved()
self.assertIn("dump_reserved", MockMailbox.sent)
@with_mock_broadcast
def test_stats(self):
self.i.stats()
self.assertIn("stats", MockMailbox.sent)
@with_mock_broadcast
def test_revoked(self):
self.i.revoked()
self.assertIn("dump_revoked", MockMailbox.sent)
@with_mock_broadcast
def test_registered(self):
self.i.registered()
self.assertIn("dump_tasks", MockMailbox.sent)
@with_mock_broadcast
def test_enable_events(self):
self.i.enable_events()
self.assertIn("enable_events", MockMailbox.sent)
@with_mock_broadcast
def test_disable_events(self):
self.i.disable_events()
self.assertIn("disable_events", MockMailbox.sent)
@with_mock_broadcast
def test_ping(self):
self.i.ping()
self.assertIn("ping", MockMailbox.sent)
@with_mock_broadcast
def test_add_consumer(self):
self.i.add_consumer("foo")
self.assertIn("add_consumer", MockMailbox.sent)
@with_mock_broadcast
def test_cancel_consumer(self):
self.i.cancel_consumer("foo")
self.assertIn("cancel_consumer", MockMailbox.sent)
class test_Broadcast(unittest.TestCase):
def setUp(self):
self.app = app_or_default()
self.control = Control(app=self.app)
self.app.control = self.control
def tearDown(self):
del(self.app.control)
def test_discard_all(self):
self.control.discard_all()
@with_mock_broadcast
def test_broadcast(self):
self.control.broadcast("foobarbaz", arguments=[])
self.assertIn("foobarbaz", MockMailbox.sent)
@with_mock_broadcast
def test_broadcast_limit(self):
self.control.broadcast("foobarbaz1", arguments=[], limit=None,
destination=[1, 2, 3])
self.assertIn("foobarbaz1", MockMailbox.sent)
@with_mock_broadcast
def test_broadcast_validate(self):
with self.assertRaises(ValueError):
self.control.broadcast("foobarbaz2",
destination="foo")
@with_mock_broadcast
def test_rate_limit(self):
self.control.rate_limit(PingTask.name, "100/m")
self.assertIn("rate_limit", MockMailbox.sent)
@with_mock_broadcast
def test_revoke(self):
self.control.revoke("foozbaaz")
self.assertIn("revoke", MockMailbox.sent)
@with_mock_broadcast
def test_ping(self):
self.control.ping()
self.assertIn("ping", MockMailbox.sent)
@with_mock_broadcast
def test_revoke_from_result(self):
self.app.AsyncResult("foozbazzbar").revoke()
self.assertIn("revoke", MockMailbox.sent)
@with_mock_broadcast
def test_revoke_from_resultset(self):
r = self.app.TaskSetResult(uuid(),
map(self.app.AsyncResult,
[uuid() for i in range(10)]))
r.revoke()
self.assertIn("revoke", MockMailbox.sent)
| bsd-3-clause | -670,010,542,523,870,300 | 26.549708 | 70 | 0.603057 | false |
daevaorn/kombu | kombu/utils/eventio.py | 1 | 10324 | """
kombu.utils.eventio
===================
Evented IO support for multiple platforms.
"""
from __future__ import absolute_import
import errno
import math
import select as __select__
import socket
from numbers import Integral
from kombu.syn import detect_environment
from . import fileno
__all__ = ['poll']
_selectf = __select__.select
_selecterr = __select__.error
xpoll = getattr(__select__, 'poll', None)
epoll = getattr(__select__, 'epoll', None)
kqueue = getattr(__select__, 'kqueue', None)
kevent = getattr(__select__, 'kevent', None)
KQ_EV_ADD = getattr(__select__, 'KQ_EV_ADD', 1)
KQ_EV_DELETE = getattr(__select__, 'KQ_EV_DELETE', 2)
KQ_EV_ENABLE = getattr(__select__, 'KQ_EV_ENABLE', 4)
KQ_EV_CLEAR = getattr(__select__, 'KQ_EV_CLEAR', 32)
KQ_EV_ERROR = getattr(__select__, 'KQ_EV_ERROR', 16384)
KQ_EV_EOF = getattr(__select__, 'KQ_EV_EOF', 32768)
KQ_FILTER_READ = getattr(__select__, 'KQ_FILTER_READ', -1)
KQ_FILTER_WRITE = getattr(__select__, 'KQ_FILTER_WRITE', -2)
KQ_FILTER_AIO = getattr(__select__, 'KQ_FILTER_AIO', -3)
KQ_FILTER_VNODE = getattr(__select__, 'KQ_FILTER_VNODE', -4)
KQ_FILTER_PROC = getattr(__select__, 'KQ_FILTER_PROC', -5)
KQ_FILTER_SIGNAL = getattr(__select__, 'KQ_FILTER_SIGNAL', -6)
KQ_FILTER_TIMER = getattr(__select__, 'KQ_FILTER_TIMER', -7)
KQ_NOTE_LOWAT = getattr(__select__, 'KQ_NOTE_LOWAT', 1)
KQ_NOTE_DELETE = getattr(__select__, 'KQ_NOTE_DELETE', 1)
KQ_NOTE_WRITE = getattr(__select__, 'KQ_NOTE_WRITE', 2)
KQ_NOTE_EXTEND = getattr(__select__, 'KQ_NOTE_EXTEND', 4)
KQ_NOTE_ATTRIB = getattr(__select__, 'KQ_NOTE_ATTRIB', 8)
KQ_NOTE_LINK = getattr(__select__, 'KQ_NOTE_LINK', 16)
KQ_NOTE_RENAME = getattr(__select__, 'KQ_NOTE_RENAME', 32)
KQ_NOTE_REVOKE = getattr(__select__, 'KQ_NOTE_REVOKE', 64)
POLLIN = getattr(__select__, 'POLLIN', 1)
POLLOUT = getattr(__select__, 'POLLOUT', 4)
POLLERR = getattr(__select__, 'POLLERR', 8)
POLLHUP = getattr(__select__, 'POLLHUP', 16)
POLLNVAL = getattr(__select__, 'POLLNVAL', 32)
READ = POLL_READ = 0x001
WRITE = POLL_WRITE = 0x004
ERR = POLL_ERR = 0x008 | 0x010
try:
SELECT_BAD_FD = {errno.EBADF, errno.WSAENOTSOCK}
except AttributeError:
SELECT_BAD_FD = {errno.EBADF}
class _epoll(object):
def __init__(self):
self._epoll = epoll()
def register(self, fd, events):
try:
self._epoll.register(fd, events)
except Exception as exc:
if getattr(exc, 'errno', None) != errno.EEXIST:
raise
return fd
def unregister(self, fd):
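# Unregistering an fd that is already closed or was never registered is not
# treated as an error here: the poller can race with sockets being torn
# down, so ENOENT/EPERM and plain socket/lookup errors are swallowed.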
try:
self._epoll.unregister(fd)
except (socket.error, ValueError, KeyError, TypeError):
pass
except (IOError, OSError) as exc:
if getattr(exc, 'errno', None) not in (errno.ENOENT, errno.EPERM):
raise
def poll(self, timeout):
try:
return self._epoll.poll(timeout if timeout is not None else -1)
except Exception as exc:
if getattr(exc, 'errno', None) != errno.EINTR:
raise
def close(self):
self._epoll.close()
class _kqueue(object):
w_fflags = (KQ_NOTE_WRITE | KQ_NOTE_EXTEND |
KQ_NOTE_ATTRIB | KQ_NOTE_DELETE)
def __init__(self):
self._kqueue = kqueue()
self._active = {}
self.on_file_change = None
self._kcontrol = self._kqueue.control
def register(self, fd, events):
self._control(fd, events, KQ_EV_ADD)
self._active[fd] = events
return fd
def unregister(self, fd):
events = self._active.pop(fd, None)
if events:
try:
self._control(fd, events, KQ_EV_DELETE)
except socket.error:
pass
def watch_file(self, fd):
ev = kevent(fd,
filter=KQ_FILTER_VNODE,
flags=KQ_EV_ADD | KQ_EV_ENABLE | KQ_EV_CLEAR,
fflags=self.w_fflags)
self._kcontrol([ev], 0)
def unwatch_file(self, fd):
ev = kevent(fd,
filter=KQ_FILTER_VNODE,
flags=KQ_EV_DELETE,
fflags=self.w_fflags)
self._kcontrol([ev], 0)
def _control(self, fd, events, flags):
if not events:
return
kevents = []
if events & WRITE:
kevents.append(kevent(fd,
filter=KQ_FILTER_WRITE,
flags=flags))
if not kevents or events & READ:
kevents.append(
kevent(fd, filter=KQ_FILTER_READ, flags=flags),
)
control = self._kcontrol
for e in kevents:
try:
control([e], 0)
except ValueError:
pass
def poll(self, timeout):
try:
kevents = self._kcontrol(None, 1000, timeout)
except Exception as exc:
if getattr(exc, 'errno', None) == errno.EINTR:
return
raise
events, file_changes = {}, []
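# Translate raw kevents into the generic READ/WRITE/ERR bitmask used by the
# rest of kombu; vnode (file change) events are collected separately and
# handed to the on_file_change callback.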
for k in kevents:
fd = k.ident
if k.filter == KQ_FILTER_READ:
events[fd] = events.get(fd, 0) | READ
elif k.filter == KQ_FILTER_WRITE:
if k.flags & KQ_EV_EOF:
events[fd] = ERR
else:
events[fd] = events.get(fd, 0) | WRITE
elif k.filter == KQ_EV_ERROR:
events[fd] = events.get(fd, 0) | ERR
elif k.filter == KQ_FILTER_VNODE:
if k.fflags & KQ_NOTE_DELETE:
self.unregister(fd)
file_changes.append(k)
if file_changes:
self.on_file_change(file_changes)
return list(events.items())
def close(self):
self._kqueue.close()
class _poll(object):
def __init__(self):
self._poller = xpoll()
self._quick_poll = self._poller.poll
self._quick_register = self._poller.register
self._quick_unregister = self._poller.unregister
def register(self, fd, events):
fd = fileno(fd)
poll_flags = 0
if events & ERR:
poll_flags |= POLLERR
if events & WRITE:
poll_flags |= POLLOUT
if events & READ:
poll_flags |= POLLIN
self._quick_register(fd, poll_flags)
return fd
def unregister(self, fd):
try:
fd = fileno(fd)
except socket.error as exc:
# we don't know the previous fd of this object
# but it will be removed by the next poll iteration.
if getattr(exc, 'errno', None) in SELECT_BAD_FD:
return fd
raise
self._quick_unregister(fd)
return fd
def poll(self, timeout, round=math.ceil,
POLLIN=POLLIN, POLLOUT=POLLOUT, POLLERR=POLLERR,
READ=READ, WRITE=WRITE, ERR=ERR, Integral=Integral):
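# The POLL*/READ/WRITE names are bound as default arguments above so they
# become fast local lookups; select.poll() expects its timeout in
# milliseconds, hence the conversion (rounded up via math.ceil) below.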
timeout = 0 if timeout and timeout < 0 else round((timeout or 0) * 1e3)
try:
event_list = self._quick_poll(timeout)
except (_selecterr, socket.error) as exc:
if getattr(exc, 'errno', None) == errno.EINTR:
return
raise
ready = []
for fd, event in event_list:
events = 0
if event & POLLIN:
events |= READ
if event & POLLOUT:
events |= WRITE
if event & POLLERR or event & POLLNVAL or event & POLLHUP:
events |= ERR
assert events
if not isinstance(fd, Integral):
fd = fd.fileno()
ready.append((fd, events))
return ready
def close(self):
self._poller = None
class _select(object):
def __init__(self):
self._all = (self._rfd,
self._wfd,
self._efd) = set(), set(), set()
def register(self, fd, events):
fd = fileno(fd)
if events & ERR:
self._efd.add(fd)
if events & WRITE:
self._wfd.add(fd)
if events & READ:
self._rfd.add(fd)
return fd
def _remove_bad(self):
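# Probe each registered fd with a zero-timeout select; descriptors the OS
# reports as bad (EBADF and friends) are dropped from all three sets.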
for fd in self._rfd | self._wfd | self._efd:
try:
_selectf([fd], [], [], 0)
except (_selecterr, socket.error) as exc:
if getattr(exc, 'errno', None) in SELECT_BAD_FD:
self.unregister(fd)
def unregister(self, fd):
try:
fd = fileno(fd)
except socket.error as exc:
# we don't know the previous fd of this object
# but it will be removed by the next poll iteration.
if getattr(exc, 'errno', None) in SELECT_BAD_FD:
return
raise
self._rfd.discard(fd)
self._wfd.discard(fd)
self._efd.discard(fd)
def poll(self, timeout):
try:
read, write, error = _selectf(
self._rfd, self._wfd, self._efd, timeout,
)
except (_selecterr, socket.error) as exc:
if getattr(exc, 'errno', None) == errno.EINTR:
return
elif getattr(exc, 'errno', None) in SELECT_BAD_FD:
return self._remove_bad()
raise
events = {}
for fd in read:
if not isinstance(fd, Integral):
fd = fd.fileno()
events[fd] = events.get(fd, 0) | READ
for fd in write:
if not isinstance(fd, Integral):
fd = fd.fileno()
events[fd] = events.get(fd, 0) | WRITE
for fd in error:
if not isinstance(fd, Integral):
fd = fd.fileno()
events[fd] = events.get(fd, 0) | ERR
return list(events.items())
def close(self):
self._rfd.clear()
self._wfd.clear()
self._efd.clear()
def _get_poller():
if detect_environment() != 'default':
# greenlet
return _select
elif epoll:
# Py2.6+ Linux
return _epoll
elif xpoll:
return _poll
elif kqueue:
# Py2.6+ on BSD / Darwin
# but kqueue has too many bugs
return _poll if xpoll else _select
else:
return _select
def poll(*args, **kwargs):
return _get_poller()(*args, **kwargs)
| bsd-3-clause | 987,859,090,038,365,000 | 29.635015 | 79 | 0.527993 | false |
maciekcc/tensorflow | tensorflow/python/estimator/canned/head.py | 12 | 32941 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstractions for the head(s) of a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import nn
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
class _Head(object):
"""Interface for the head/top of a model.
Given logits (or output of a hidden layer), a Head knows how to compute
predictions, loss, train_op, metrics and export outputs. It is meant to:
1. Simplify writing model_fn and to make model_fn more configurable
2. Support wide range of machine learning models. Since most heads can work
with logits, they can support DNN, RNN, Wide, Wide&Deep,
Global objectives, Gradient boosted trees and many other types
of machine learning models.
Common usage:
Here is simplified model_fn to build a DNN regression model.
```python
def _my_dnn_model_fn(features, labels, mode, params, config=None):
# Optionally your callers can pass head to model_fn as a param.
head = tf.contrib.learn.regression_head(...)
input = tf.contrib.layers.input_from_feature_columns(features, ...)
last_hidden_layer_out = tf.contrib.layers.stack(
input, tf.contrib.layers.fully_connected, [1000, 500])
logits = tf.contrib.layers.fully_connected(
last_hidden_layer_out, head.logits_dimension, activation_fn=None)
def _train_op_fn(loss):
return optimizer.minimize(loss)
return head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
train_op_fn=_train_op_fn)
```
There are cases where computing and applying gradients cannot be meaningfully
captured with the train_op_fn we support (for example, with a sync optimizer).
In such cases, you can take responsibility for them yourself. Here is a common
use case,
```python
estimator_spec = head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
train_op_fn=tf.contrib.learn.no_op_train_fn)
if mode == model_fn.ModeKeys.TRAIN:
optimizer = ...
sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)
update_op = tf.contrib.layers.optimize_loss(optimizer=sync,
loss=estimator_spec.loss, ...)
hooks = [sync.make_session_run_hook(is_chief)]
... update train_op and hooks in EstimatorSpec and return
```
"""
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def logits_dimension(self):
"""Size of the last dimension of the logits `Tensor`.
Typically, logits is of shape `[batch_size, logits_dimension]`.
Returns:
The expected size of the `logits` tensor.
"""
raise NotImplementedError('Calling an abstract method.')
@abc.abstractmethod
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""Returns `EstimatorSpec` that a model_fn can return.
Please note that,
+ `logits` must be provided.
+ All args must be passed via name.
Args:
features: Input `dict` of `Tensor` objects.
mode: Estimator's `ModeKeys`.
logits: logits `Tensor` to be used by the head.
labels: Labels `Tensor`, or `dict` of same.
train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
to optimize the model with the loss. This is used in TRAIN mode and
must not be None. None is allowed in other modes. If you want to
optimize loss yourself you can pass `no_op_train_fn` and then use
EstimatorSpec.loss to compute and apply gradients.
Returns:
`EstimatorSpec`.
"""
raise NotImplementedError('Calling an abstract method.')
def _maybe_expand_dim(tensor):
"""Expand the dim of `tensor` with static rank 1."""
with ops.name_scope(None, 'maybe_expand_dim', (tensor,)):
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor labels are not supported.')
static_shape = tensor.shape
if static_shape is None:
return tensor
return (array_ops.expand_dims(tensor, -1) if static_shape.ndims == 1
else tensor)
def _check_labels(labels, expected_labels_dimension):
"""Check labels type and shape."""
with ops.name_scope(None, 'labels', (labels,)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor labels are not supported.')
labels_shape = array_ops.shape(labels)
err_msg = 'labels shape must be [batch_size, {}]'.format(
expected_labels_dimension)
assert_rank = check_ops.assert_rank(labels, 2, message=err_msg)
with ops.control_dependencies([assert_rank]):
static_shape = labels.shape
if static_shape is not None:
dim1 = static_shape[1]
if (dim1 is not None) and (dim1 != expected_labels_dimension):
raise ValueError(
'labels shape must be [batch_size, labels_dimension], got %s.' %
(static_shape,))
assert_dimension = check_ops.assert_equal(
expected_labels_dimension, labels_shape[1], message=err_msg)
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(labels, name=scope)
def _check_logits(logits, expected_logits_dimension):
"""Check logits type and shape."""
with ops.name_scope(None, 'logits', (logits,)) as scope:
logits = math_ops.to_float(logits)
logits_shape = array_ops.shape(logits)
assert_rank = check_ops.assert_rank(
logits, 2, data=[logits_shape],
message='logits shape must be [batch_size, logits_dimension]')
with ops.control_dependencies([assert_rank]):
static_shape = logits.shape
if static_shape is not None:
dim1 = static_shape[1]
if (dim1 is not None) and (dim1 != expected_logits_dimension):
raise ValueError(
'logits shape must be [batch_size, logits_dimension], got %s.' %
(static_shape,))
assert_dimension = check_ops.assert_equal(
expected_logits_dimension, logits_shape[1], data=[logits_shape],
message='logits shape must be [batch_size, logits_dimension]')
with ops.control_dependencies([assert_dimension]):
return array_ops.identity(logits, name=scope)
def _indicator_labels_mean(labels, weights=None, name=None):
with ops.name_scope(name, 'labels_mean', (labels, weights)) as scope:
labels = math_ops.to_float(labels, name='labels')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
return metrics_lib.mean(labels, weights=weights, name=scope)
def _accuracy_baseline(labels_mean):
"""Return accuracy baseline based on labels mean.
This is the best the model could do by always predicting one class.
Args:
labels_mean: Tuple of value and update op.
Returns:
Tuple of value and update op.
"""
with ops.name_scope(None, 'accuracy_baseline', labels_mean):
value, update_op = labels_mean
return (
math_ops.maximum(value, 1. - value, name='value'),
math_ops.maximum(update_op, 1 - update_op, name='update_op'))
def _predictions_mean(predictions, weights=None, name=None):
with ops.name_scope(
name, 'predictions_mean', (predictions, weights)) as scope:
predictions = math_ops.to_float(predictions, name='predictions')
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
return metrics_lib.mean(predictions, weights=weights, name=scope)
def _auc(labels, predictions, weights=None, curve='ROC', name=None):
with ops.name_scope(name, 'auc', (predictions, labels, weights)) as scope:
predictions = math_ops.to_float(predictions, name='predictions')
if labels.dtype.base_dtype != dtypes.bool:
logging.warning('Casting %s labels to bool.', labels.dtype)
labels = math_ops.cast(labels, dtypes.bool)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
return metrics_lib.auc(
labels=labels, predictions=predictions, weights=weights, curve=curve,
name=scope)
def _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'accuracy_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
return metrics_lib.accuracy(
labels=labels, predictions=threshold_predictions, weights=weights,
name=scope)
def _precision_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'precision_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
precision_tensor, update_op = metrics_lib.precision_at_thresholds(
labels=labels, predictions=predictions, thresholds=(threshold,),
weights=weights, name=scope)
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _recall_at_threshold(labels, predictions, weights, threshold, name=None):
with ops.name_scope(
name, 'recall_at_%s' % threshold,
(predictions, labels, weights, threshold)) as scope:
precision_tensor, update_op = metrics_lib.recall_at_thresholds(
labels=labels, predictions=predictions, thresholds=(threshold,),
weights=weights, name=scope)
return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)
def _multi_class_head_with_softmax_cross_entropy_loss(n_classes,
weight_column=None,
label_vocabulary=None):
"""Creates a '_Head' for multi class classification.
This head expects to be fed integer labels specifying the class index.
Args:
n_classes: Number of classes, must be greater than 2 (for 2 classes, use
`_BinaryLogisticHeadWithSigmoidCrossEntropyLoss`).
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
    label_vocabulary: A list of strings representing possible label values. If
      it is not given, labels are assumed to be already encoded as integers
      within [0, n_classes). If given, labels must be of string type and take
      values from `label_vocabulary`. An error will also be raised if no
      vocabulary is provided while the labels are strings.
Returns:
An instance of `_Head` for multi class classification.
Raises:
    ValueError: if `n_classes` is invalid or `label_vocabulary` has an invalid
      type.
"""
if label_vocabulary is not None and not isinstance(label_vocabulary,
(list, tuple)):
raise ValueError('label_vocabulary should be a list. Given type: {}'.format(
type(label_vocabulary)))
return _MultiClassHeadWithSoftmaxCrossEntropyLoss(n_classes, weight_column,
label_vocabulary)
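# A minimal usage sketch for the factory above (illustrative only; the `my_*`
# names and `mode` are hypothetical values that would come from the caller's
# model_fn, they are not defined in this file):
#
#   head = _multi_class_head_with_softmax_cross_entropy_loss(
#       n_classes=3, label_vocabulary=['cat', 'dog', 'bird'])
#   spec = head.create_estimator_spec(
#       features=my_features, mode=mode, logits=my_logits,  # [batch_size, 3]
#       labels=my_labels, train_op_fn=my_train_op_fn)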
class _MultiClassHeadWithSoftmaxCrossEntropyLoss(_Head):
"""See `_multi_class_head_with_softmax_cross_entropy_loss`."""
def __init__(self, n_classes, weight_column=None, label_vocabulary=None):
if (n_classes is None) or (n_classes <= 2):
raise ValueError('n_classes must be > 2: %s.' % n_classes)
self._n_classes = n_classes
self._weight_column = weight_column
self._label_vocabulary = label_vocabulary
@property
def logits_dimension(self):
return self._n_classes
def _eval_metric_ops(self, labels, probabilities, logits,
class_ids, weights, unweighted_loss):
"""Returns the Eval metric ops."""
with ops.name_scope(
None, 'metrics',
(labels, probabilities, logits, class_ids, weights, unweighted_loss)):
keys = metric_keys.MetricKeys
metric_ops = {
# Estimator already adds a metric for loss.
# TODO(xiejw): Any other metrics?
keys.LOSS_MEAN: metrics_lib.mean(
unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
keys.ACCURACY: metrics_lib.accuracy(
labels=labels, predictions=class_ids, weights=weights,
name=keys.ACCURACY),
}
return metric_ops
def _label_ids(self, labels):
"""Converts labels to integer id space."""
if self._label_vocabulary is None:
if not labels.dtype.is_integer:
        raise ValueError('Labels dtype should be integer. '
'Instead got %s.' % labels.dtype)
label_ids = labels
else:
if labels.dtype != dtypes.string:
raise ValueError('Labels dtype should be string if there is a '
'vocabulary. Instead got {}'.format(labels.dtype))
label_ids = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels)
return _assert_range(label_ids, self._n_classes)
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `Head`."""
with ops.name_scope('head'):
logits = _check_logits(logits, self.logits_dimension)
# Predict.
pred_keys = prediction_keys.PredictionKeys
with ops.name_scope(None, 'predictions', (logits,)):
# class_ids's shape is [batch_size]
class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
class_ids = array_ops.expand_dims(class_ids, axis=(1,))
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
name='class_string_lookup')
classes = table.lookup(class_ids)
else:
classes = string_ops.as_string(class_ids, name='str_classes')
probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
predictions = {
pred_keys.LOGITS: logits,
pred_keys.PROBABILITIES: probabilities,
# Expand to [batch_size, 1]
pred_keys.CLASS_IDS: class_ids,
pred_keys.CLASSES: classes,
}
if mode == model_fn.ModeKeys.PREDICT:
batch_size = array_ops.shape(probabilities)[0]
export_class_list = self._label_vocabulary
if not export_class_list:
export_class_list = string_ops.as_string(
math_ops.range(self._n_classes))
export_output_classes = array_ops.tile(
input=array_ops.expand_dims(input=export_class_list, axis=0),
multiples=[batch_size, 1])
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'':
export_output.ClassificationOutput(
scores=probabilities,
# `ClassificationOutput` requires string classes.
classes=export_output_classes)
})
# Eval.
label_ids = self._label_ids(_check_labels(_maybe_expand_dim(labels), 1))
unweighted_loss = losses.sparse_softmax_cross_entropy(
labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
# Restore the squeezed dim, so unweighted_loss matches the weights shape.
unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=(1,))
weights = _weights(features, self._weight_column)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=label_ids,
probabilities=probabilities,
logits=logits,
class_ids=class_ids,
unweighted_loss=unweighted_loss,
weights=weights))
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
with ops.name_scope(''):
summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
losses.compute_weighted_loss(
unweighted_loss,
weights=weights,
reduction=losses.Reduction.MEAN))
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=training_loss,
train_op=train_op_fn(training_loss))
def _binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=None, thresholds=None, label_vocabulary=None):
"""Creates a `Head` for single label binary classification.
This head uses `sigmoid_cross_entropy_with_logits` loss.
This head expects to be fed float labels of shape `(batch_size, 1)`.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
thresholds: Iterable of floats in the range `(0, 1)`. For binary
classification metrics such as precision and recall, an eval metric is
generated for each threshold value. This threshold is applied to the
logistic values to determine the binary classification (i.e., above the
      threshold is `true`, below is `false`).
    label_vocabulary: A list of strings representing possible label values. If
      it is not given, labels are assumed to be already encoded within [0, 1].
      If given, labels must be of string type and take values from
      `label_vocabulary`. An error will also be raised if no vocabulary is
      provided while the labels are strings.
Returns:
An instance of `Head` for binary classification.
Raises:
ValueError: if `thresholds` contains a value outside of `(0, 1)`.
"""
thresholds = tuple(thresholds) if thresholds else tuple()
if label_vocabulary is not None and not isinstance(label_vocabulary,
(list, tuple)):
raise ValueError('label_vocabulary should be a list. Given type: {}'.format(
type(label_vocabulary)))
for threshold in thresholds:
if (threshold <= 0.0) or (threshold >= 1.0):
raise ValueError('thresholds not in (0, 1): %s.' % (thresholds,))
return _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(
weight_column=weight_column,
thresholds=thresholds,
label_vocabulary=label_vocabulary)
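# A minimal usage sketch for the factory above (illustrative only; the `my_*`
# names and `mode` are hypothetical values supplied by the caller's model_fn):
#
#   head = _binary_logistic_head_with_sigmoid_cross_entropy_loss(
#       thresholds=(0.25, 0.5, 0.75))
#   spec = head.create_estimator_spec(
#       features=my_features, mode=mode, logits=my_logits,  # [batch_size, 1]
#       labels=my_labels, train_op_fn=my_train_op_fn)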
class _BinaryLogisticHeadWithSigmoidCrossEntropyLoss(_Head):
"""See `_binary_logistic_head_with_sigmoid_cross_entropy_loss`."""
def __init__(self, weight_column=None, thresholds=None,
label_vocabulary=None):
self._weight_column = weight_column
self._thresholds = thresholds
self._label_vocabulary = label_vocabulary
@property
def logits_dimension(self):
return 1
def _eval_metric_ops(self,
labels,
logits,
logistic,
scores,
class_ids,
unweighted_loss,
weights=None):
with ops.name_scope(None, 'metrics', (labels, logits, logistic, scores,
class_ids, unweighted_loss, weights)):
keys = metric_keys.MetricKeys
labels_mean = _indicator_labels_mean(
labels=labels, weights=weights, name=keys.LABEL_MEAN)
metric_ops = {
# Estimator already adds a metric for loss.
keys.LOSS_MEAN:
metrics_lib.mean(
unweighted_loss, weights=weights, name=keys.LOSS_MEAN),
keys.ACCURACY:
metrics_lib.accuracy(
labels=labels,
predictions=class_ids,
weights=weights,
name=keys.ACCURACY),
keys.PREDICTION_MEAN:
_predictions_mean(
predictions=logistic,
weights=weights,
name=keys.PREDICTION_MEAN),
keys.LABEL_MEAN:
labels_mean,
keys.ACCURACY_BASELINE:
_accuracy_baseline(labels_mean),
keys.AUC:
_auc(
labels=labels,
predictions=logistic,
weights=weights,
name=keys.AUC),
keys.AUC_PR:
_auc(
labels=labels,
predictions=logistic,
weights=weights,
curve='PR',
name=keys.AUC_PR)
}
for threshold in self._thresholds:
accuracy_key = keys.ACCURACY_AT_THRESHOLD % threshold
metric_ops[accuracy_key] = _accuracy_at_threshold(
labels=labels, predictions=logistic, weights=weights,
threshold=threshold, name=accuracy_key)
# Precision for positive examples.
precision_key = keys.PRECISION_AT_THRESHOLD % threshold
metric_ops[precision_key] = _precision_at_threshold(
labels=labels, predictions=logistic, weights=weights,
threshold=threshold, name=precision_key)
# Recall for positive examples.
recall_key = keys.RECALL_AT_THRESHOLD % threshold
metric_ops[recall_key] = _recall_at_threshold(
labels=labels, predictions=logistic, weights=weights,
threshold=threshold, name=recall_key)
return metric_ops
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `Head`."""
# Predict.
with ops.name_scope('head'):
with ops.name_scope(None, 'predictions', (logits,)):
pred_keys = prediction_keys.PredictionKeys
logits = _check_logits(logits, self.logits_dimension)
logistic = math_ops.sigmoid(logits, name=pred_keys.LOGISTIC)
two_class_logits = array_ops.concat(
(array_ops.zeros_like(logits), logits), 1, name='two_class_logits')
scores = nn.softmax(two_class_logits, name=pred_keys.PROBABILITIES)
class_ids = array_ops.reshape(
math_ops.argmax(two_class_logits, axis=1), (-1, 1), name='classes')
if self._label_vocabulary:
table = lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=self._label_vocabulary,
name='class_string_lookup')
classes = table.lookup(class_ids)
else:
classes = string_ops.as_string(class_ids, name='str_classes')
predictions = {
pred_keys.LOGITS: logits,
pred_keys.LOGISTIC: logistic,
pred_keys.PROBABILITIES: scores,
pred_keys.CLASS_IDS: class_ids,
pred_keys.CLASSES: classes,
}
if mode == model_fn.ModeKeys.PREDICT:
batch_size = array_ops.shape(logistic)[0]
export_class_list = self._label_vocabulary
if not export_class_list:
export_class_list = string_ops.as_string([0, 1])
export_output_classes = array_ops.tile(
input=array_ops.expand_dims(input=export_class_list, axis=0),
multiples=[batch_size, 1])
classifier_output = export_output.ClassificationOutput(
scores=scores,
# `ClassificationOutput` requires string classes.
classes=export_output_classes)
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'': classifier_output, # to be same as other heads.
'classification': classifier_output, # to be called by name.
_DEFAULT_SERVING_KEY: classifier_output, # default
'regression': export_output.RegressionOutput(value=logistic)
})
# Eval.
labels = _check_labels(_maybe_expand_dim(labels), self.logits_dimension)
if self._label_vocabulary is not None:
labels = lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self._label_vocabulary),
name='class_id_lookup').lookup(labels)
labels = math_ops.to_float(labels)
labels = _assert_range(labels, 2)
unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=logits, name='loss')
weights = _weights(features, self._weight_column)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
if mode == model_fn.ModeKeys.EVAL:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=training_loss,
eval_metric_ops=self._eval_metric_ops(
labels=labels,
logits=logits,
logistic=logistic,
scores=scores,
class_ids=class_ids,
unweighted_loss=unweighted_loss,
weights=weights))
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
with ops.name_scope(''):
summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
losses.compute_weighted_loss(
unweighted_loss,
weights=weights,
reduction=losses.Reduction.MEAN))
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=training_loss,
train_op=train_op_fn(training_loss))
def _regression_head_with_mean_squared_error_loss(weight_column=None,
label_dimension=1):
"""Creates a `_Head` for regression using the mean squared loss.
Args:
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
label_dimension: Number of regression labels per example. This is the size
of the last dimension of the labels `Tensor` (typically, this has shape
`[batch_size, label_dimension]`).
Returns:
An instance of `_Head` for linear regression.
"""
return _RegressionHeadWithMeanSquaredErrorLoss(
weight_column=weight_column, label_dimension=label_dimension)
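# A minimal usage sketch for the factory above (illustrative only; the `my_*`
# names and `mode` are hypothetical values supplied by the caller's model_fn):
#
#   head = _regression_head_with_mean_squared_error_loss(label_dimension=1)
#   spec = head.create_estimator_spec(
#       features=my_features, mode=mode, logits=my_logits,  # [batch_size, 1]
#       labels=my_labels, train_op_fn=my_train_op_fn)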
class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
"""`Head` for regression using the mean squared loss."""
def __init__(self, label_dimension, weight_column=None):
"""`Head` for regression."""
if label_dimension < 1:
raise ValueError('Invalid label_dimension %s.' % label_dimension)
self._logits_dimension = label_dimension
self._weight_column = weight_column
@property
def logits_dimension(self):
return self._logits_dimension
def create_estimator_spec(
self, features, mode, logits, labels=None, train_op_fn=None):
"""See `Head`."""
# Predict.
with ops.name_scope('head'):
logits = _check_logits(logits, self._logits_dimension)
predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
if mode == model_fn.ModeKeys.PREDICT:
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={'': export_output.RegressionOutput(value=logits)})
# Eval.
labels = _check_labels(_maybe_expand_dim(math_ops.to_float(labels)),
self._logits_dimension)
unweighted_loss = losses.mean_squared_error(
labels=labels, predictions=logits, reduction=losses.Reduction.NONE)
weights = _weights(features, self._weight_column)
training_loss = losses.compute_weighted_loss(
unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
if mode == model_fn.ModeKeys.EVAL:
# Estimator already adds a metric for loss.
eval_metric_ops = {
metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
unweighted_loss, weights=weights)
}
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.EVAL,
predictions=predictions,
loss=training_loss,
eval_metric_ops=eval_metric_ops)
# Train.
if train_op_fn is None:
raise ValueError('train_op_fn can not be None.')
with ops.name_scope(''):
summary.scalar(metric_keys.MetricKeys.LOSS, training_loss)
summary.scalar(metric_keys.MetricKeys.LOSS_MEAN,
losses.compute_weighted_loss(
unweighted_loss,
weights=weights,
reduction=losses.Reduction.MEAN))
return model_fn.EstimatorSpec(
mode=model_fn.ModeKeys.TRAIN,
predictions=predictions,
loss=training_loss,
train_op=train_op_fn(training_loss))
def _assert_range(labels, n_classes):
with ops.name_scope(None, 'assert_range', (labels,)):
assert_less = check_ops.assert_less(
labels,
ops.convert_to_tensor(n_classes, dtype=labels.dtype),
        message='Label IDs must be < n_classes')
assert_greater = check_ops.assert_non_negative(
        labels, message='Label IDs must be >= 0')
with ops.control_dependencies((assert_less, assert_greater)):
return array_ops.identity(labels)
def _weights(features, weight_column):
"""Fetches weights from features."""
with ops.name_scope(None, 'weights', values=features.values()):
if weight_column is None:
return 1.
if isinstance(weight_column, six.string_types):
weight_column = feature_column_lib.numeric_column(key=weight_column)
if not isinstance(weight_column, feature_column_lib._NumericColumn): # pylint: disable=protected-access
raise TypeError('Weight column must be either a string or _NumericColumn.'
' Given type: {}.'.format(type(weight_column)))
weights = weight_column._get_dense_tensor( # pylint: disable=protected-access
feature_column_lib._LazyBuilder(features)) # pylint: disable=protected-access
if not (weights.dtype.is_floating or weights.dtype.is_integer):
raise ValueError('Weight column should be castable to float. '
'Given dtype: {}'.format(weights.dtype))
weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
return weights
| apache-2.0 | -3,889,266,735,990,661,000 | 41.124041 | 108 | 0.64488 | false |
andressanchezanillo/WIMOS_ | Software/Desktop/Wimos Desktop/src/NetworkThreads.py | 1 | 24548 | from random import randint
from datetime import datetime
from PyQt4 import QtCore, QtGui
from NetworkList import QNetworkList
from serial.tools import list_ports
import numpy
import threading, Queue
import serial
import sys
import os
import re
import threading
import time
class QNetworkThreads(QtGui.QWidget):
signalInfo = QtCore.pyqtSignal(int, str, int,
int, str, str,
str, str, int,
int, int, str)
signalAlert = QtCore.pyqtSignal(int, str, int,
int, str, str,
int, int, int,
int, int, str)
signalCenter = QtCore.pyqtSignal(str, int, str, int, int )
def __init__(self):
super(QNetworkThreads, self).__init__()
#General Windows
self.NetworkLayout = QtGui.QVBoxLayout(self)
self.NetworkListScroll = QNetworkList(self)
self.NetworkLayout.addWidget(self.NetworkListScroll)
self.setLayout(self.NetworkLayout)
# Gui Signals and Slot
self.signalInfo.connect(self.NetworkListScroll.addInfo)
self.signalAlert.connect(self.NetworkListScroll.addAlert)
self.signalCenter.connect(self.NetworkListScroll.addCenter)
# Sharing structure
self.SerialInput = Queue.Queue()
self.ProcessDataInput = Queue.Queue()
# Serial Thread.
#t1.isAlive()
items = []
for comPort in list(list_ports.comports()):
items.append(comPort[0])
        item, ok = QtGui.QInputDialog.getItem(self, "Select COM port",
                                               "COM Ports available", items, 0, False)
        # Use the port chosen in the dialog; fall back to COM6 if cancelled.
        self.SerialPort = str(item) if ok else 'COM6'
self.SerialThreadRunning = True
self.SerialThread = threading.Thread(target=self.runSerial, args=())
self.SerialThread.daemon = True
self.SerialThread.start()
# Process Thread.
self.ProcessThread = threading.Thread(target=self.runProcess, args=())
self.ProcessThread.daemon = True
self.ProcessThread.start()
# Refresh Thread.
self.RefreshThread = threading.Thread(target=self.runRefresh, args=())
self.RefreshThread.daemon = True
self.RefreshThread.start()
def __exit__(self, exc_type, exc_value, traceback):
self.SerialThreadRunning = False
def runSerial(self):
#Daemon for Serial Management.
while True:
# Open Serial
SerialPort = serial.Serial(port= self.SerialPort,\
baudrate=115200,\
parity=serial.PARITY_NONE,\
stopbits=serial.STOPBITS_ONE,\
bytesize=serial.EIGHTBITS,\
timeout=1)
# Clear Serial Buffer
SerialBuffer = ""
print "["+datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"] connected to: " + SerialPort.portstr
# While Serial is Open and Connected.
while SerialPort.isOpen() and self.SerialThreadRunning:
# If buffer Serial is greater than 300
if(self.SerialInput.qsize() > 300):
# Print warning.
print "["+datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"] Oversize Serial Thread." + str(self.SerialInput.qsize())
# Bytes available to be read
bytesToRead = SerialPort.inWaiting()
# Bytes available
if bytesToRead > 0:
# Concatenate the Bytes available with the Serial Buffer.
SerialBuffer += SerialPort.read(bytesToRead)
# Split each line.
SerialSplitted = SerialBuffer.split("\r\n")
# if there are more than 2 lines.
if len(SerialSplitted) >= 2:
# For each line
for line in SerialSplitted:
# Input into share structure.
self.SerialInput.put([datetime.now(),line])
# Get the remainer buffer.
SerialBuffer = SerialSplitted[-1]
# Close Serial.
SerialPort.close()
def processHeader(self, datainput):
frameID = int(re.search(r'\d+', datainput[0]).group())
frameName = datainput[0]
idCenter = int(datainput[4], 16)
idHost = int(datainput[3], 16)
return [frameID, frameName, idCenter, idHost]
def processDateTime(self, datainput):
Date = str(int(datainput[5], 16))+"-"+str(int(datainput[6], 16))+"-"+str(int(datainput[7], 16))
Time = str(int(datainput[10], 16))+":"+str(int(datainput[9], 16))+":"+str(int(datainput[8], 16))
return [Date, Time]
def processAlertValues(self, datainput):
Alert1 = int(datainput[15], 16)
Alert2 = int(datainput[16], 16)
Alert3 = int(datainput[17], 16)
Alert4 = int(datainput[18], 16)
Alert5 = int(datainput[19], 16)
return Alert1, Alert2, Alert3, Alert4, Alert5
def processGPSValues(self, datainput):
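        # The payload appears to encode each coordinate as one whole-degrees
        # byte plus a 32-bit little-endian minutes value scaled by 1e4 (the
        # sign is carried by that 32-bit field); dividing it by 600000
        # (60 * 1e4) converts it back into a fraction of a degree.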
#print datainput
# Calculate the latitude.
LatitudeDegree = int(datainput[11], 16)
LatitudeMinutes = int(datainput[15]+datainput[14]+datainput[13]+datainput[12], 16)
#print str(LatitudeDegree) + " -- " + str(LatitudeMinutes)
if LatitudeMinutes > 2147483647:
LatitudeMinutes -= 4294967294
LatitudeDegree = LatitudeDegree * -1
Latitude = str("%.5f" % (float(LatitudeDegree) + float(float(LatitudeMinutes)/600000)))
#print str(Latitude)
# Calculate the longitude.
LongitudeDegree = int(datainput[16], 16)
LongitudeMinutes = int(datainput[20]+datainput[19]+datainput[18]+datainput[17], 16)
#print str(LongitudeDegree) + " -- " + str(LongitudeMinutes)
if LongitudeMinutes > 2147483647:
LongitudeMinutes -= 4294967294
LongitudeDegree = LongitudeDegree * -1
Longitude = str("%.5f" % (float(LongitudeDegree) + float(float(LongitudeMinutes)/600000)))
#print str(Longitude)
return Latitude, Longitude
def processStatus(self, datainput):
memory = int(datainput[21], 16)
battery = int(datainput[22], 16)
status = datainput[24]+datainput[25]+datainput[26]
return memory, battery, status
def processCenterInfo(self, datainput):
frameName = datainput[0]
#idCenter = datainput[1]
#messageRatio = int(datainput[2], 16)
#messageTime = int(datainput[3], 16)
idCenter = 0
messageRatio = int(datainput[1], 16)
messageTime = int(datainput[2], 16)
return frameName, idCenter, messageRatio, messageTime
def runProcess(self):
# Processor Data Daemon.
while True:
# If there is some data to be processed.
if self.SerialInput.qsize() > 0:
# Get the datetime and Data string.
DateTime,Data = self.SerialInput.get()
# If the data is not null
if(len(Data) > 0):
# Split each space
DataSplitted = Data.split(' ')
# If the data is valid.
if(DataSplitted[-1] == 'ACK-OK' or DataSplitted[-1] == 'OK'):
# If the frame is a Alert message.
if(DataSplitted[0] == 'FRAME162:'):
# Process message header.
[_FrameID, _FrameName, _IdCenter, _IdHost] = self.processHeader(DataSplitted)
# Process TimeStamp.
[_SystemDate, _SystemTime] = self.processDateTime(DataSplitted)
# Process the Alert.
[_AlertA1, _AlertA2, _AlertA3, _AlertA4, _AlertA5] = self.processAlertValues(DataSplitted)
# Input the new Data for updating.
self.ProcessDataInput.put([_FrameName,[ _FrameID, _FrameName,
_IdCenter, _IdHost,
_SystemDate, _SystemTime,
_AlertA1, _AlertA2, _AlertA3,
_AlertA4, _AlertA5,
DateTime.strftime("%Y-%m-%d %H:%M:%S")]])
# If the frame is Info message.
elif(DataSplitted[0] == 'FRAME161:'):
# Process message header.
[_FrameID, _FrameName, _IdCenter, _IdHost] = self.processHeader(DataSplitted)
# Process TimeStamp.
[_SystemDate, _SystemTime] = self.processDateTime(DataSplitted)
# Process the GPS data.
[_GpsLatitude, _GpsLongitude] = self.processGPSValues(DataSplitted)
# Process the Status.
[_Memory, _Battery, _Status] = self.processStatus(DataSplitted)
# Input the new Data for updating.
self.ProcessDataInput.put([_FrameName,[ _FrameID, _FrameName,
_IdCenter, _IdHost,
_SystemDate, _SystemTime,
_GpsLatitude, _GpsLongitude,
_Memory, _Battery, _Status,
DateTime.strftime("%Y-%m-%d %H:%M:%S")]])
# If the frame is a Center information.
elif(DataSplitted[0] == 'INFO001:'):
# Process Center Info.
[_FrameName, _CenterID, _MessageRatio, _MessageTime] = self.processCenterInfo(DataSplitted)
# Input the new Data for updating.
self.ProcessDataInput.put([_FrameName,[ _FrameName, _CenterID,
_MessageRatio, _MessageTime,
DateTime.strftime("%Y-%m-%d %H:%M:%S")]])
else:
print "["+datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"] ACK Error found"
def runRefresh(self):
""" Method that runs forever """
while True:
if self.ProcessDataInput.qsize() > 0:
Data = self.ProcessDataInput.get()
# If it is a Alert message.
if (Data[0] == 'FRAME162:'):
# Emit the signal Alert.
self.signalAlert.emit(Data[-1][0],Data[-1][1],
Data[-1][2],Data[-1][3],
Data[-1][4],Data[-1][5],
Data[-1][6],Data[-1][7],
Data[-1][8],Data[-1][9],
Data[-1][10],Data[-1][11])
# If it is a Info message.
elif (Data[0] == 'FRAME161:'):
# Emit the signal Info.
self.signalInfo.emit(Data[-1][0],Data[-1][1],
Data[-1][2],Data[-1][3],
Data[-1][4],Data[-1][5],
Data[-1][6],Data[-1][7],
Data[-1][8],Data[-1][9],
Data[-1][10],Data[-1][11])
# If it is a Center Info.
elif (Data[0] == 'INFO001:'):
self.signalCenter.emit(Data[-1][0],Data[-1][1],
Data[-1][4],Data[-1][2],
Data[-1][3])
def n8UT01(self):
testCount = 0
testSuccess = 0
strVal = self.processHeader("FRAME162: ff 06 10 00 15 05 11 06 2D 14 00 00 00 00 00 01 02 03 04 0A ACK-OK".split(' '))
testCount += 1
if strVal[0] == 162:
testSuccess += 1
testCount += 1
if strVal[1] == "FRAME162:":
testSuccess += 1
testCount += 1
if strVal[2] == 0:
testSuccess += 1
testCount += 1
if strVal[3] == 16:
testSuccess += 1
return (testSuccess/testCount)*100
def n8UT02(self):
testCount = 0
testSuccess = 0
strVal = self.processDateTime("FRAME162: ff 06 10 00 15 05 11 06 2D 14 00 00 00 00 00 01 02 03 04 0A ACK-OK".split(' '))
testCount += 1
if strVal[0] == "21-5-17":
testSuccess += 1
testCount += 1
if strVal[1] == "20:45:6":
testSuccess += 1
return (testSuccess/testCount)*100
def n8UT03(self):
testCount = 0
testSuccess = 0
strVal = self.processAlertValues("FRAME162: ff 06 10 00 15 05 11 06 2D 14 00 00 00 00 00 01 02 03 04 0A ACK-OK".split(' '))
testCount += 1
if strVal[0] == 0:
testSuccess += 1
testCount += 1
if strVal[1] == 1:
testSuccess += 1
testCount += 1
if strVal[2] == 2:
testSuccess += 1
testCount += 1
if strVal[3] == 3:
testSuccess += 1
testCount += 1
if strVal[4] == 4:
testSuccess += 1
return (testSuccess/testCount)*100
def n8UT04(self):
testCount = 0
testSuccess = 0
strVal = self.processGPSValues("FRAME161: ff 05 10 00 15 05 11 06 2D 14 5A 01 30 40 50 4A 02 60 70 80 ff 00 ff 23 6b 0a ACK-OK".split(' '))
testCount += 1
if strVal[0] == "90.1346383873":
testSuccess += 1
testCount += 1
if strVal[1] == "-74.2140119036":
testSuccess += 1
return (testSuccess/testCount)*100
def n8UT05(self):
testCount = 0
testSuccess = 0
strVal = self.processCenterInfo("INFO001: 20 80 OK".split(' '))
#return frameName, idCenter, messageRatio, messageTime
testCount += 1
if strVal[0] == "INFO001:":
testSuccess += 1
testCount += 1
if strVal[1] == 0:
testSuccess += 1
testCount += 1
if strVal[2] == 32:
testSuccess += 1
testCount += 1
if strVal[3] == 128:
testSuccess += 1
return (testSuccess/testCount)*100
def n8VT01(self):
testCount = 0
testSuccess = 0
self.SerialInput.queue.clear()
self.ProcessDataInput.queue.clear()
for i in range(10):
self.SerialInput.put([datetime.now(),"FRAME162: ff 06 10 00 15 05 11 06 2D 14 00 00 00 00 00 01 02 03 04 0A ACK-OK"])
start = time.time()*1000
while((time.time()*1000) - start < 1000 and self.SerialInput.qsize() > 0):
pass
testCount += 1
if self.SerialInput.qsize() == 0:
testSuccess += 1
start = time.time()*1000
while(time.time()*1000) - start < 1000 and (self.ProcessDataInput.qsize() <= 0):
pass
testCount += 1
if self.ProcessDataInput.qsize() > 0:
testSuccess += 1
start = time.time()*1000
while((time.time()*1000) - start < 1000 and len(self.NetworkListScroll.NetworkItemArray) <= 0):
pass
testCount += 1
if len(self.NetworkListScroll.NetworkItemArray) > 0:
testSuccess += 1
return (testSuccess/testCount)*100
def n8VT02(self):
testCount = 0
testSuccess = 0
self.SerialInput.queue.clear()
self.ProcessDataInput.queue.clear()
for i in range(10):
self.SerialInput.put([datetime.now(),"FRAME162: ff 06 10 00 15 05 11 06 2D 14 00 00 00 00 00 01 02 03 04 0A ACK-OK"])
start = time.time()*1000
while(time.time()*1000) - start < 5000 and (self.ProcessDataInput.qsize() <= 0):
pass
strVal = self.ProcessDataInput.get()
testCount += 1
if strVal[0] == "FRAME162:":
testSuccess += 1
testCount += 1
if strVal[1][0] == 162:
testSuccess += 1
testCount += 1
if strVal[1][1] == "FRAME162:":
testSuccess += 1
testCount += 1
if strVal[1][2] == 00:
testSuccess += 1
testCount += 1
if strVal[1][3] == 16:
testSuccess += 1
testCount += 1
if strVal[1][4] == "21-5-17":
testSuccess += 1
testCount += 1
if strVal[1][5] == "20:45:6":
testSuccess += 1
testCount += 1
if strVal[1][6] == 0:
testSuccess += 1
testCount += 1
if strVal[1][7] == 1:
testSuccess += 1
testCount += 1
if strVal[1][8] == 2:
testSuccess += 1
testCount += 1
if strVal[1][9] == 3:
testSuccess += 1
testCount += 1
if strVal[1][10] == 4:
testSuccess += 1
return (testSuccess/testCount)*100
def n8VT03(self):
testCount = 0
testSuccess = 0
self.SerialInput.queue.clear()
self.ProcessDataInput.queue.clear()
for i in range(10):
self.SerialInput.put([datetime.now(),"FRAME161: ff 05 10 00 15 05 11 06 2D 14 5A 01 30 40 50 4A 02 60 70 80 ff 00 ff 23 6b 0a ACK-OK"])
start = time.time()*1000
while(time.time()*1000) - start < 5000 and (self.ProcessDataInput.qsize() <= 0):
pass
strVal = self.ProcessDataInput.get()
testCount += 1
if strVal[0] == "FRAME161:":
testSuccess += 1
testCount += 1
if strVal[1][0] == 161:
testSuccess += 1
testCount += 1
if strVal[1][1] == "FRAME161:":
testSuccess += 1
testCount += 1
if strVal[1][2] == 0:
testSuccess += 1
testCount += 1
if strVal[1][3] == 16:
testSuccess += 1
testCount += 1
if strVal[1][4] == "21-5-17":
testSuccess += 1
testCount += 1
if strVal[1][5] == "20:45:6":
testSuccess += 1
testCount += 1
if strVal[1][6] == "90.1346383873":
testSuccess += 1
testCount += 1
if strVal[1][7] == "-74.2140119036":
testSuccess += 1
testCount += 1
if strVal[1][8] == 255:
testSuccess += 1
testCount += 1
if strVal[1][9] == 0:
testSuccess += 1
testCount += 1
if strVal[1][10] == "236b0a":
testSuccess += 1
return (testSuccess/testCount)*100
def n8VT04(self):
testCount = 0
testSuccess = 0
self.SerialInput.queue.clear()
self.ProcessDataInput.queue.clear()
for i in range(10):
self.SerialInput.put([datetime.now(),"INFO001: 20 80 OK"])
start = time.time()*1000
while(time.time()*1000) - start < 5000 and (self.ProcessDataInput.qsize() <= 0):
pass
strVal = self.ProcessDataInput.get()
testCount += 1
if strVal[0] == "INFO001:":
testSuccess += 1
testCount += 1
if strVal[1][0] == "INFO001:":
testSuccess += 1
testCount += 1
if strVal[1][1] == 0:
testSuccess += 1
testCount += 1
if strVal[1][2] == 32:
testSuccess += 1
testCount += 1
if strVal[1][3] == 128:
testSuccess += 1
return (testSuccess/testCount)*100
def runVT(self):
resultn8VT01 = self.n8VT01()
if ( resultn8VT01 == 100):
print("[TEST] n8VT01\t[OK]\tn8.VT01 = 100%")
else:
print("[TEST] n8VT01\t[ERROR]\tn8.VT01 = "+(str(resultn8VT01))+"%")
resultn8VT02 = self.n8VT02()
if ( resultn8VT02 == 100):
print("[TEST] n8VT02\t[OK]\tn8.VT02 = 100%")
else:
print("[TEST] n8VT02\t[ERROR]\tn8.VT02 = "+(str(resultn8VT02))+"%")
resultn8VT03 = self.n8VT03()
if ( resultn8VT03 == 100):
print("[TEST] n8VT03\t[OK]\tn8.VT03 = 100%")
else:
print("[TEST] n8VT03\t[ERROR]\tn8.VT03 = "+(str(resultn8VT03))+"%")
resultn8VT04 = self.n8VT04()
if ( resultn8VT04 == 100):
print("[TEST] n8VT04\t[OK]\tn8.VT04 = 100%")
else:
print("[TEST] n8VT04\t[ERROR]\tn8.VT04 = "+(str(resultn8VT04))+"%")
def TestUTs(self):
resultn8UT01 = self.n8UT01()
if ( resultn8UT01 == 100):
print("[TEST] n8UT01\t[OK]\tn8.UT01 = 100%")
else:
print("[TEST] n8UT01\t[ERROR]\tn8.UT01 = "+(str(resultn8UT01))+"%")
resultn8UT02 = self.n8UT02()
if ( resultn8UT02 == 100):
print("[TEST] n8UT02\t[OK]\tn8.UT02 = 100%")
else:
print("[TEST] n8UT02\t[ERROR]\tn8.UT02 = "+(str(resultn8UT02))+"%")
resultn8UT03 = self.n8UT03()
if ( resultn8UT03 == 100):
print("[TEST] n8UT03\t[OK]\tn8.UT03 = 100%")
else:
print("[TEST] n8UT03\t[ERROR]\tn8.UT03 = "+(str(resultn8UT03))+"%")
resultn8UT04 = self.n8UT04()
if ( resultn8UT04 == 100):
print("[TEST] n8UT04\t[OK]\tn8.UT04 = 100%")
else:
print("[TEST] n8UT04\t[ERROR]\tn8.UT04 = "+(str(resultn8UT04))+"%")
resultn8UT05 = self.n8UT05()
if ( resultn8UT05 == 100):
print("[TEST] n8UT05\t[OK]\tn8.UT05 = 100%")
else:
print("[TEST] n8UT05\t[ERROR]\tn8.UT05 = "+(str(resultn8UT05))+"%")
self.NetworkListScroll.TestUTs()
def TestVT(self):
ThreadVT = threading.Thread(target=self.runVT, args=())
ThreadVT.daemon = True
ThreadVT.start()
#self.NetworkThread.TestVT()
| gpl-3.0 | -1,014,202,317,485,580,000 | 33.623413 | 147 | 0.45906 | false |
qevo/py_file_helper | tests/manage.py | 1 | 9930 | """Tests for the file_helper.manage module"""
import os, sys, unittest, inspect
from re import IGNORECASE
from BaseTest import BaseTestWrapper
class FindFileTestCase(BaseTestWrapper.BaseTest):
"""manage.find_file() test cases"""
def test_this_file(self):
"""Test if this same file can be found by searching this same directory"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"re_filter": ['*' + name],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False,
"case_i": False
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_this_file_case_i(self):
"""Test if this same file can be found by searching this same directory case insensitive"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"re_filter": ['*' + name.upper()],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False,
"case_i": True
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_this_dir(self):
"""Test if the current working directory can be found by searching its parent directory"""
path = os.getcwd()
name = os.path.basename(path)
args = {
"re_filter": ['*' + name],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": False,
"get_dir": True,
"case_i": False
}
result = self._bt['func'](**args)
self.assertEqual(path, ''.join(result))
def test_this_file_recurse(self):
"""Test if this same file can be found recursively"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"re_filter": ['*' + name],
"branch": os.path.abspath(os.path.dirname(path) + '/..'),
"recurse": True,
"depth": 0,
"get_file": True,
"get_dir": False,
"case_i": False
}
result = self._bt['func'](**args)
# it is only because 'branch' is an absolute path that we can expect 'path' to be in 'result'
self.assertIn(path, result)
def test_this_dir_recurse(self):
"""Test if the current working directory can be found by searching its parent's parent directory"""
path = os.getcwd()
name = os.path.basename(path)
args = {
"re_filter": ['*' + name],
"branch": os.path.abspath(os.path.dirname(path) + '/..'),
"recurse": True,
"depth": 0,
"get_file": False,
"get_dir": True,
"case_i": False
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_os_mod_file(self):
"""Test if this same file can be found"""
path = inspect.getfile(sys.modules['os'])
name = os.path.basename(path)
args = {
"re_filter": ['*' + name],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False,
"case_i": False
}
result = self._bt['func'](**args)
self.assertEqual(path, ''.join(result))
class FindFileReTestCase(BaseTestWrapper.BaseTest):
"""manage.find_file_re() test cases"""
def test_this_file(self):
"""Test if this same file can be found by searching this same directory"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"re_filter": ['^.*' + name + '$'],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False,
"re_flag": 0
}
result = self._bt['func'](**args)
self.assertEqual(path, ''.join(result))
def test_this_file_case_i(self):
"""Test if this same file can be found by searching this same directory case insensitive"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"re_filter": ['^.*' + name + '$'],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False,
"re_flag": IGNORECASE
}
result = self._bt['func'](**args)
self.assertEqual(path, ''.join(result))
def test_this_dir(self):
"""Test if the current working directory can be found by searching its parent directory"""
path = os.getcwd()
name = os.path.basename(path)
args = {
"re_filter": ['^.*' + name + '$'],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": False,
"get_dir": True,
"re_flag": 0
}
result = self._bt['func'](**args)
self.assertEqual(path, ''.join(result))
def test_this_file_recurse(self):
"""Test if this same file can be found recursively"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"re_filter": ['^.*' + name + '$'],
"branch": os.path.abspath(os.path.dirname(path) + '/..'),
"recurse": True,
"depth": 0,
"get_file": True,
"get_dir": False,
"re_flag": 0
}
result = self._bt['func'](**args)
# it is only because 'branch' is an absolute path that we can expect 'path' to be in 'result'
self.assertIn(path, result)
def test_this_dir_recurse(self):
"""Test if the current working directory can be found by searching its parent's parent directory"""
path = os.getcwd()
name = os.path.basename(path)
args = {
"re_filter": ['^.*' + name + '$'],
"branch": os.path.abspath(os.path.dirname(path) + '/..'),
"recurse": True,
"depth": 0,
"get_file": False,
"get_dir": True,
"re_flag": 0
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_os_mod_file(self):
"""Test if this same file can be found"""
path = inspect.getfile(sys.modules['os'])
name = os.path.basename(path)
args = {
"re_filter": ['^.*' + name + '$'],
"branch": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False,
"re_flag": 0
}
result = self._bt['func'](**args)
self.assertEqual(path, ''.join(result))
class ListDirTestCase(BaseTestWrapper.BaseTest):
"""manage.list_dir() test cases"""
def test_this_file(self):
"""Test if this same file can be found by searching this same directory"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"dir": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": True,
"get_dir": False
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_this_file_recurse(self):
"""Test if this same file can be found by recursively searching this same directory"""
path = os.path.abspath(inspect.getfile(inspect.currentframe()).replace('.pyc', '.py'))
name = os.path.basename(path)
args = {
"dir": os.path.dirname(path),
"recurse": True,
"depth": 1,
"get_file": True,
"get_dir": False
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_this_dir(self):
"""Test if the current working directory can be found by searching its parent directory"""
path = os.getcwd()
name = os.path.basename(path)
args = {
"dir": os.path.dirname(path),
"recurse": False,
"depth": 0,
"get_file": False,
"get_dir": True
}
result = self._bt['func'](**args)
self.assertIn(path, result)
def test_this_dir_recurse(self):
"""Test if the current working directory can be found by recursively searching its parent directory"""
path = os.getcwd()
name = os.path.basename(path)
args = {
"dir": os.path.dirname(path),
"recurse": True,
"depth": 1,
"get_file": False,
"get_dir": True
}
result = self._bt['func'](**args)
self.assertIn(path, result)
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.result.TestResult()
suite.run(result)
print result
for f in result.failures:
for t in f:
print t
print ''
for e in result.errors:
for t in e:
print t
print ''
| mit | 8,706,795,702,596,856,000 | 27.782609 | 110 | 0.506848 | false |
icea-dev/atn-sim-ng | atn/gui/dlg_traf_ui.py | 1 | 2923 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dlg_traf.ui'
#
# Created: Thu Apr 6 11:31:12 2017
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_dlg_trf(object):
def setupUi(self, dlg_trf):
dlg_trf.setObjectName(_fromUtf8("dlg_trf"))
dlg_trf.resize(578, 300)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(dlg_trf.sizePolicy().hasHeightForWidth())
dlg_trf.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(dlg_trf)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.gbx_tbl = QtGui.QGroupBox(dlg_trf)
self.gbx_tbl.setObjectName(_fromUtf8("gbx_tbl"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.gbx_tbl)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.qtw_trf = QtGui.QTableWidget(self.gbx_tbl)
self.qtw_trf.setObjectName(_fromUtf8("qtw_trf"))
self.qtw_trf.setColumnCount(0)
self.qtw_trf.setRowCount(0)
self.verticalLayout_2.addWidget(self.qtw_trf)
self.verticalLayout.addWidget(self.gbx_tbl)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btn_cancel = QtGui.QPushButton(dlg_trf)
self.btn_cancel.setObjectName(_fromUtf8("btn_cancel"))
self.horizontalLayout.addWidget(self.btn_cancel)
self.btn_create = QtGui.QPushButton(dlg_trf)
self.btn_create.setObjectName(_fromUtf8("btn_create"))
self.horizontalLayout.addWidget(self.btn_create)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(dlg_trf)
QtCore.QMetaObject.connectSlotsByName(dlg_trf)
def retranslateUi(self, dlg_trf):
dlg_trf.setWindowTitle(_translate("dlg_trf", "Scenario aircrafts:", None))
self.gbx_tbl.setTitle(_translate("dlg_trf", "Aircrafts", None))
self.btn_cancel.setText(_translate("dlg_trf", "Cancel", None))
self.btn_create.setText(_translate("dlg_trf", "Create aircrafts", None))
| gpl-3.0 | 2,851,538,455,398,838,300 | 42.626866 | 102 | 0.697571 | false |
patrickm/chromium.src | tools/lsan/PRESUBMIT.py | 12 | 1214 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
import re
def CheckChange(input_api, output_api):
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('suppressions.txt'):
continue
for line_num, line in enumerate(f.NewContents()):
line = line.strip()
if line.startswith('#') or not line:
continue
if not line.startswith('leak:'):
errors.append('"%s" should be "leak:..." in %s line %d' %
(line, f.LocalPath(), line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
def GetPreferredTryMasters(project, change):
return {
'tryserver.chromium': {
'linux_asan': set(['compile']),
'mac_asan': set(['compile']),
}
}
| bsd-3-clause | 6,166,048,921,853,432,000 | 28.609756 | 75 | 0.663921 | false |
prajwalkman/PokemonGo-Map | pogom/search.py | 1 | 3957 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import time
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cellid
from . import config
from .models import parse_map
log = logging.getLogger(__name__)
TIMESTAMP = '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000'
api = PGoApi()
def send_map_request(api, position):
try:
api.set_position(*position)
api.get_map_objects(latitude=f2i(position[0]),
longitude=f2i(position[1]),
since_timestamp_ms=TIMESTAMP,
cell_id=get_cellid(position[0], position[1]))
return api.call()
except Exception as e:
log.warn("Uncaught exception when downloading map " + str(e))
return False
def generate_location_steps(initial_location, num_steps):
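    # Walks an outward square spiral around the initial location, yielding one
    # (lat, lng, alt) tuple per step; the 0.00125 / 0.00175 degree offsets are
    # the fixed spacing between neighbouring scan points.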
pos, x, y, dx, dy = 1, 0, 0, 0, -1
while -num_steps / 2 < x <= num_steps / 2 and -num_steps / 2 < y <= num_steps / 2:
yield (x * 0.00125 + initial_location[0], y * 0.00175 + initial_location[1], 0)
if x == y or (x < 0 and x == -y) or (x > 0 and x == 1 - y):
dx, dy = -dy, dx
x, y = x + dx, y + dy
def login(args, position):
log.info('Attempting login to Pokemon Go.')
api.set_position(*position)
while not api.login(args.auth_service, args.username, args.password):
log.info('Failed to login to Pokemon Go. Trying again.')
time.sleep(config['REQ_SLEEP'])
log.info('Login to Pokemon Go successful.')
def search(args, i):
num_steps = args.step_limit
position = (config['ORIGINAL_LATITUDE'], config['ORIGINAL_LONGITUDE'], 0)
if api._auth_provider and api._auth_provider._ticket_expire:
remaining_time = api._auth_provider._ticket_expire/1000 - time.time()
if remaining_time > 60:
log.info("Skipping Pokemon Go login process since already logged in for another {:.2f} seconds".format(remaining_time))
else:
login(args, position)
else:
login(args, position)
for step, step_location in enumerate(generate_location_steps(position, num_steps), 1):
if 'NEXT_LOCATION' in config:
log.info('New location found. Starting new scan.')
config['ORIGINAL_LATITUDE'] = config['NEXT_LOCATION']['lat']
config['ORIGINAL_LONGITUDE'] = config['NEXT_LOCATION']['lon']
config.pop('NEXT_LOCATION', None)
search(args, i)
return
log.info('Scanning step {:d} of {:d}.'.format(step, num_steps**2))
log.debug('Scan location is {:f}, {:f}'.format(step_location[0], step_location[1]))
response_dict = {}
failed_consecutive = 0
while not response_dict:
response_dict = send_map_request(api, step_location)
if response_dict:
try:
parse_map(response_dict, i, step)
except KeyError:
log.error('Scan step {:d} failed. Response dictionary key error.'.format(step))
failed_consecutive += 1
if(failed_consecutive >= config['REQ_MAX_FAILED']):
log.error('Niantic servers under heavy load. Waiting before trying again')
time.sleep(config['REQ_HEAVY_SLEEP'])
failed_consecutive = 0
else:
log.info('Map Download failed. Trying again.')
log.info('Completed {:5.2f}% of scan.'.format(float(step) / num_steps**2*100))
time.sleep(config['REQ_SLEEP'])
def search_loop(args):
i = 0
while True:
log.info("Map iteration: {}".format(i))
search(args, i)
log.info("Scanning complete.")
if args.scan_delay > 1:
log.info('Waiting {:d} seconds before beginning new scan.'.format(args.scan_delay))
i += 1
time.sleep(args.scan_delay)
| agpl-3.0 | -7,033,355,135,374,107,000 | 34.648649 | 131 | 0.580743 | false |
chorizon/cromosoma | dbadmin.py | 1 | 10384 | #!/usr/bin/python3
import argparse
import os,traceback
import sys, inspect
import shutil
from datetime import date
from pathlib import Path
from colorama import init, Fore, Back, Style
from importlib import import_module, reload
from cromosoma.webmodel import WebModel
#from models import books
def start():
    parser = argparse.ArgumentParser(description='A tool to create tables in databases using models from Cromosoma')
parser.add_argument('--model', help='Model python path', required=True)
parser.add_argument('--config', help='The config file', required=False)
args = parser.parse_args()
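    # Typical invocation (the module names below are examples only):
    #   python3 dbadmin.py --model models.books --config config_production
    # --model is a dotted import path to a module defining WebModel subclasses;
    # --config names a module inside settings/ (defaults to settings.config).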
init()
#Import config
config_file='config'
if args.config!=None:
config_file=args.config
try:
config=import_module('settings.'+config_file)
except:
e = sys.exc_info()[0]
v = sys.exc_info()[1]
print(Fore.WHITE+Back.RED+Style.BRIGHT+"Config file not found: %s %s" % (e, v))
exit(1)
#print(WebModel.connections)
try:
model=import_module(args.model)
for name, obj in inspect.getmembers(sys.modules[model.__name__]):
if inspect.isclass(obj):
if obj.__module__==args.model:
WebModel.model[name.lower()]=obj()
#WebModel.modelobj
except:
"""
e = sys.exc_info()[0]
v = sys.exc_info()[1]
print(Fore.WHITE+Back.RED+Style.BRIGHT +"Error, file with model not found: %s %s" % (e, v))
"""
print("Exception in user code:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
exit(1)
    #Load the list of tables that already exist in the database
cursor=WebModel.query(WebModel, "show tables")
table_exists=[]
for row in cursor:
table=list(row.values())[0]
if table in WebModel.model:
table_exists.append(table)
    #If order were not needed, a set difference would do:
    #set([1,2,3,4]) - set([2,5])
tables=list(WebModel.model.keys())
#Array diff ordered
new_tables=[x for x in tables if x not in table_exists]
    #If order were not needed:
    #new_tables=set(tables)-set(table_exists)
    #new_tables must preserve the declaration order of the models
changes=0
#Create new tables
if len(new_tables)>0:
print(Style.BRIGHT+"Creating new tables...")
changes+=1
for table in new_tables:
print(Style.NORMAL+"--Creating table "+table+"...")
WebModel.query(WebModel, WebModel.model[table].create_table())
print("--Adding indexes and constraints for the new table")
for k_field, index in WebModel.arr_sql_index[table].items():
print("---Added index to "+k_field)
WebModel.query(WebModel, index)
for k_set, index_set in WebModel.arr_sql_set_index[table].items():
if index_set!="":
WebModel.query(WebModel, index_set)
print("---Added constraint to "+k_set)
print("--Adding uniques elements for the new table")
    #See if changes exist
    #Check if already-created tables have been modified.
try:
model_old=import_module('backups.'+args.model)
for name, obj in inspect.getmembers(sys.modules[model_old.__name__]):
if inspect.isclass(obj):
if obj.__module__=='backups.'+args.model:
WebModel.model['old_'+name.lower()]=obj()
print(Style.BRIGHT+"Checking old versions of model for find changes...")
for table in tables:
#WebModel.query(WebModel, "")
#Check if new table
#fields_to_add, fields_to_modify, fields_to_add_index, fields_to_add_constraint, fields_to_add_unique, fields_to_delete_index, fields_to_delete_unique, fields_to_delete_constraint, fields_to_delete
fields_to_add=[]
fields_to_modify=[]
fields_to_add_index=[]
fields_to_add_constraint=[]
fields_to_add_unique=[]
fields_to_delete_index=[]
fields_to_delete_unique=[]
fields_to_delete_constraint=[]
fields_to_delete=[]
old_table='old_'+table
for f, v in WebModel.model[table].fields.items():
if not f in WebModel.model[old_table].fields:
fields_to_add.append(f)
#Add index
if v.indexed==True:
fields_to_add_index.append(f)
changes+=1
#Add unique
if v.unique==True:
fields_to_add_unique.append(f)
changes+=1
#Add constraint
if v.foreignkey==True:
fields_to_add_constraint.append(f)
changes+=1
changes+=1
#If exists field in old webmodel and new
else:
v_old=WebModel.model[old_table].fields[f]
if v.get_type_sql()!=v_old.get_type_sql():
fields_to_modify.append(f)
changes+=1
#Add index
if v.indexed==True and v_old.indexed==False:
fields_to_add_index.append(f)
changes+=1
if v.indexed==False and v_old.indexed==True:
fields_to_delete_index.append(f)
changes+=1
#Add unique
if v.unique==True and v_old.unique==False:
fields_to_add_unique.append(f)
changes+=1
if v.unique==False and v_old.unique==True:
fields_to_delete_unique.append(f)
changes+=1
#Add constraint
if v.foreignkey==True and v_old.foreignkey==False:
fields_to_add_constraint.append(f)
changes+=1
if v.foreignkey==False and v_old.foreignkey==True:
fields_to_delete_constraint.append(f)
changes+=1
for f, v in WebModel.model[old_table].fields.items():
if not f in WebModel.model[table].fields:
#Add constraint
if v.foreignkey==True:
fields_to_delete_constraint.append(f)
changes+=1
fields_to_delete.append(f)
changes+=1
WebModel.model[table].update_table(fields_to_add, fields_to_modify, fields_to_add_index, fields_to_add_constraint, fields_to_add_unique, fields_to_delete_index, fields_to_delete_unique, fields_to_delete_constraint, fields_to_delete)
#for field_update in arr_update:
            #Loop over the fields: if a field does not exist in the old model, create it; if its type changed, recreate it. If an index was removed, drop it; if a new index was added, create it; the same applies to uniques.
#for field in WebModel.model
except ImportError:
pass
except:
print("Exception in user code:")
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
exit(1)
original_file_path=args.model.replace('.', '/')+'.py'
backup_path='backups/'+original_file_path
if changes>0:
print(Style.BRIGHT+"Creating backup of the model. WARNING: DON'T DELETE BACKUPS DIRECTORY IF YOU WANT MAKE CHANGES IN THE FUTURE WITHOUT MODIFY DIRECTLY THE DATABASE")
create_backup(original_file_path, backup_path)
else:
if not os.path.isfile(backup_path):
create_backup(original_file_path, backup_path)
print(Style.BRIGHT+"All tasks finished")
def create_backup(original_file_path, file_path):
    #Create the backup directory if it does not exist
path=os.path.dirname(file_path)
p=Path(path)
if not p.is_dir():
p.mkdir(0o755, True)
    #Keep a timestamped copy of any previous backup before overwriting it
if os.path.isfile(file_path):
today = date.today()
shutil.copy(file_path, file_path+'.'+today.strftime("%Y%M%d%H%M%S"))
new_file=""
f=open(original_file_path)
for line in f:
"""
new_line=line.replace("model[\"", "model[\"old_")
new_line=new_line.replace("model['", "model['old_")
new_line=new_line.replace("WebModel(\"", "WebModel(\"old_")
new_line=new_line.replace("WebModel('", "WebModel('old_")
"""
new_file+=line
f.close()
f=open(file_path, 'w')
f.write(new_file)
f.close()
| gpl-2.0 | 1,003,856,121,308,283,500 | 29.721893 | 244 | 0.454931 | false |
juliakreutzer/bandit-neuralmonkey | neuralmonkey/encoders/cnn_encoder.py | 1 | 14231 | """CNN for image processing."""
from typing import cast, Callable, List, Tuple, Set, Union
from typeguard import check_argument_types
import numpy as np
import tensorflow as tf
from neuralmonkey.dataset import Dataset
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs
from neuralmonkey.model.stateful import (SpatialStatefulWithOutput,
TemporalStatefulWithOutput)
from neuralmonkey.nn.projection import multilayer_projection
# Tuples used for configuration of the convolutional layers. See docstring of
# CNNEncoder initialization for more details.
# pylint: disable=invalid-name
ConvSpec = Tuple[str, int, int, str, int]
ResNetSpec = Tuple[str, int, int]
MaxPoolSpec = Tuple[str, int, int, str]
# pylint: enable=invalid-name
class CNNEncoder(ModelPart, SpatialStatefulWithOutput):
"""An image encoder.
    It projects the input image through a series of convolutional operations. The
projected image is vertically cut and fed to stacked RNN layers which
encode the image into a single vector.
"""
# pylint: disable=too-many-arguments, too-many-locals
def __init__(self,
name: str,
data_id: str,
convolutions: List[Union[ConvSpec, ResNetSpec, MaxPoolSpec]],
image_height: int, image_width: int, pixel_dim: int,
fully_connected: List[int] = None,
batch_normalize: bool = False,
dropout_keep_prob: float = 0.5,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
"""Initialize a convolutional network for image processing.
The convolutional network can consist of plain convolutions,
max-pooling layers and residual block. In the configuration, they are
specified using the following tuples.
* convolution: ("C", kernel_size, stride, padding, out_channel);
* max / average pooling: ("M"/"A", kernel_size, stride, padding);
* residual block: ("R", kernel_size, out_channels).
Padding must be either "valid" or "same".
Args:
convolutions: Configuration of convolutional layers.
data_id: Identifier of the data series in the dataset.
image_height: Height of the input image in pixels.
image_width: Width of the image.
pixel_dim: Number of color channels in the input images.
dropout_keep_prob: Probability of keeping neurons active in
dropout. Dropout is done between all convolutional layers and
fully connected layer.
"""
check_argument_types()
ModelPart.__init__(
self, name, save_checkpoint, load_checkpoint, initializers)
self.data_id = data_id
self.dropout_keep_prob = dropout_keep_prob
self.image_height = image_height
self.image_width = image_width
self.pixel_dim = pixel_dim
self.convolutions = convolutions
self.fully_connected = fully_connected
self.batch_normalize = batch_normalize
# pylint: enable=too-many-arguments, too-many-locals
# pylint: disable=no-self-use
@tensor
def train_mode(self) -> tf.Tensor:
return tf.placeholder(tf.bool, shape=[], name="mode_placeholder")
# pylint: enable=no-self-use
@tensor
def image_input(self) -> tf.Tensor:
return tf.placeholder(
tf.float32,
shape=(None, self.image_height, self.image_width,
self.pixel_dim),
name="input_images")
@tensor
def image_mask(self) -> tf.Tensor:
return tf.placeholder(
tf.float32,
shape=(None, self.image_height, self.image_width, 1),
name="input_mask")
def batch_norm_callback(self, layer_output: tf.Tensor) -> tf.Tensor:
if self.batch_normalize:
return tf.layers.batch_normalization(
layer_output, training=self.train_mode)
return layer_output
@tensor
def image_processing_layers(self) -> List[Tuple[tf.Tensor, tf.Tensor]]:
"""Do all convolutions and return the last conditional map.
No dropout is applied between the convolutional layers. By default, the
activation function is ReLU.
"""
last_layer = self.image_input
last_mask = self.image_mask
last_channels = self.pixel_dim
image_processing_layers = [] # type: List[Tuple[tf.Tensor, tf.Tensor]]
with tf.variable_scope("convolutions"):
for i, specification in enumerate(self.convolutions):
if specification[0] == "C":
(last_layer, last_mask,
last_channels) = plain_convolution(
last_layer, last_mask,
cast(ConvSpec, specification),
self.batch_norm_callback, i)
image_processing_layers.append((last_layer, last_mask))
elif specification[0] in ["M", "A"]:
last_layer, last_mask = pooling(
last_layer, last_mask,
cast(MaxPoolSpec, specification), i)
image_processing_layers.append((last_layer, last_mask))
elif specification[0] == "R":
if not self.batch_normalize:
raise ValueError(
"Using ResNet blocks requires batch normalization "
"to be turned on.")
(last_layer, last_mask,
last_channels) = residual_block(
last_layer, last_mask, last_channels,
cast(ResNetSpec, specification),
self.batch_norm_callback, i)
image_processing_layers.append((last_layer, last_mask))
else:
raise ValueError(
"Unknown type of convoutional layer #{}: '{}'".format(
i + 1, specification[0]))
return image_processing_layers
@tensor
def spatial_states(self):
# pylint: disable=unsubscriptable-object
return self.image_processing_layers[-1][0]
# pylint: enable=unsubscriptable-object
@tensor
def spatial_mask(self) -> tf.Tensor:
# pylint: disable=unsubscriptable-object
return self.image_processing_layers[-1][1]
# pylint: enable=unsubscriptable-object
@tensor
def output(self) -> tf.Tensor:
"""Output vector of the CNN.
        If fully connected layers are specified, they are applied on top of
        the last convolutional map, with dropout between all layers and ReLU
        as the default activation. These are plain projection layers; no
        softmax is applied.
        If no fully connected layers are specified, the average-pooled last
        convolutional map is used as the output vector.
"""
# pylint: disable=no-member
last_height, last_width, last_n_channels = [
s.value for s in self.spatial_states.get_shape()[1:]]
# pylint: enable=no-member
if self.fully_connected is None:
            # average over the spatial dimensions -> the output shape is the
            # number of channels of the last convolution
encoded = tf.reduce_mean(self.spatial_states, [1, 2])
return encoded
states_flat = tf.reshape(
self.spatial_states,
[-1, last_width * last_height * last_n_channels])
return multilayer_projection(
states_flat, self.fully_connected,
activation=tf.nn.relu,
dropout_keep_prob=self.dropout_keep_prob,
train_mode=self.train_mode)
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
        # if the data comes from a pickled file it is a list, not a numpy
        # array, so convert it just in case
images = np.array(dataset.get_series(self.data_id))
f_dict = {}
f_dict[self.image_input] = images / 255.0
# the image mask is one everywhere where the image is non-zero, i.e.
# zero pixels are masked out
f_dict[self.image_mask] = np.sign(
np.sum(images, axis=3, keepdims=True))
f_dict[self.train_mode] = train
return f_dict
def plain_convolution(
prev_layer: tf.Tensor,
prev_mask: tf.Tensor,
specification: ConvSpec,
batch_norm_callback: Callable[[tf.Tensor], tf.Tensor],
layer_num: int) -> Tuple[tf.Tensor, tf.Tensor, int]:
try:
check_argument_types()
except TypeError as err:
raise ValueError((
"Specification of a convolutional layer (number {} in config) "
'needs to have 5 members: "C", kernel size, stride, '
"padding, output channels, was {}").format(
layer_num, specification)) from err
kernel_size, stride, pad, out_channels = specification[1:]
if pad not in ["same", "valid"]:
raise ValueError(
("Padding must be 'same' or 'valid', "
"was '{}' in layer {}.").format(pad, layer_num + 1))
with tf.variable_scope("layer_{}_convolution".format(layer_num)):
next_layer = tf.layers.conv2d(
prev_layer, out_channels, kernel_size,
activation=None, padding=pad)
next_layer = batch_norm_callback(next_layer)
next_layer = tf.nn.relu(next_layer)
next_mask = tf.layers.max_pooling2d(
prev_mask, kernel_size, stride, padding=pad)
return next_layer, next_mask, out_channels
def residual_block(
prev_layer: tf.Tensor,
prev_mask: tf.Tensor,
prev_channels: int,
specification: ResNetSpec,
batch_norm_callback: Callable[[tf.Tensor], tf.Tensor],
layer_num: int) -> Tuple[tf.Tensor, tf.Tensor, int]:
try:
check_argument_types()
except TypeError as err:
raise ValueError((
"Specification of a residual block (number {} in config) "
'needs to have 3 members: "R", kernel size, channels; '
"was {}").format(layer_num, specification)) from err
kernel_size, out_channels = specification[1:]
with tf.variable_scope("layer_{}_resnet_block".format(layer_num)):
if out_channels == prev_channels:
before_resnet_block = prev_layer
else:
with tf.variable_scope("project_input"):
before_resnet_block = tf.layers.conv2d(
prev_layer, out_channels, 1, 1,
"same", activation=None)
before_resnet_block = batch_norm_callback(before_resnet_block)
with tf.variable_scope("conv_a"):
after_cnn = batch_norm_callback(prev_layer)
after_cnn = tf.nn.relu(after_cnn)
after_cnn = tf.layers.conv2d(
after_cnn, out_channels, kernel_size,
padding="same", activation=None)
with tf.variable_scope("conv_b"):
after_cnn = batch_norm_callback(after_cnn)
after_cnn = tf.nn.relu(after_cnn)
after_cnn = tf.layers.conv2d(
after_cnn, out_channels, kernel_size,
padding="same", activation=None)
next_layer = after_cnn + before_resnet_block
return next_layer, prev_mask, out_channels
def pooling(
prev_layer: tf.Tensor,
prev_mask: tf.Tensor,
specification: MaxPoolSpec,
layer_num: int) -> Tuple[tf.Tensor, tf.Tensor]:
try:
check_argument_types()
except TypeError as err:
raise ValueError((
"Specification of a max-pooling layer (number {} in config) "
'needs to have 3 members: "M", pool size, stride, padding, '
"was {}").format(layer_num, specification)) from err
pool_type, pool_size, stride, pad = specification
if pool_type == "M":
pool_fn = tf.layers.max_pooling2d
elif pool_type == "A":
pool_fn = tf.layers.average_pooling2d
else:
raise ValueError(
("Unsupported type of pooling: {}, use 'M' for max-pooling or "
"'A' for average pooling.").format(pool_type))
if pad not in ["same", "valid"]:
raise ValueError(
"Padding must be 'same' or 'valid', was '{}' in layer {}."
.format(pad, layer_num + 1))
with tf.variable_scope("layer_{}_max_pool".format(layer_num)):
next_layer = pool_fn(prev_layer, pool_size, stride)
next_mask = tf.layers.max_pooling2d(prev_mask, pool_size, stride)
return next_layer, next_mask
class CNNTemporalView(ModelPart, TemporalStatefulWithOutput):
"""Slice the convolutional maps left to right."""
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
cnn: CNNEncoder) -> None:
check_argument_types()
ModelPart.__init__(
self, name, save_checkpoint=None, load_checkpoint=None)
self._cnn = cnn
# pylint: enable=too-many-arguments
@tensor
def output(self) -> tf.Tensor:
# pylint: disable=unsubscriptable-object
return self._cnn.output
# pylint: enable=unsubscriptable-object
@tensor
def temporal_states(self):
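        # Spatial states of shape [batch, height, width, channels] are
        # transposed to [batch, width, height, channels] and reshaped to
        # [batch, width, height * channels], so each image column becomes one
        # step of the temporal sequence.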
states = tf.transpose(self._cnn.spatial_states, perm=[0, 2, 1, 3])
shape = states.get_shape()
res = tf.reshape(
states, [-1, shape[1].value, shape[2].value * shape[3].value])
return res
@tensor
def temporal_mask(self) -> tf.Tensor:
mask = tf.squeeze(self._cnn.spatial_mask, 3)
summed = tf.reduce_sum(mask, axis=1)
return tf.to_float(tf.greater(summed, 0))
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
return {}
def get_dependencies(self) -> Set["ModelPart"]:
"""Collect recusively all encoders and decoders."""
return self._cnn.get_dependencies().union([self])
| bsd-3-clause | -7,925,772,018,403,166,000 | 37.882514 | 79 | 0.594898 | false |
pauloricardomg/cassandra-dtest | transient_replication_test.py | 3 | 26647 | import re
import logging
import types
from struct import pack
from uuid import UUID
from cassandra import ConsistencyLevel, InvalidRequest
from cassandra.query import SimpleStatement
from cassandra.protocol import ConfigurationException
from ccmlib.node import Node
from dtest import Tester
from tools.jmxutils import JolokiaAgent, make_mbean
from tools.data import rows_to_list
from tools.assertions import (assert_all)
from cassandra.metadata import Murmur3Token, OrderedDict
import pytest
since = pytest.mark.since
logging.getLogger('cassandra').setLevel(logging.CRITICAL)
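# 11 is assumed to be the wire value of the NODE_LOCAL consistency level,
# which the driver's ConsistencyLevel enum does not define; queries at this
# level only work because the nodes are started with
# -Dcassandra.enable_nodelocal_queries=true (see patch_start below).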
NODELOCAL = 11
class SSTable(object):
def __init__(self, name, repaired, pending_id):
self.name = name
self.repaired = repaired
self.pending_id = pending_id
class TableMetrics(object):
def __init__(self, node, keyspace, table):
assert isinstance(node, Node)
self.jmx = JolokiaAgent(node)
self.write_latency_mbean = make_mbean("metrics", type="Table", name="WriteLatency", keyspace=keyspace, scope=table)
self.speculative_reads_mbean = make_mbean("metrics", type="Table", name="SpeculativeRetries", keyspace=keyspace, scope=table)
self.transient_writes_mbean = make_mbean("metrics", type="Table", name="TransientWrites", keyspace=keyspace, scope=table)
@property
def write_count(self):
return self.jmx.read_attribute(self.write_latency_mbean, "Count")
@property
def speculative_reads(self):
return self.jmx.read_attribute(self.speculative_reads_mbean, "Count")
@property
def transient_writes(self):
return self.jmx.read_attribute(self.transient_writes_mbean, "Count")
def start(self):
self.jmx.start()
def stop(self):
self.jmx.stop()
def __enter__(self):
""" For contextmanager-style usage. """
self.start()
return self
def __exit__(self, exc_type, value, traceback):
""" For contextmanager-style usage. """
self.stop()
class StorageProxy(object):
def __init__(self, node):
assert isinstance(node, Node)
self.node = node
self.jmx = JolokiaAgent(node)
self.mbean = make_mbean("db", type="StorageProxy")
def start(self):
self.jmx.start()
def stop(self):
self.jmx.stop()
@property
def blocking_read_repair(self):
return self.jmx.read_attribute(self.mbean, "ReadRepairRepairedBlocking")
@property
def speculated_data_request(self):
return self.jmx.read_attribute(self.mbean, "ReadRepairSpeculatedRequest")
@property
def speculated_data_repair(self):
return self.jmx.read_attribute(self.mbean, "ReadRepairSpeculatedRepair")
def __enter__(self):
""" For contextmanager-style usage. """
self.start()
return self
def __exit__(self, exc_type, value, traceback):
""" For contextmanager-style usage. """
self.stop()
class StorageService(object):
def __init__(self, node):
assert isinstance(node, Node)
self.node = node
self.jmx = JolokiaAgent(node)
self.mbean = make_mbean("db", type="StorageService")
def start(self):
self.jmx.start()
def stop(self):
self.jmx.stop()
def get_replicas(self, ks, cf, key):
return self.jmx.execute_method(self.mbean, "getNaturalEndpointsWithPort(java.lang.String,java.lang.String,java.lang.String,boolean)", [ks, cf, key, True])
def __enter__(self):
""" For contextmanager-style usage. """
self.start()
return self
def __exit__(self, exc_type, value, traceback):
""" For contextmanager-style usage. """
self.stop()
def patch_start(startable):
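    # Wraps .start() on a node or cluster object so that every start call
    # adds the JVM flags these tests rely on (see jvm_args below).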
old_start = startable.start
def new_start(self, *args, **kwargs):
kwargs['jvm_args'] = kwargs.get('jvm_args', []) + ['-XX:-PerfDisableSharedMem',
'-Dcassandra.enable_nodelocal_queries=true']
return old_start(*args, **kwargs)
startable.start = types.MethodType(new_start, startable)
return startable
def get_sstable_data(cls, node, keyspace):
_sstable_name = re.compile(r'SSTable: (.+)')
_repaired_at = re.compile(r'Repaired at: (\d+)')
_pending_repair = re.compile(r'Pending repair: (\-\-|null|[a-f0-9\-]+)')
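    # The regexes above assume sstablemetadata output lines of roughly this
    # shape (paths and values illustrative):
    #   SSTable: /path/to/keyspace/table/na-1-big
    #   Repaired at: 0
    #   Pending repair: --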
out = node.run_sstablemetadata(keyspace=keyspace).stdout
def matches(pattern):
return filter(None, [pattern.match(l) for l in out.decode("utf-8").split('\n')])
names = [m.group(1) for m in matches(_sstable_name)]
repaired_times = [int(m.group(1)) for m in matches(_repaired_at)]
def uuid_or_none(s):
return None if s == 'null' or s == '--' else UUID(s)
pending_repairs = [uuid_or_none(m.group(1)) for m in matches(_pending_repair)]
assert names
assert repaired_times
assert pending_repairs
assert len(names) == len(repaired_times) == len(pending_repairs)
return [SSTable(*a) for a in zip(names, repaired_times, pending_repairs)]
@since('4.0')
class TransientReplicationBase(Tester):
keyspace = "ks"
table = "tbl"
@pytest.fixture
def cheap_quorums(self):
return False
def populate(self):
self.cluster.populate(3, tokens=self.tokens, debug=True, install_byteman=True)
def set_nodes(self):
self.node1, self.node2, self.node3 = self.nodes
# Make sure digest is not attempted against the transient node
self.node3.byteman_submit(['./byteman/throw_on_digest.btm'])
def replication_factor(self):
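        # "<total replicas>/<transient replicas>" notation: 3 replicas per
        # datacenter, 1 of which is transient.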
return '3/1'
def tokens(self):
return [0, 1, 2]
def setup_schema(self):
session = self.exclusive_cql_connection(self.node1)
replication_params = OrderedDict()
replication_params['class'] = 'NetworkTopologyStrategy'
replication_params['datacenter1'] = self.replication_factor()
replication_params = ', '.join("'%s': '%s'" % (k, v) for k, v in replication_params.items())
session.execute("CREATE KEYSPACE %s WITH REPLICATION={%s}" % (self.keyspace, replication_params))
session.execute("CREATE TABLE %s.%s (pk int, ck int, value int, PRIMARY KEY (pk, ck)) WITH speculative_retry = 'NEVER' AND read_repair = 'NONE'" % (self.keyspace, self.table))
@pytest.fixture(scope='function', autouse=True)
def setup_cluster(self, fixture_dtest_setup):
self.tokens = self.tokens()
patch_start(self.cluster)
self.cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
'num_tokens': 1,
'commitlog_sync_period_in_ms': 500,
'enable_transient_replication': True,
'dynamic_snitch': False})
self.populate()
self.cluster.start(wait_other_notice=True, wait_for_binary_proto=True)
self.nodes = [patch_start(node) for node in self.cluster.nodelist()]
self.set_nodes()
session = self.exclusive_cql_connection(self.node3)
self.setup_schema()
def assert_has_sstables(self, node, flush=False, compact=False):
if flush:
node.flush()
if compact:
node.nodetool(' '.join(['compact', self.keyspace, self.table]))
sstables = node.get_sstables(self.keyspace, self.table)
assert sstables
def assert_has_no_sstables(self, node, flush=False, compact=False):
if flush:
node.flush()
if compact:
node.nodetool(' '.join(['compact', self.keyspace, self.table]))
sstables = node.get_sstables(self.keyspace, self.table)
assert not sstables
def quorum(self, session, stmt_str):
return session.execute(SimpleStatement(stmt_str, consistency_level=ConsistencyLevel.QUORUM))
def nodelocal(self, session, stmt_str):
return session.execute(SimpleStatement(stmt_str, consistency_level=NODELOCAL))
def assert_local_rows(self, node, rows, ignore_order=False):
assert_all(self.exclusive_cql_connection(node),
"SELECT * FROM %s.%s" % (self.keyspace, self.table),
rows,
cl=NODELOCAL,
ignore_order=ignore_order)
def insert_row(self, pk, ck, value, session=None, node=None):
session = session or self.exclusive_cql_connection(node or self.node1)
token = Murmur3Token.from_key(pack('>i', pk)).value
assert token < self.tokens[0] or self.tokens[-1] < token # primary replica should be node1
self.quorum(session, "INSERT INTO %s.%s (pk, ck, value) VALUES (%s, %s, %s)" % (self.keyspace, self.table, pk, ck, value))
def delete_row(self, pk, ck, session=None, node=None):
session = session or self.exclusive_cql_connection(node or self.node1)
token = Murmur3Token.from_key(pack('>i', pk)).value
assert token < self.tokens[0] or self.tokens[-1] < token # primary replica should be node1
self.quorum(session, "DELETE FROM %s.%s WHERE pk = %s AND ck = %s" % (self.keyspace, self.table, pk, ck))
def read_as_list(self, query, session=None, node=None):
session = session or self.exclusive_cql_connection(node or self.node1)
return rows_to_list(self.quorum(session, query))
def table_metrics(self, node):
return TableMetrics(node, self.keyspace, self.table)
def split(self, arr):
arr1 = []
arr2 = []
for idx, item in enumerate(arr):
if idx % 2 == 0:
arr1.append(item)
else:
arr2.append(item)
return (arr1, arr2)
def generate_rows(self, partitions, rows):
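        # e.g. generate_rows(2, 2) -> [[0, 0, 0], [1, 0, 1], [0, 1, 1], [1, 1, 2]]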
return [[pk, ck, pk+ck] for ck in range(rows) for pk in range(partitions)]
@since('4.0')
class TestTransientReplication(TransientReplicationBase):
@pytest.mark.no_vnodes
def test_transient_noop_write(self):
""" If both full replicas are available, nothing should be written to the transient replica """
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
with tm(self.node1) as tm1, tm(self.node2) as tm2, tm(self.node3) as tm3:
assert tm1.write_count == 0
assert tm2.write_count == 0
assert tm3.write_count == 0
self.insert_row(1, 1, 1)
assert tm1.write_count == 1
assert tm2.write_count == 1
assert tm3.write_count == 0
self.assert_has_sstables(self.node1, flush=True)
self.assert_has_sstables(self.node2, flush=True)
self.assert_has_no_sstables(self.node3, flush=True)
@pytest.mark.no_vnodes
def test_transient_write(self):
""" If write can't succeed on full replica, it's written to the transient node instead """
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
with tm(self.node1) as tm1, tm(self.node2) as tm2, tm(self.node3) as tm3:
self.insert_row(1, 1, 1)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.insert_row(1, 2, 2)
# node1 should contain both rows
self.assert_local_rows(self.node1,
[[1, 1, 1],
[1, 2, 2]])
# write couldn't succeed on node2, so it has only the first row
self.assert_local_rows(self.node2,
[[1, 1, 1]])
# transient replica should hold only the second row
self.assert_local_rows(self.node3,
[[1, 2, 2]])
@pytest.mark.no_vnodes
def test_transient_full_merge_read(self):
""" When reading, transient replica should serve a missing read """
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
self.insert_row(1, 1, 1)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.insert_row(1, 2, 2)
# Stop reads from the node that will hold the second row
self.node1.stop()
# Whether we're reading from the full node or from the transient node, we should get consistent results
for node in [self.node2, self.node3]:
assert_all(self.exclusive_cql_connection(node),
"SELECT * FROM %s.%s" % (self.keyspace, self.table),
[[1, 1, 1],
[1, 2, 2]],
cl=ConsistencyLevel.QUORUM)
@pytest.mark.no_vnodes
def test_srp(self):
""" When reading, transient replica should serve a missing read """
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
self.insert_row(1, 1, 1)
self.insert_row(1, 2, 2)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.delete_row(1, 1, node = self.node1)
# Stop reads from the node that will hold the second row
self.node1.stop()
# Whether we're reading from the full node or from the transient node, we should get consistent results
assert_all(self.exclusive_cql_connection(self.node3),
"SELECT * FROM %s.%s LIMIT 1" % (self.keyspace, self.table),
[[1, 2, 2]],
cl=ConsistencyLevel.QUORUM)
@pytest.mark.no_vnodes
def test_transient_full_merge_read_with_delete_transient_coordinator(self):
self._test_transient_full_merge_read_with_delete(self.node3)
@pytest.mark.no_vnodes
def test_transient_full_merge_read_with_delete_full_coordinator(self):
self._test_transient_full_merge_read_with_delete(self.node2)
@pytest.mark.no_vnodes
def _test_transient_full_merge_read_with_delete(self, coordinator):
""" When reading, transient replica should serve a missing read """
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
self.insert_row(1, 1, 1)
self.insert_row(1, 2, 2)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.delete_row(1, 2)
self.assert_local_rows(self.node3,
[])
# Stop reads from the node that will hold the second row
self.node1.stop()
assert_all(self.exclusive_cql_connection(coordinator),
"SELECT * FROM %s.%s" % (self.keyspace, self.table),
[[1, 1, 1]],
cl=ConsistencyLevel.QUORUM)
def _test_speculative_write_repair_cycle(self, primary_range, optimized_repair, repair_coordinator, expect_node3_data):
"""
if one of the full replicas is not available, data should be written to the transient replica, but removed after incremental repair
"""
for node in self.nodes:
self.assert_has_no_sstables(node)
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
# self.insert_row(1)
tm = lambda n: self.table_metrics(n)
with tm(self.node1) as tm1, tm(self.node2) as tm2, tm(self.node3) as tm3:
assert tm1.write_count == 0
assert tm2.write_count == 0
assert tm3.write_count == 0
self.insert_row(1, 1, 1)
assert tm1.write_count == 1
assert tm2.write_count == 0
assert tm3.write_count == 1
self.assert_has_sstables(self.node1, flush=True)
self.assert_has_no_sstables(self.node2, flush=True)
self.assert_has_sstables(self.node3, flush=True)
repair_opts = ['repair', self.keyspace]
if primary_range: repair_opts.append('-pr')
if optimized_repair: repair_opts.append('-os')
self.node1.nodetool(' '.join(repair_opts))
self.assert_has_sstables(self.node1, compact=True)
self.assert_has_sstables(self.node2, compact=True)
if expect_node3_data:
self.assert_has_sstables(self.node3, compact=True)
else:
self.assert_has_no_sstables(self.node3, compact=True)
@pytest.mark.no_vnodes
def test_speculative_write_repair_cycle(self):
""" incremental repair from full replica should remove data on node3 """
self._test_speculative_write_repair_cycle(primary_range=False,
optimized_repair=False,
repair_coordinator=self.node1,
expect_node3_data=False)
@pytest.mark.no_vnodes
def test_primary_range_repair(self):
""" optimized primary range incremental repair from full replica should remove data on node3 """
self._test_speculative_write_repair_cycle(primary_range=True,
optimized_repair=False,
repair_coordinator=self.node1,
expect_node3_data=False)
@pytest.mark.no_vnodes
def test_optimized_primary_range_repair(self):
""" optimized primary range incremental repair from full replica should remove data on node3 """
self._test_speculative_write_repair_cycle(primary_range=True,
optimized_repair=True,
repair_coordinator=self.node1,
expect_node3_data=False)
@pytest.mark.no_vnodes
def test_transient_incremental_repair(self):
""" transiently replicated ranges should be skipped when coordinating repairs """
self._test_speculative_write_repair_cycle(primary_range=True,
optimized_repair=False,
repair_coordinator=self.node1,
expect_node3_data=False)
@pytest.mark.no_vnodes
def test_cheap_quorums(self):
""" writes shouldn't make it to transient nodes """
session = self.exclusive_cql_connection(self.node1)
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
with tm(self.node1) as tm1, tm(self.node2) as tm2, tm(self.node3) as tm3:
assert tm1.write_count == 0
assert tm2.write_count == 0
assert tm3.write_count == 0
self.insert_row(1, 1, 1, session=session)
assert tm1.write_count == 1
assert tm2.write_count == 1
assert tm3.write_count == 0
@pytest.mark.no_vnodes
def test_speculative_write(self):
""" if a full replica isn't responding, we should send the write to the transient replica """
session = self.exclusive_cql_connection(self.node1)
self.node2.byteman_submit(['./byteman/slow_writes.btm'])
self.insert_row(1, 1, 1, session=session)
self.assert_local_rows(self.node1, [[1,1,1]])
self.assert_local_rows(self.node2, [])
self.assert_local_rows(self.node3, [[1,1,1]])
@pytest.mark.no_vnodes
def test_full_repair_from_full_replica(self):
""" full repairs shouldn't replicate data to transient replicas """
session = self.exclusive_cql_connection(self.node1)
for node in self.nodes:
self.assert_has_no_sstables(node)
self.insert_row(1, 1, 1, session=session)
self.assert_has_sstables(self.node1, flush=True)
self.assert_has_sstables(self.node2, flush=True)
self.assert_has_no_sstables(self.node3, flush=True)
self.node1.nodetool(' '.join(['repair', self.keyspace, '-full']))
self.assert_has_sstables(self.node1, flush=True)
self.assert_has_sstables(self.node2, flush=True)
self.assert_has_no_sstables(self.node3, flush=True)
@pytest.mark.no_vnodes
def test_full_repair_from_transient_replica(self):
""" full repairs shouldn't replicate data to transient replicas """
session = self.exclusive_cql_connection(self.node1)
for node in self.nodes:
self.assert_has_no_sstables(node)
self.insert_row(1, 1, 1, session=session)
self.assert_has_sstables(self.node1, flush=True)
self.assert_has_sstables(self.node2, flush=True)
self.assert_has_no_sstables(self.node3, flush=True)
self.node3.nodetool(' '.join(['repair', self.keyspace, '-full']))
self.assert_has_sstables(self.node1, flush=True)
self.assert_has_sstables(self.node2, flush=True)
self.assert_has_no_sstables(self.node3, flush=True)
@pytest.mark.skip(reason="Doesn't test quite the right combination of forbidden RF changes right now")
def test_keyspace_rf_changes(self):
""" they should throw an exception """
session = self.exclusive_cql_connection(self.node1)
replication_params = OrderedDict()
replication_params['class'] = 'NetworkTopologyStrategy'
assert self.replication_factor() == '3/1'
replication_params['datacenter1'] = '5/2'
replication_params = ', '.join("'%s': '%s'" % (k, v) for k, v in replication_params.items())
with pytest.raises(ConfigurationException):
session.execute("ALTER KEYSPACE %s WITH REPLICATION={%s}" % (self.keyspace, replication_params))
def test_disabled_read_repair(self):
""" shouldn't allow creating tables without read repair disabled """
session = self.exclusive_cql_connection(self.node1)
with pytest.raises(InvalidRequest):
session.execute("CREATE TABLE %s.tbl2 (pk int, ck int, value int, PRIMARY KEY (pk, ck))" % self.keyspace)
with pytest.raises(InvalidRequest):
session.execute("ALTER TABLE %s.%s WITH read_repair = 'BLOCKING'" % (self.keyspace, self.table))
@since('4.0')
class TestTransientReplicationSpeculativeQueries(TransientReplicationBase):
def setup_schema(self):
session = self.exclusive_cql_connection(self.node1)
replication_params = OrderedDict()
replication_params['class'] = 'NetworkTopologyStrategy'
replication_params['datacenter1'] = self.replication_factor()
replication_params = ', '.join("'%s': '%s'" % (k, v) for k, v in replication_params.items())
session.execute("CREATE KEYSPACE %s WITH REPLICATION={%s}" % (self.keyspace, replication_params))
session.execute("CREATE TABLE %s.%s (pk int, ck int, value int, PRIMARY KEY (pk, ck)) WITH speculative_retry = 'NEVER' AND read_repair = 'NONE';" % (self.keyspace, self.table))
@pytest.mark.no_vnodes
def test_always_speculate(self):
""" If write can't succeed on full replica, it's written to the transient node instead """
session = self.exclusive_cql_connection(self.node1)
session.execute("ALTER TABLE %s.%s WITH speculative_retry = 'ALWAYS';" % (self.keyspace, self.table))
self.insert_row(1, 1, 1)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.insert_row(1, 2, 2)
for node in self.nodes:
assert_all(self.exclusive_cql_connection(node),
"SELECT * FROM %s.%s WHERE pk = 1" % (self.keyspace, self.table),
[[1, 1, 1],
[1, 2, 2]],
cl=ConsistencyLevel.QUORUM)
@pytest.mark.no_vnodes
def test_custom_speculate(self):
""" If write can't succeed on full replica, it's written to the transient node instead """
session = self.exclusive_cql_connection(self.node1)
session.execute("ALTER TABLE %s.%s WITH speculative_retry = '99.99PERCENTILE';" % (self.keyspace, self.table))
self.insert_row(1, 1, 1)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.insert_row(1, 2, 2)
for node in self.nodes:
assert_all(self.exclusive_cql_connection(node),
"SELECT * FROM %s.%s WHERE pk = 1" % (self.keyspace, self.table),
[[1, 1, 1],
[1, 2, 2]],
cl=ConsistencyLevel.QUORUM)
@since('4.0')
class TestMultipleTransientNodes(TransientReplicationBase):
def populate(self):
self.cluster.populate(5, tokens=self.tokens, debug=True, install_byteman=True)
def set_nodes(self):
self.node1, self.node2, self.node3, self.node4, self.node5 = self.nodes
def replication_factor(self):
return '5/2'
def tokens(self):
return [0, 1, 2, 3, 4]
@pytest.mark.resource_intensive
@pytest.mark.no_vnodes
def test_transient_full_merge_read(self):
""" When reading, transient replica should serve a missing read """
for node in self.nodes:
self.assert_has_no_sstables(node)
tm = lambda n: self.table_metrics(n)
self.insert_row(1, 1, 1)
# Stop writes to the other full node
self.node2.byteman_submit(['./byteman/stop_writes.btm'])
self.insert_row(1, 2, 2)
self.assert_local_rows(self.node1,
[[1, 1, 1],
[1, 2, 2]])
self.assert_local_rows(self.node2,
[[1, 1, 1]])
self.assert_local_rows(self.node3,
[[1, 1, 1],
[1, 2, 2]])
self.assert_local_rows(self.node4,
[[1, 2, 2]])
self.assert_local_rows(self.node5,
[[1, 2, 2]])
# Stop reads from the node that will hold the second row
self.node1.stop()
# Whether we're reading from the full node or from the transient node, we should get consistent results
for node in [self.node2, self.node3, self.node4, self.node5]:
assert_all(self.exclusive_cql_connection(node),
"SELECT * FROM %s.%s" % (self.keyspace, self.table),
[[1, 1, 1],
[1, 2, 2]],
cl=ConsistencyLevel.QUORUM)
| apache-2.0 | 6,229,363,424,580,439,000 | 39.682443 | 184 | 0.600518 | false |
sirex/Misago | misago/core/errorpages.py | 8 | 1974 | from django.http import JsonResponse
from django.shortcuts import render
from django.utils.translation import ugettext as _
from misago.core.utils import is_request_to_misago
from misago.admin.views.errorpages import admin_error_page, admin_csrf_failure
def _ajax_error(code=406, message=None):
return JsonResponse({'detail': message}, status=code)
@admin_error_page
def _error_page(request, code, message=None):
response = render(request,
'misago/errorpages/%s.html' % code,
{'message': message})
response.status_code = code
return response
def permission_denied(request, message=None):
if request.is_ajax():
return _ajax_error(403, message or _("Permission denied."))
else:
return _error_page(request, 403, message)
def page_not_found(request):
if request.is_ajax():
return _ajax_error(404, "Not found.")
else:
return _error_page(request, 404)
@admin_csrf_failure
def csrf_failure(request, reason=""):
if request.is_ajax():
return _ajax_error(403, _("Request authentication is invalid."))
else:
response = render(request, 'misago/errorpages/csrf_failure.html')
response.status_code = 403
return response
def not_allowed(request):
response = render(request, 'misago/errorpages/405.html')
response.status_code = 405
return response
# Decorators for custom error page handlers
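# Example usage (illustrative; `site_page_not_found` is a hypothetical view):
#   handler404 = shared_404_exception_handler(site_page_not_found)
# Requests routed to Misago then get Misago's error page, while the rest of
# the site keeps its own handler.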
def shared_403_exception_handler(f):
def page_decorator(request, *args, **kwargs):
if is_request_to_misago(request):
return permission_denied(request)
else:
return f(request, *args, **kwargs)
return page_decorator
def shared_404_exception_handler(f):
def page_decorator(request, *args, **kwargs):
if is_request_to_misago(request):
return page_not_found(request)
else:
return f(request, *args, **kwargs)
return page_decorator
| gpl-2.0 | -5,229,900,041,804,731,000 | 28.029412 | 78 | 0.662107 | false |
codefisher/djangopress | djangopress/accounts/menus.py | 1 | 1165 | from djangopress.menus.menu import register
from django.template import Template, RequestContext
from django.urls import reverse
class LoginRender(object):
def __init__(self):
self._members = Template("""
<li><a href="{% url 'accounts-profile' %}">Members</a>
<ul>
<li><a href="{% url 'logout' %}">Logout</a></li>
<li><a href="{% url 'accounts-profile' %}">Profile</a></li>
</ul></li>
""")
def render_menu(self, context, tree, menu=None):
        raise NotImplementedError("rendering a full menu is not supported")  # we don't know how to do this
def render_item(self, context, item, sub_menu):
if context.get("user").is_authenticated():
return self._members.render(RequestContext(context.get("request"), {"user": context.get("user")}))
else:
if context.get("request").path == reverse('logout'):
return Template("""<li><a href="{% url 'login' %}?next={{ request.path }}">Login</a></li>""").render(context)
return Template("""<li><a href="{% url 'login' %}">Login</a></li>""").render(context)
register('member', LoginRender()) | mit | -5,298,869,993,011,078,000 | 43.846154 | 125 | 0.551073 | false |
AlphaSmartDog/DeepLearningNotes | Note-6 A3CNet/Note-6.2.1 代码阅读顺序/sonnet/python/modules/util_test.py | 6 | 27677 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.python.modules.util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
# Dependency imports
import numpy as np
import sonnet as snt
import sonnet.python.modules.util as util
from sonnet.testing import parameterized
import tensorflow as tf
_EXPECTED_FORMATTED_VARIABLE_LIST = (
"Variable Shape Type Collections Device\n"
"m1/v1 3x4 float32 global_variables, trainable_variables\n"
"m2/v2 5x6 float32 local_variables "
"/device:GPU:*"
)
_EXPECTED_FORMATTED_VARIABLE_MAP = (
"Key Variable Shape Type Collections "
"Device\n"
"vv1 m1/v1 3x4 float32 global_variables, trainable_variables\n"
"vv2 m2/v2 5x6 float32 local_variables "
"/device:GPU:*"
)
class UtilTest(parameterized.ParameterizedTestCase, tf.test.TestCase):
def testQueryInModule(self):
module = snt.Linear(output_size=42, name="linear")
with self.assertRaisesRegexp(snt.Error, "not instantiated yet"):
module.get_variables()
# Compare to the desired result set, after connection.
input_ = tf.placeholder(tf.float32, shape=[3, 4])
_ = module(input_)
self.assertEqual(set(module.get_variables()),
{module.w, module.b})
self.assertEqual(set(snt.get_variables_in_module(module)),
{module.w, module.b})
def testScopeQuery(self):
with tf.variable_scope("prefix") as s1:
v1 = tf.get_variable("a", shape=[3, 4])
with tf.variable_scope("prefix_with_more_stuff") as s2:
v2 = tf.get_variable("b", shape=[5, 6])
v3 = tf.get_variable("c", shape=[7])
# get_variables_in_scope should add a "/" to only search that scope, not
# any others which share the same prefix.
self.assertEqual(snt.get_variables_in_scope(s1), (v1,))
self.assertEqual(set(snt.get_variables_in_scope(s2)), {v2, v3})
self.assertEqual(snt.get_variables_in_scope(s1.name), (v1,))
self.assertEqual(set(snt.get_variables_in_scope(s2.name)), {v2, v3})
def testIsScopePrefix(self):
self.assertTrue(util._is_scope_prefix("a/b/c", ""))
self.assertTrue(util._is_scope_prefix("a/b/c", "a/b/c"))
self.assertTrue(util._is_scope_prefix("a/b/c", "a/b"))
self.assertTrue(util._is_scope_prefix("a/b/c", "a"))
self.assertTrue(util._is_scope_prefix("a/b/c", "a/"))
self.assertFalse(util._is_scope_prefix("a/b/c", "b"))
self.assertFalse(util._is_scope_prefix("ab/c", "a"))
def testGetNormalizedVariableMapScope(self):
with tf.variable_scope("prefix") as s1:
v1 = tf.get_variable("a", shape=[5, 6])
v2 = tf.get_variable("b", shape=[7])
variable_map = snt.get_normalized_variable_map(s1)
self.assertEqual(len(variable_map), 2)
self.assertIn("a", variable_map)
self.assertIn("b", variable_map)
self.assertIs(variable_map["a"], v1)
self.assertIs(variable_map["b"], v2)
def testGetNormalizedVariableMapScopeContext(self):
with tf.variable_scope("prefix1") as s1:
with tf.variable_scope("prefix2") as s2:
v1 = tf.get_variable("a", shape=[5, 6])
v2 = tf.get_variable("b", shape=[7])
with tf.variable_scope("prefix") as s3:
_ = tf.get_variable("c", shape=[8])
err = r"Scope 'prefix1/prefix2' is not prefixed by 'prefix'."
with self.assertRaisesRegexp(ValueError, err):
variable_map = snt.get_normalized_variable_map(s2, context=s3)
variable_map = snt.get_normalized_variable_map(s2, context=s1)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s1),
variable_map)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s1.name),
variable_map)
self.assertEqual(len(variable_map), 2)
self.assertIn("prefix2/a", variable_map)
self.assertIn("prefix2/b", variable_map)
self.assertIs(variable_map["prefix2/a"], v1)
self.assertIs(variable_map["prefix2/b"], v2)
with tf.variable_scope("") as s4:
self.assertEqual(s4.name, "")
variable_map = snt.get_normalized_variable_map(s2, context=s4)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s4),
variable_map)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s4.name),
variable_map)
self.assertEqual(len(variable_map), 2)
self.assertIn("prefix1/prefix2/a", variable_map)
self.assertIn("prefix1/prefix2/b", variable_map)
self.assertIs(variable_map["prefix1/prefix2/a"], v1)
self.assertIs(variable_map["prefix1/prefix2/b"], v2)
def testGetNormalizedVariableMapModule(self):
input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3])
conv = snt.Conv2D(output_channels=3, kernel_shape=3)
conv(input_)
variable_map = snt.get_normalized_variable_map(conv)
self.assertEqual(len(variable_map), 2)
self.assertIn("w", variable_map)
self.assertIn("b", variable_map)
self.assertIs(variable_map["w"], conv.w)
self.assertIs(variable_map["b"], conv.b)
def testGetNormalizedVariableMapWithPartitionedVariable(self):
hidden = tf.ones(shape=(1, 16, 16, 3))
partitioner = tf.variable_axis_size_partitioner(4)
conv = snt.Conv2D(output_channels=3,
kernel_shape=3,
stride=1,
partitioners={"w": partitioner})
conv(hidden)
variable_map = snt.get_normalized_variable_map(conv,
group_sliced_variables=True)
self.assertEqual(len(variable_map), 2)
self.assertEqual(variable_map["b"], conv.b)
self.assertEqual(len(variable_map["w"]), 3)
variable_map = snt.get_normalized_variable_map(conv,
group_sliced_variables=False)
self.assertEqual(variable_map["b"], conv.b)
self.assertEqual(set(variable_map), set(["b", "w/part_0", "w/part_1",
"w/part_2"]))
def testVariableMapItems(self):
hidden = tf.ones(shape=(1, 16, 16, 3))
partitioner = tf.variable_axis_size_partitioner(4)
conv = snt.Conv2D(output_channels=3,
kernel_shape=3,
stride=1,
partitioners={"w": partitioner})
conv(hidden)
variable_map = snt.get_normalized_variable_map(conv)
items = snt.variable_map_items(variable_map)
items_str = sorted((key, var.op.name) for key, var in items)
self.assertEqual(
items_str,
[(u"b", u"conv_2d/b"), ("w", u"conv_2d/w/part_0"),
("w", u"conv_2d/w/part_1"), ("w", u"conv_2d/w/part_2")])
def testGetSaverScope(self):
with tf.variable_scope("prefix") as s1:
tf.get_variable("a", shape=[5, 6])
tf.get_variable("b", shape=[7])
saver = snt.get_saver(s1)
self.assertIsInstance(saver, tf.train.Saver)
self.assertEqual(set(saver._var_list), set(["a", "b"]))
def testGetSaverModule(self):
input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3])
conv = snt.Conv2D(output_channels=3, kernel_shape=3)
conv(input_)
saver = snt.get_saver(conv)
self.assertIsInstance(saver, tf.train.Saver)
self.assertIn("w", saver._var_list)
self.assertIn("b", saver._var_list)
def _create_conv(self, partitioned, name):
hidden = tf.ones(shape=(1, 16, 16, 3))
if partitioned:
partitioners = {"w": tf.variable_axis_size_partitioner(4)}
else:
partitioners = None
conv = snt.Conv2D(output_channels=3, kernel_shape=3, stride=1,
partitioners=partitioners, name=name)
conv(hidden)
return conv
@parameterized.Parameters(
{"save_partitioned": True, "load_partitioned": True},
{"save_partitioned": True, "load_partitioned": False},
{"save_partitioned": False, "load_partitioned": True},
{"save_partitioned": False, "load_partitioned": False})
def testGetSaverPartitioned(self, save_partitioned, load_partitioned):
path = os.path.join(tempfile.mkdtemp(), "ckpt")
# Save checkpoint.
with self.test_session() as sess:
conv = self._create_conv(partitioned=save_partitioned, name="a")
saver = snt.get_saver(conv)
sess.run(tf.global_variables_initializer())
saver.save(sess, path)
w = tf.identity(conv.w)
w_value = sess.run(w)
# Restore checkpoint.
with self.test_session() as sess:
conv = self._create_conv(partitioned=load_partitioned, name="b")
saver = snt.get_saver(conv)
saver.restore(sess, path)
w = tf.identity(conv.w)
self.assertAllEqual(sess.run(w), w_value)
def testCollectionGetVariableInScope(self):
with tf.variable_scope("prefix") as s1:
tf.get_variable("a", shape=[1], collections=["test"], trainable=False)
self.assertEqual(len(snt.get_variables_in_scope(s1)), 0)
self.assertEqual(len(snt.get_variables_in_scope(s1, collection="test2")), 0)
self.assertEqual(len(snt.get_variables_in_scope(s1, collection="test")), 1)
def testCollectionGetSaver(self):
with tf.variable_scope("prefix") as s1:
input_ = tf.placeholder(tf.float32, shape=[3, 4])
net = snt.Linear(10)(input_)
net = snt.BatchNorm()(net, is_training=True)
saver1 = snt.get_saver(s1)
saver2 = snt.get_saver(s1, collections=(tf.GraphKeys.TRAINABLE_VARIABLES,))
self.assertIsInstance(saver1, tf.train.Saver)
self.assertIsInstance(saver2, tf.train.Saver)
self.assertEqual(len(saver1._var_list), 5)
self.assertIn("linear/w", saver1._var_list)
self.assertIn("linear/b", saver1._var_list)
self.assertIn("batch_norm/beta", saver1._var_list)
self.assertIn("batch_norm/moving_mean", saver1._var_list)
self.assertIn("batch_norm/moving_variance", saver1._var_list)
self.assertEqual(len(saver2._var_list), 3)
self.assertIn("linear/w", saver2._var_list)
self.assertIn("linear/b", saver2._var_list)
self.assertIn("batch_norm/beta", saver2._var_list)
self.assertNotIn("batch_norm/moving_mean", saver2._var_list)
self.assertNotIn("batch_norm/moving_variance", saver2._var_list)
def testCheckInitializers(self):
initializers = {"key_a": tf.truncated_normal_initializer(mean=0,
stddev=1),
"key_c": tf.truncated_normal_initializer(mean=0,
stddev=1)}
keys = ["key_a", "key_b"]
self.assertRaisesRegexp(KeyError,
"Invalid initializer keys.*",
snt.check_initializers,
initializers=initializers,
keys=keys)
del initializers["key_c"]
initializers["key_b"] = "not a function"
self.assertRaisesRegexp(TypeError,
"Initializer for.*",
snt.check_initializers,
initializers=initializers,
keys=keys)
initializers["key_b"] = {"key_c": "not a function"}
self.assertRaisesRegexp(TypeError,
"Initializer for.*",
snt.check_initializers,
initializers=initializers,
keys=keys)
initializers["key_b"] = {"key_c": tf.truncated_normal_initializer(mean=0,
stddev=1),
"key_d": tf.truncated_normal_initializer(mean=0,
stddev=1)}
snt.check_initializers(initializers=initializers, keys=keys)
def testCheckPartitioners(self):
partitioners = {"key_a": tf.variable_axis_size_partitioner(10),
"key_c": tf.variable_axis_size_partitioner(10)}
keys = ["key_a", "key_b"]
self.assertRaisesRegexp(KeyError,
"Invalid partitioner keys.*",
snt.check_partitioners,
partitioners=partitioners,
keys=keys)
del partitioners["key_c"]
partitioners["key_b"] = "not a function"
self.assertRaisesRegexp(TypeError,
"Partitioner for.*",
snt.check_partitioners,
partitioners=partitioners,
keys=keys)
partitioners["key_b"] = {"key_c": "not a function"}
self.assertRaisesRegexp(TypeError,
"Partitioner for.*",
snt.check_partitioners,
partitioners=partitioners,
keys=keys)
partitioners["key_b"] = {"key_c": tf.variable_axis_size_partitioner(10),
"key_d": tf.variable_axis_size_partitioner(10)}
snt.check_partitioners(partitioners=partitioners, keys=keys)
def testCheckRegularizers(self):
regularizers = {"key_a": tf.contrib.layers.l1_regularizer(scale=0.5),
"key_c": tf.contrib.layers.l2_regularizer(scale=0.5)}
keys = ["key_a", "key_b"]
self.assertRaisesRegexp(KeyError,
"Invalid regularizer keys.*",
snt.check_regularizers,
regularizers=regularizers,
keys=keys)
del regularizers["key_c"]
regularizers["key_b"] = "not a function"
self.assertRaisesRegexp(TypeError,
"Regularizer for.*",
snt.check_regularizers,
regularizers=regularizers,
keys=keys)
regularizers["key_b"] = {"key_c": "not a function"}
self.assertRaisesRegexp(TypeError,
"Regularizer for.*",
snt.check_regularizers,
regularizers=regularizers,
keys=keys)
regularizers["key_b"] = {
"key_c": tf.contrib.layers.l1_regularizer(scale=0.5),
"key_d": tf.contrib.layers.l2_regularizer(scale=0.5)}
snt.check_regularizers(regularizers=regularizers, keys=keys)
def testHasVariableScope(self):
self.assertFalse(snt.has_variable_scope("string"))
linear = snt.Linear(10)
self.assertTrue(snt.has_variable_scope(linear))
linear(tf.ones((10, 10)))
self.assertTrue(snt.has_variable_scope(linear))
def testFormatVariables(self):
with tf.variable_scope("m1"):
v1 = tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
v2 = tf.get_local_variable("v2", shape=[5, 6])
self.assertEqual(snt.format_variables([v2, v1]),
_EXPECTED_FORMATTED_VARIABLE_LIST)
def testFormatVariableMap(self):
with tf.variable_scope("m1"):
v1 = tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
v2 = tf.get_local_variable("v2", shape=[5, 6])
var_map = {"vv1": v1, "vv2": v2}
self.assertEqual(snt.format_variable_map(var_map),
_EXPECTED_FORMATTED_VARIABLE_MAP)
def testLogVariables(self):
tf.get_default_graph().add_to_collection("config", {"version": 1})
with tf.variable_scope("m1"):
tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
tf.get_local_variable("v2", shape=[5, 6])
snt.log_variables()
def testLogVariables_with_arg(self):
tf.get_default_graph().add_to_collection("config", {"version": 1})
with tf.variable_scope("m1"):
v1 = tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
v2 = tf.get_local_variable("v2", shape=[5, 6])
snt.log_variables([v2, v1])
class ReuseVarsTest(tf.test.TestCase):
class VariableContainer(object):
def __init__(self, name):
with tf.variable_scope(name) as vs:
self.variable_scope = vs
@util.reuse_variables
def method_with_reuse(self):
return tf.get_variable("a", shape=[1])
def method_without_reuse(self):
return tf.get_variable("b", shape=[1])
class InheritedVariableContainer(VariableContainer):
@util.reuse_variables
def not_inherited_method_with_reuse(self):
return tf.get_variable("c", shape=[1])
def test_reuse_method(self):
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope2")
self.assertEqual("b", obj1.method_without_reuse().op.name)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj1.method_without_reuse)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj2.method_without_reuse)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
def test_multiple_objects_per_variable_scope(self):
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope1")
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
def test_reuse_inherited_method(self):
obj1 = ReuseVarsTest.InheritedVariableContainer("scope1")
obj2 = ReuseVarsTest.InheritedVariableContainer("scope2")
self.assertEqual("b", obj1.method_without_reuse().op.name)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj1.method_without_reuse)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj2.method_without_reuse)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/c", obj1.not_inherited_method_with_reuse().op.name)
self.assertEqual("scope1/c", obj1.not_inherited_method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope2/c", obj2.not_inherited_method_with_reuse().op.name)
self.assertEqual("scope2/c", obj2.not_inherited_method_with_reuse().op.name)
def test_reuse_abstract_module(self):
class ModuleReuse(snt.AbstractModule):
def __init__(self, shape, name="multi_template_test"):
super(ModuleReuse, self).__init__(name=name)
self._shape = shape
@util.reuse_variables
def a(self):
return tf.get_variable("a", shape=self._shape)
@util.reuse_variables
def add_b(self, inputs):
return inputs + tf.get_variable("b", shape=self._shape)
def _build(self, inputs):
return self.add_b(inputs + self.a())
np.random.seed(100)
batch_size = 3
in_size = 4
inputs = tf.placeholder(tf.float32, shape=[batch_size, in_size])
module1 = ModuleReuse(inputs.get_shape().as_list())
module2 = ModuleReuse(inputs.get_shape().as_list())
a1 = module1.a()
inputs_plus_b1 = module1.add_b(inputs)
inputs_plus_ab1 = module1(inputs) # pylint: disable=not-callable
inputs_plus_ab2 = module2(inputs) # pylint: disable=not-callable
inputs_plus_b2 = module2.add_b(inputs)
a2 = module2.a()
inputs_plus_ab1_again = module1(inputs) # pylint: disable=not-callable
inputs_plus_ab2_again = module2(inputs) # pylint: disable=not-callable
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
input_data = np.random.rand(batch_size, in_size)
out = sess.run([a1, inputs_plus_b1, inputs_plus_ab1, a2, inputs_plus_b2,
inputs_plus_ab2],
feed_dict={inputs: input_data})
self.assertNotAlmostEqual(np.linalg.norm(out[0] - out[3]), 0)
self.assertNotAlmostEqual(np.linalg.norm(out[1] - out[4]), 0)
self.assertNotAlmostEqual(np.linalg.norm(out[2] - out[5]), 0)
self.assertAllClose(out[0] + out[1], out[2])
self.assertAllClose(out[3] + out[4], out[5])
out = sess.run([inputs_plus_ab1, inputs_plus_ab1_again],
feed_dict={inputs: input_data})
self.assertAllEqual(out[0], out[1])
out = sess.run([inputs_plus_ab2, inputs_plus_ab2_again],
feed_dict={inputs: input_data})
self.assertAllEqual(out[0], out[1])
def test_variable_scope_call_order(self):
class TestModule(snt.AbstractModule):
def __init__(self, name="test_module"):
super(TestModule, self).__init__(name=name)
@util.reuse_variables
def a(self):
return self.scope_name
def _build(self):
pass
@property
def variable_scope(self):
# Needed to access `self.variable_scope` before calling `self.build()`.
return self._template.variable_scope
m1 = TestModule(name="m1")
m2 = TestModule(name="m2")
a1 = m1.a
a2 = m2.a
self.assertEqual("m1", a1())
self.assertEqual("m2", a2())
def test_multiple_graphs(self):
g1 = tf.Graph()
g2 = tf.Graph()
with g1.as_default():
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope1")
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
with g2.as_default():
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope1")
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
def test_name_scopes(self):
class VariableContainerWithOps(ReuseVarsTest.VariableContainer):
@util.reuse_variables
def add_b(self, tensor):
b = tf.get_variable("b", shape=[1])
return tensor + b
@util.reuse_variables
def add_a(self, tensor):
return tensor + self.method_with_reuse()
@util.reuse_variables
def nested_add(self, tensor):
return tf.ones(shape=[1]) + self.add_a(tensor)
def get_tensor_names_from_default_graph():
ops = [
op for op in tf.get_default_graph().get_operations()
if "Initializer" not in op.name and "Assign" not in op.name and
"read" not in op.name
]
tensor_names = []
for op in ops:
tensor_names.extend(tensor.name for tensor in op.outputs)
return tensor_names
obj1 = VariableContainerWithOps("scope1")
obj2 = VariableContainerWithOps("scope2")
zeros = tf.zeros(shape=[1])
self.assertEqual("scope1/add_b/add", obj1.add_b(zeros).op.name)
self.assertEqual("scope1/add_b_1/add", obj1.add_b(zeros).op.name)
self.assertEqual("scope1/add_a/add", obj1.add_a(zeros).op.name)
self.assertEqual("scope1/add_a_1/add", obj1.add_a(zeros).op.name)
self.assertEqual("scope1/nested_add/add", obj1.nested_add(zeros).op.name)
self.assertEqual("scope1/nested_add_1/add", obj1.nested_add(zeros).op.name)
ones = tf.ones(shape=[1])
self.assertEqual("scope2/add_b/add", obj2.add_b(ones).op.name)
self.assertEqual("scope2/add_b_1/add", obj2.add_b(ones).op.name)
self.assertEqual("scope2/add_a/add", obj2.add_a(ones).op.name)
self.assertEqual("scope2/add_a_1/add", obj2.add_a(ones).op.name)
self.assertEqual("scope2/nested_add/add", obj2.nested_add(ones).op.name)
self.assertEqual("scope2/nested_add_1/add", obj2.nested_add(ones).op.name)
tensor_names = [
"zeros:0",
"scope1/b:0",
"scope1/add_b/add:0",
"scope1/add_b_1/add:0",
"scope1/a:0",
"scope1/add_a/add:0",
"scope1/add_a_1/add:0",
"scope1/nested_add/ones:0",
"scope1/add_a_2/add:0",
"scope1/nested_add/add:0",
"scope1/nested_add_1/ones:0",
"scope1/add_a_3/add:0",
"scope1/nested_add_1/add:0",
"ones:0",
"scope2/b:0",
"scope2/add_b/add:0",
"scope2/add_b_1/add:0",
"scope2/a:0",
"scope2/add_a/add:0",
"scope2/add_a_1/add:0",
"scope2/nested_add/ones:0",
"scope2/add_a_2/add:0",
"scope2/nested_add/add:0",
"scope2/nested_add_1/ones:0",
"scope2/add_a_3/add:0",
"scope2/nested_add_1/add:0",
]
self.assertEqual(tensor_names, get_tensor_names_from_default_graph())
class NameFunctionTest(tf.test.TestCase):
def testToSnakeCase(self):
test_cases = [
("UpperCamelCase", "upper_camel_case"),
("lowerCamelCase", "lower_camel_case"),
("endsWithXYZ", "ends_with_xyz"),
("already_snake_case", "already_snake_case"),
("__private__", "private"),
("LSTMModule", "lstm_module"),
("version123p56vfxObject", "version_123p56vfx_object"),
("version123P56VFXObject", "version_123p56vfx_object"),
("versionVFX123P56Object", "version_vfx123p56_object"),
("versionVfx123P56Object", "version_vfx_123p56_object"),
("lstm1", "lstm_1"),
("LSTM1", "lstm1"),
]
for camel_case, snake_case in test_cases:
actual = util.to_snake_case(camel_case)
self.assertEqual(actual, snake_case, "_to_snake_case(%s) -> %s != %s" %
(camel_case, actual, snake_case))
def testNameForCallable_Function(self):
def test():
pass
self.assertName(test, "test")
def testNameForCallable_Lambda(self):
test = lambda x: x
self.assertName(test, None)
def testNameForCallable_Partial(self):
def test(*unused_args):
pass
test = functools.partial(functools.partial(test, "a"), "b")
self.assertName(test, "test")
def testNameForCallable_Instance(self):
class Test(object):
def __call__(self):
pass
self.assertName(Test(), None)
def assertName(self, func, expected):
name = util.name_for_callable(func)
self.assertEqual(name, expected)
if __name__ == "__main__":
tf.test.main()
| mit | 1,781,703,247,022,419,500 | 36.758527 | 80 | 0.610073 | false |