repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
edudev/web-reply | main.py | 1 | 3741 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from gi.repository import GLib
from gwebsockets.server import Server
from gwebsockets.server import Message
_PORT = 8080
class WebClient(object):
def __init__(self, session):
self._session = session
self._session_id = None
def send_json_message(self, data):
self._session.send_message(json.dumps(data))
def send_raw_message(self, data):
self._session.send_message(data)
def get_session_id(self):
return self._session_id
def set_session_id(self, value):
self._session_id = value
class WebServer(object):
def __init__(self):
self._sessions = {}
self._server = Server()
self._server.connect('session-started', self._session_started_cb)
self._port = self._server.start(_PORT)
def _session_started_cb(self, server, session):
# perhaps reject non-sugar connections
# how do we know if a connection comes from sugar?
client = WebClient(session)
session.connect('handshake-completed',
self._handshake_completed_cb, client)
session.connect('message-received',
self._message_received_cb, client)
# maybe disconnect the signal handler once it is received
if session.is_ready():
self._add_client(session, client)
def _add_client(self, session, client):
url = session.get_headers().get('http_path')
# this should be of the form '/hub/sessionID'
if not url or not url.startswith('/hub/'):
return
session_id = url[5:]
client.set_session_id(session_id)
if session_id in self._sessions:
self._sessions[session_id].append(client)
else:
self._sessions[session_id] = [client]
client.send_json_message(
{'type': 'init-connection',
'peerCount': len(self._sessions[session_id])})
def _handshake_completed_cb(self, session, client):
self._add_client(session, client)
def _message_received_cb(self, session, message, source):
if message.message_type == Message.TYPE_BINARY:
# FIXME: how to handle this?
return
session_id = source.get_session_id()
if session_id is None:
# perhaps queue
return
dictionary = json.loads(message.data)
# TODO: be more strict with the protocol
for client in self._sessions[session_id]:
if client != source or dictionary.get('server-echo', False):
client.send_raw_message(message.data)
def _session_ended_cb(self, session, client):
# FIXME: this callback is not called at all
self._add_client(session, client)
session_id = client.get_session_id()
if session_id is None:
return
self._sessions[session_id].remove(client)
if not self._sessions[session_id]:
del self._sessions[session_id]
if __name__ == "__main__":
server = WebServer()
main_loop = GLib.MainLoop()
main_loop.run()
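# Minimal client-side sketch (illustrative, not part of the original program).
# It assumes the third-party "websocket-client" package is installed; the
# session id "demo-session" is an arbitrary placeholder.
def _example_client():
    import websocket  # pip install websocket-client

    ws = websocket.create_connection("ws://localhost:%d/hub/demo-session" % _PORT)
    # The server greets each client with an 'init-connection' message
    # carrying the current peer count for the session.
    greeting = json.loads(ws.recv())
    assert greeting["type"] == "init-connection"
    # Messages are relayed to the other clients of the same session;
    # setting 'server-echo' makes the server send our own message back too.
    ws.send(json.dumps({"action": "hello", "server-echo": True}))
    ws.close()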
| gpl-3.0 | -6,929,323,172,212,185,000 | 30.436975 | 73 | 0.62764 | false | 4.070729 | false | false | false |
ondrokrc/gramps | gramps/gui/filters/sidebar/_placesidebarfilter.py | 1 | 6941 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2008 Gary Burton
# Copyright (C) 2010,2015 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# gtk
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from ... import widgets
from gramps.gen.lib import Place, PlaceType
from .. import build_filter_model
from . import SidebarFilter
from gramps.gen.filters import GenericFilterFactory, rules
from gramps.gen.filters.rules.place import (RegExpIdOf, HasData, IsEnclosedBy,
HasTag, HasNoteRegexp,
MatchesFilter)
GenericPlaceFilter = GenericFilterFactory('Place')
#-------------------------------------------------------------------------
#
# PlaceSidebarFilter class
#
#-------------------------------------------------------------------------
class PlaceSidebarFilter(SidebarFilter):
def __init__(self, dbstate, uistate, clicked):
self.clicked_func = clicked
self.filter_id = widgets.BasicEntry()
self.filter_name = widgets.BasicEntry()
self.filter_place = Place()
self.filter_place.set_type((PlaceType.CUSTOM, ''))
self.ptype = Gtk.ComboBox(has_entry=True)
self.place_menu = widgets.MonitoredDataType(
self.ptype,
self.filter_place.set_type,
self.filter_place.get_type)
self.filter_code = widgets.BasicEntry()
self.filter_enclosed = widgets.PlaceEntry(dbstate, uistate, [])
self.filter_note = widgets.BasicEntry()
self.filter_regex = Gtk.CheckButton(label=_('Use regular expressions'))
self.tag = Gtk.ComboBox()
self.generic = Gtk.ComboBox()
SidebarFilter.__init__(self, dbstate, uistate, "Place")
def create_widget(self):
cell = Gtk.CellRendererText()
cell.set_property('width', self._FILTER_WIDTH)
cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
self.generic.pack_start(cell, True)
self.generic.add_attribute(cell, 'text', 0)
self.on_filters_changed('Place')
cell = Gtk.CellRendererText()
cell.set_property('width', self._FILTER_WIDTH)
cell.set_property('ellipsize', self._FILTER_ELLIPSIZE)
self.tag.pack_start(cell, True)
self.tag.add_attribute(cell, 'text', 0)
self.add_text_entry(_('ID'), self.filter_id)
self.add_text_entry(_('Name'), self.filter_name)
self.add_entry(_('Type'), self.ptype)
self.add_text_entry(_('Code'), self.filter_code)
self.add_text_entry(_('Enclosed By'), self.filter_enclosed)
self.add_text_entry(_('Note'), self.filter_note)
self.add_entry(_('Tag'), self.tag)
self.add_filter_entry(_('Custom filter'), self.generic)
self.add_regex_entry(self.filter_regex)
def clear(self, obj):
self.filter_id.set_text('')
self.filter_name.set_text('')
self.filter_code.set_text('')
self.filter_enclosed.set_text('')
self.filter_note.set_text('')
self.ptype.get_child().set_text('')
self.tag.set_active(0)
self.generic.set_active(0)
def get_filter(self):
gid = str(self.filter_id.get_text()).strip()
name = str(self.filter_name.get_text()).strip()
ptype = self.filter_place.get_type().xml_str()
code = str(self.filter_code.get_text()).strip()
enclosed = str(self.filter_enclosed.get_text()).strip()
note = str(self.filter_note.get_text()).strip()
regex = self.filter_regex.get_active()
tag = self.tag.get_active() > 0
gen = self.generic.get_active() > 0
empty = not (gid or name or ptype or code or enclosed or note or regex
or tag or gen)
if empty:
generic_filter = None
else:
generic_filter = GenericPlaceFilter()
if gid:
rule = RegExpIdOf([gid], use_regex=regex)
generic_filter.add_rule(rule)
if enclosed:
rule = IsEnclosedBy([enclosed])
generic_filter.add_rule(rule)
rule = HasData([name, ptype, code], use_regex=regex)
generic_filter.add_rule(rule)
if note:
rule = HasNoteRegexp([note], use_regex=regex)
generic_filter.add_rule(rule)
# check the Tag
if tag:
model = self.tag.get_model()
node = self.tag.get_active_iter()
attr = model.get_value(node, 0)
rule = HasTag([attr])
generic_filter.add_rule(rule)
if self.generic.get_active() != 0:
model = self.generic.get_model()
node = self.generic.get_active_iter()
obj = str(model.get_value(node, 0))
rule = MatchesFilter([obj])
generic_filter.add_rule(rule)
return generic_filter
def on_filters_changed(self, name_space):
if name_space == 'Place':
all_filter = GenericPlaceFilter()
all_filter.set_name(_("None"))
all_filter.add_rule(rules.place.AllPlaces([]))
self.generic.set_model(build_filter_model('Place', [all_filter]))
self.generic.set_active(0)
def on_tags_changed(self, tag_list):
"""
Update the list of tags in the tag filter.
"""
model = Gtk.ListStore(str)
model.append(('',))
for tag_name in tag_list:
model.append((tag_name,))
self.tag.set_model(model)
self.tag.set_active(0)
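# Illustrative sketch (not used by the class above): the same kind of filter
# can be assembled directly from the rule classes imported at the top of this
# module. The ID and name values below are placeholders.
def _example_build_place_filter():
    place_filter = GenericPlaceFilter()
    # Match places whose Gramps ID is P0001 and whose name is "Oslo";
    # the empty strings leave type and code unconstrained.
    place_filter.add_rule(RegExpIdOf(['P0001'], use_regex=False))
    place_filter.add_rule(HasData(['Oslo', '', ''], use_regex=False))
    return place_filter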
| gpl-2.0 | 3,899,255,514,207,689,700 | 36.928962 | 79 | 0.550209 | false | 4.08775 | false | false | false |
tomschr/dbassembly | tests/conftest.py | 1 | 5064 |
#
# Copyright (c) 2016 SUSE Linux GmbH. All rights reserved.
#
# This file is part of dbassembly.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
from functools import wraps
import pytest
from py.path import local
from dbassembly.core import NSMAP
from dbassembly.cli import parsecli
class raises(object): # pragma: no cover
"""
exception decorator as used in nose, tools/nontrivial.py
"""
def __init__(self, *exceptions):
self.exceptions = exceptions
self.valid = ' or '.join([e.__name__ for e in exceptions])
def __call__(self, func):
name = func.__name__
def newfunc(*args, **kw):
try:
func(*args, **kw)
except self.exceptions:
pass
except:
raise
else:
message = "%s() did not raise %s" % (name, self.valid)
raise AssertionError(message)
newfunc = wraps(func)(newfunc)
return newfunc
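# Usage sketch for the decorator above (hypothetical test function):
#
#     @raises(ValueError)
#     def test_rejects_bad_input():
#         int("not a number")
#
# The decorated test only passes if one of the listed exceptions is raised.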
def xmldump(tree, indent=2):
"""Dump XML tree into hierarchical string
:param tree: ElementTree or Element
:return: generator, yields strings
"""
for i, elem in enumerate(tree.iter()):
indstr=indent*" "
if elem.text is None or (not elem.text.strip()):
text = 'None'
else:
text = repr(elem.text.strip())
yield i*indstr + "%s = %s" % (elem.tag, text)
for attr in sorted(elem.attrib):
yield (i+1)*indstr+"* %s = %r" % (attr, elem.attrib[attr])
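# Example of the generator above (illustrative, using the standard library):
#
#     import xml.etree.ElementTree as ET
#     root = ET.fromstring('<assembly id="a1"><module>text</module></assembly>')
#     print("\n".join(xmldump(root)))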
# ------------------------------------------------------
# Fixtures
#
@pytest.fixture
def docoptdict():
"""Fixture: creates a faked dictionary object from docopt.
:return: dictionary
:rtype: dict
"""
return parsecli(['foo.xml'])
# ------------------------------------------------------
# General
#
# http://pytest.org/latest/parametrize.html#basic-pytest-generate-tests-example
def casesdir():
"""Fixture: returns the "cases" directory relative to
'conftest.py'
:return: directory pointing to 'cases'
:rtype: :py:class:'py.path.local'
"""
return local(__file__).dirpath() / "cases"
def structdir():
"""Fixture: returns the "cases" directory relative to
'conftest.py'
:return: directory pointing to 'cases'
:rtype: :py:class:'py.path.local'
"""
return local(__file__).dirpath() / "struct"
def get_test_cases(testcasesdir,
casesxml='.case.xml',
patternout='.out.xml',
patternerr='.err.xml'):
"""Generator: yield name tuple of (casexmlfile, outputfile, errorfile)
:param str casesxml: file extension of XML case file
:param str patternout: file extension of output file
:param str patternerr: file extension of error file
"""
for case in testcasesdir:
b = case.basename
out = b.replace(casesxml, patternout)
err = b.replace(casesxml, patternerr)
out = case.new(basename=out)
err = case.new(basename=err)
yield (case, out, err)
def xmltestcase(metafunc, cases):
"""Compares .cases.xml files with .out.xml / .err.xml files
HINT: The out file has to be an *exact* output. Each spaces
is considered to be significant.
"""
testcases = cases.listdir('*.case.xml', sort=True)
# Create tuple of (original, outputfile, errorfile)
result = get_test_cases(testcases)
ids=[i.basename for i in testcases]
metafunc.parametrize("xmltestcase", result, ids=ids)
def xmlteststruct(metafunc, struct):
"""Compares .cases.xml files with .struct.xml / .err.xml files
"""
# cases = local(__file__).dirpath() / "cases"
testcases = struct.listdir('*.case.xml', sort=True)
# Create tuple of (original, outputfile, errorfile)
result = get_test_cases(testcases, patternout='.out.struct')
ids=[i.basename for i in testcases]
metafunc.parametrize("xmlteststruct", result, ids=ids)
def pytest_generate_tests(metafunc):
"""Generate testcases for all *.case.xml files.
"""
funcdict = dict(xmltestcase=[xmltestcase, casesdir()],
xmlteststruct=[xmlteststruct, structdir()],
)
if not metafunc.fixturenames:
return
func, subdir = funcdict.get(metafunc.fixturenames[0], [None, None])
if func is not None:
func(metafunc, subdir)
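# Expected on-disk layout for the case generators above (file names illustrative):
#
#     tests/cases/foo.case.xml   -> compared against foo.out.xml / foo.err.xml
#     tests/struct/bar.case.xml  -> compared against bar.out.struct / bar.err.xml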
| gpl-3.0 | -6,572,018,554,139,390,000 | 29.142857 | 79 | 0.615916 | false | 3.968652 | true | false | false |
joeyac/JudgeServer | server/oj_hdu.py | 1 | 6321 |
# -*- coding: utf-8 -*-
from utils import logger
from update_status import update_submission_status
from exception import VLoginFailed, VSubmitFailed
from bs4 import BeautifulSoup
import html5lib
import urllib, urllib2, cookielib
import time
class HDU:
# base information:
URL_HOME = 'http://acm.hdu.edu.cn/'
URL_LOGIN = URL_HOME + 'userloginex.php?action=login'
URL_SUBMIT = URL_HOME + 'submit.php?action=submit'
URL_STATUS = URL_HOME + 'status.php?'
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Ubuntu Chromium/52.0.2743.116 Chrome/52.0.2743.116 Safari/537.36',
'Origin': "http://acm.hdu.edu.cn",
'Host': "acm.hdu.edu.cn",
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
}
# result
INFO = ['Run ID', 'Submit Time', 'Judge Status', 'Pro.ID', 'Exe.Time', 'Exe.Memory', 'Code Len.', 'Language',
'Author']
# map to compatible result
# vid v_run_id v_submit_time status time memory length language v_user
MAP = {
'Run ID': 'v_run_id',
'Submit Time': 'v_submit_time',
'Judge Status': 'status',
'Pro.ID': 'vid',
'Exe.Time': 'time',
'Exe.Memory': 'memory',
'Code Len.': 'length',
'Language': 'language',
'Author': 'v_user',
}
# language
LANGUAGE = {
'G++': '0',
'GCC': '1',
'C++': '2',
'C': '3',
'PASCAL': '4',
'JAVA': '5',
'C#': '6',
}
def __init__(self, user_id, password):
self.user_id = user_id
self.password = password
self.problem_id = ''
self.run_id = ''
# Create a CookieJar instance to store cookies
cookie = cookielib.CookieJar()
# Use urllib2's HTTPCookieProcessor to build a cookie handler
handler = urllib2.HTTPCookieProcessor(cookie)
# Build an opener from the handler
self.opener = urllib2.build_opener(handler)
# The opener's open() works like urllib2.urlopen and also accepts a Request object
def login(self):
data = dict(
username=self.user_id,
userpass=self.password,
login='Sign In'
)
try:
post_data = urllib.urlencode(data)
request = urllib2.Request(HDU.URL_LOGIN, post_data, HDU.headers)
response = self.opener.open(request).read()
if response.find('signout') > 0:
return True
else:
logger.warning("Login failed.")
return False
except:
logger.error("Login method error.")
return False
def submit(self, problem_id, language, src_code):
submit_data = dict(
problemid=problem_id,
language=HDU.LANGUAGE[language.upper()],
usercode=src_code,
check='0',
)
self.problem_id = problem_id
post_data = urllib.urlencode(submit_data)
try:
request = urllib2.Request(HDU.URL_SUBMIT, post_data, HDU.headers)
self.opener.open(request)
return True
except:
logger.info('Submit method error.')
return False
@staticmethod
def str2int(string):
if not string:
return 0
try:
return int(string[:-1])
except:
return int(string[:-2])
def result(self):
data = {
'first': '',
'pid': '',
'user': self.user_id,
}
if self.run_id:
data['first'] = self.run_id
if self.problem_id:
data['pid'] = self.problem_id
url = HDU.URL_STATUS + urllib.urlencode(data)
try:
request = urllib2.Request(url, '', HDU.headers)
page = self.opener.open(request, timeout=5)
soup = BeautifulSoup(page, 'html5lib')
table = soup.find('table', {'class': 'table_text'})
table_body = table.find('tbody')
rows = table_body.find_all('tr')
data = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
data.append([ele for ele in cols]) # No need:Get rid of empty values
if len(data) <= 1:
logger.warning('get result error!')
return False, {}
name = data[0]
latest = data[1]
if not self.run_id:
self.run_id = latest[0]
wait = ['queuing', 'compiling', 'running']
res = {}
for i in range(9):
res[HDU.MAP[name[i]]] = str(latest[i]).lower()
res['time'] = self.str2int(res['time'])
res['memory'] = self.str2int(res['memory'])
for i in range(3):
if res['status'] == wait[i]:
return False, res
return True, res
except Exception as e:
logger.error(e)
return False, {}
def hdu_submit(problem_id, language_name, src_code, ip=None, sid=None, username='USTBVJ', password='USTBVJ'):
hdu = HDU(username, password)
if hdu.login():
if hdu.submit(problem_id, language_name, src_code):
status, result = hdu.result()
while not status:
status, result = hdu.result()
if result and ip:
update_submission_status(ip, sid, result['status'])
time.sleep(2)
return result
else:
info = 'HDU [{pid},{lang},{sid}] submit error.'.format(pid=problem_id, lang=language_name, sid=sid)
logger.exception(info)
raise VSubmitFailed(info)
else:
info = 'HDU [{user},{sid}] login failed.'.format(user=username, sid=sid)
logger.exception(info)
raise VLoginFailed(info)
if __name__ == '__main__':
pid = 1000
lang = 'g++'
src = '''
#include<bits/stdc++.h>
using namespace std;
int main()
{
int a,b;
while(cin>>a>>b)cout<<a-b<<endl;
return 0;
}
'''
print hdu_submit(pid,lang,src)
| mit | 749,099,588,081,799,400 | 29.414634 | 113 | 0.517562 | false | 3.625 | false | false | false |
egnyte/python-egnyte | egnyte/tests/test_events.py | 1 | 1095 |
from egnyte.tests.config import EgnyteTestCase
FOLDER_NAME = 'EVENT'
class TestEvents(EgnyteTestCase):
def setUp(self):
super(TestEvents, self).setUp()
self.root_folder.create()
def test_filter_poll(self):
events = self.egnyte.events
events = events.filter(events.oldest_event_id)
results = events.poll(count=1)
self.assertNotEqual(0, len(results), "Poll results should not be empty")
self.assertNotEqual(events.start_id, events.oldest_event_id,
"latest_event_id should have been bumped after non-empty poll")
def test_register_new_events(self):
folder = self.root_folder.folder(FOLDER_NAME).create()
events = self.egnyte.events
events = events.filter(events.latest_event_id - 1)
results = events.poll(count=1)
self.assertEqual(results[0].action_source, 'PublicAPI')
self.assertEqual(results[0].action, 'create')
self.assertEqual(results[0].data['target_path'], folder.path)
self.assertEqual(results[0].data['is_folder'], True)
| mit | -4,517,023,570,790,461,400 | 36.758621 | 91 | 0.655708 | false | 3.625828 | true | false | false |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactbroker/test_obligations.py | 1 | 1240 |
from dataactbroker.handlers.submission_handler import get_submission_stats
from tests.unit.dataactcore.factories.job import SubmissionFactory
from tests.unit.dataactcore.factories.staging import TotalObligationsFactory
def test_obligation_stats_for_submission_nonzero(database):
submission = SubmissionFactory()
database.session.add(submission)
database.session.commit()
financials = [
TotalObligationsFactory(total_obligations=5000, total_proc_obligations=2000, total_asst_obligations=3000,
submission_id=submission.submission_id)
]
database.session.add_all(financials)
database.session.commit()
assert get_submission_stats(submission.submission_id) == {
"total_obligations": 5000,
"total_procurement_obligations": 2000,
"total_assistance_obligations": 3000
}
def test_obligation_stats_for_submission_zero(database):
submission = SubmissionFactory()
# no financials in db
database.session.add(submission)
database.session.commit()
assert get_submission_stats(submission.submission_id) == {
"total_obligations": 0,
"total_procurement_obligations": 0,
"total_assistance_obligations": 0
}
| cc0-1.0 | 4,771,918,235,712,121,000 | 36.575758 | 113 | 0.718548 | false | 3.780488 | false | false | false |
ericblau/ipf-xsede | ipf/run_workflow.py | 1 | 4459 |
###############################################################################
# Copyright 2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import logging
import logging.config
import optparse
import os
import sys
min_version = (3,6)
max_version = (3,9)
if sys.version_info < min_version or sys.version_info > max_version:
print("Python version between 3.6 and 3.9 is required", file=sys.stderr)
sys.exit(1)
from ipf.daemon import OneProcessWithRedirect,Daemon
from ipf.engine import WorkflowEngine
from ipf.paths import *
#######################################################################################################################
logging.config.fileConfig(os.path.join(IPF_ETC_PATH,"logging.conf"))
#######################################################################################################################
class WorkflowDaemon(Daemon):
def __init__(self, workflow_path):
self.workflow_path = workflow_path
(path,workflow_filename) = os.path.split(workflow_path)
name = workflow_filename.split(".")[0]
Daemon.__init__(self,
pidfile=os.path.join(IPF_VAR_PATH,name+".pid"),
stdout=os.path.join(IPF_LOG_PATH,name+".log"),
stderr=os.path.join(IPF_LOG_PATH,name+".log"))
def run(self):
engine = WorkflowEngine()
engine.run(self.workflow_path)
#######################################################################################################################
class OneWorkflowOnly(OneProcessWithRedirect):
def __init__(self, workflow_path):
self.workflow_path = workflow_path
(path,workflow_filename) = os.path.split(workflow_path)
name = workflow_filename.split(".")[0]
OneProcessWithRedirect.__init__(self,
pidfile=os.path.join(IPF_VAR_PATH,name+".pid"),
stdout=os.path.join(IPF_LOG_PATH,name+".log"),
stderr=os.path.join(IPF_LOG_PATH,name+".log"))
def run(self):
engine = WorkflowEngine()
engine.run(self.workflow_path)
#######################################################################################################################
def main():
usage = "Usage: %prog [options] <workflow file>"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-d","--daemon",action="store_true",default=False,dest="daemon",
help="run as a daemon")
parser.add_option("-c","--cron",action="store_true",default=False,dest="cron",
help="running out of cron")
(options, args) = parser.parse_args()
if options.daemon and options.cron:
parser.error("can't run as both daemon and cron")
if len(args) != 1:
parser.error("exactly one positional argument expected - a path to a workflow file")
if options.daemon:
daemon = WorkflowDaemon(args[0])
daemon.start()
elif options.cron:
# don't let processes pile up if workflows aren't finishing
workflow = OneWorkflowOnly(args[0])
workflow.start()
else:
engine = WorkflowEngine()
engine.run(args[0])
#######################################################################################################################
if __name__ == "__main__":
main()
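# Typical invocations (the workflow file name is a placeholder):
#
#     run_workflow.py my_workflow.json            # run once in the foreground
#     run_workflow.py --daemon my_workflow.json   # run as a daemon
#     run_workflow.py --cron my_workflow.json     # single-instance mode for cron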
| apache-2.0 | 7,934,452,383,603,104,000 | 42.291262 | 119 | 0.462884 | false | 4.852013 | false | false | false |
Statoil/libecl | python/ecl/grid/ecl_grid.py | 1 | 58619 |
# Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_grid.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module to load and query ECLIPSE GRID/EGRID files.
The ecl_grid module contains functionality to load and query an
ECLIPSE grid file; it is currently not possible to manipulate or let
alone create a grid with ecl_grid module. The functionality is
implemented in the EclGrid class. The ecl_grid module is a thin
wrapper around the ecl_grid.c implementation from the libecl library.
"""
import ctypes
import warnings
import numpy
import pandas
import sys
import os.path
import math
import itertools
from cwrap import CFILE, BaseCClass, load, open as copen
from ecl import EclPrototype
from ecl.util.util import monkey_the_camel
from ecl.util.util import IntVector
from ecl import EclDataType, EclUnitTypeEnum, EclTypeEnum
from ecl.eclfile import EclKW, FortIO
from ecl.grid import Cell
class EclGrid(BaseCClass):
"""
Class for loading and internalizing ECLIPSE GRID/EGRID files.
"""
TYPE_NAME = "ecl_grid"
_fread_alloc = EclPrototype("void* ecl_grid_load_case__(char*, bool)", bind = False)
_grdecl_create = EclPrototype("ecl_grid_obj ecl_grid_alloc_GRDECL_kw(int, int, int, ecl_kw, ecl_kw, ecl_kw, ecl_kw)", bind = False)
_alloc_rectangular = EclPrototype("ecl_grid_obj ecl_grid_alloc_rectangular(int, int, int, double, double, double, int*)", bind = False)
_exists = EclPrototype("bool ecl_grid_exists(char*)", bind = False)
_get_numbered_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr_from_lgr_nr(ecl_grid, int)")
_get_named_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr(ecl_grid, char*)")
_get_cell_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_cell_lgr1(ecl_grid, int)")
_num_coarse_groups = EclPrototype("int ecl_grid_get_num_coarse_groups(ecl_grid)")
_in_coarse_group1 = EclPrototype("bool ecl_grid_cell_in_coarse_group1(ecl_grid, int)")
_free = EclPrototype("void ecl_grid_free(ecl_grid)")
_get_nx = EclPrototype("int ecl_grid_get_nx(ecl_grid)")
_get_ny = EclPrototype("int ecl_grid_get_ny(ecl_grid)")
_get_nz = EclPrototype("int ecl_grid_get_nz(ecl_grid)")
_get_global_size = EclPrototype("int ecl_grid_get_global_size(ecl_grid)")
_get_active = EclPrototype("int ecl_grid_get_active_size(ecl_grid)")
_get_active_fracture = EclPrototype("int ecl_grid_get_nactive_fracture(ecl_grid)")
_get_name = EclPrototype("char* ecl_grid_get_name(ecl_grid)")
_ijk_valid = EclPrototype("bool ecl_grid_ijk_valid(ecl_grid, int, int, int)")
_get_active_index3 = EclPrototype("int ecl_grid_get_active_index3(ecl_grid, int, int, int)")
_get_global_index3 = EclPrototype("int ecl_grid_get_global_index3(ecl_grid, int, int, int)")
_get_active_index1 = EclPrototype("int ecl_grid_get_active_index1(ecl_grid, int)")
_get_active_fracture_index1 = EclPrototype("int ecl_grid_get_active_fracture_index1(ecl_grid, int)")
_get_global_index1A = EclPrototype("int ecl_grid_get_global_index1A(ecl_grid, int)")
_get_global_index1F = EclPrototype("int ecl_grid_get_global_index1F(ecl_grid, int)")
_get_ijk1 = EclPrototype("void ecl_grid_get_ijk1(ecl_grid, int, int*, int*, int*)")
_get_ijk1A = EclPrototype("void ecl_grid_get_ijk1A(ecl_grid, int, int*, int*, int*)")
_get_xyz3 = EclPrototype("void ecl_grid_get_xyz3(ecl_grid, int, int, int, double*, double*, double*)")
_get_xyz1 = EclPrototype("void ecl_grid_get_xyz1(ecl_grid, int, double*, double*, double*)")
_get_cell_corner_xyz1 = EclPrototype("void ecl_grid_get_cell_corner_xyz1(ecl_grid, int, int, double*, double*, double*)")
_get_corner_xyz = EclPrototype("void ecl_grid_get_corner_xyz(ecl_grid, int, int, int, double*, double*, double*)")
_get_xyz1A = EclPrototype("void ecl_grid_get_xyz1A(ecl_grid, int, double*, double*, double*)")
_get_ij_xy = EclPrototype("bool ecl_grid_get_ij_from_xy(ecl_grid, double, double, int, int*, int*)")
_get_ijk_xyz = EclPrototype("int ecl_grid_get_global_index_from_xyz(ecl_grid, double, double, double, int)")
_cell_contains = EclPrototype("bool ecl_grid_cell_contains_xyz1(ecl_grid, int, double, double, double)")
_cell_regular = EclPrototype("bool ecl_grid_cell_regular1(ecl_grid, int)")
_num_lgr = EclPrototype("int ecl_grid_get_num_lgr(ecl_grid)")
_has_numbered_lgr = EclPrototype("bool ecl_grid_has_lgr_nr(ecl_grid, int)")
_has_named_lgr = EclPrototype("bool ecl_grid_has_lgr(ecl_grid, char*)")
_grid_value = EclPrototype("double ecl_grid_get_property(ecl_grid, ecl_kw, int, int, int)")
_get_cell_volume = EclPrototype("double ecl_grid_get_cell_volume1(ecl_grid, int)")
_get_cell_thickness = EclPrototype("double ecl_grid_get_cell_thickness1(ecl_grid, int)")
_get_cell_dx = EclPrototype("double ecl_grid_get_cell_dx1(ecl_grid, int)")
_get_cell_dy = EclPrototype("double ecl_grid_get_cell_dy1(ecl_grid, int)")
_get_depth = EclPrototype("double ecl_grid_get_cdepth1(ecl_grid, int)")
_fwrite_grdecl = EclPrototype("void ecl_grid_grdecl_fprintf_kw(ecl_grid, ecl_kw, char*, FILE, double)")
_load_column = EclPrototype("void ecl_grid_get_column_property(ecl_grid, ecl_kw, int, int, double_vector)")
_get_top = EclPrototype("double ecl_grid_get_top2(ecl_grid, int, int)")
_get_top1A = EclPrototype("double ecl_grid_get_top1A(ecl_grid, int)")
_get_bottom = EclPrototype("double ecl_grid_get_bottom2(ecl_grid, int, int)")
_locate_depth = EclPrototype("int ecl_grid_locate_depth(ecl_grid, double, int, int)")
_invalid_cell = EclPrototype("bool ecl_grid_cell_invalid1(ecl_grid, int)")
_valid_cell = EclPrototype("bool ecl_grid_cell_valid1(ecl_grid, int)")
_get_distance = EclPrototype("void ecl_grid_get_distance(ecl_grid, int, int, double*, double*, double*)")
_fprintf_grdecl2 = EclPrototype("void ecl_grid_fprintf_grdecl2(ecl_grid, FILE, ecl_unit_enum) ")
_fwrite_GRID2 = EclPrototype("void ecl_grid_fwrite_GRID2(ecl_grid, char*, ecl_unit_enum)")
_fwrite_EGRID2 = EclPrototype("void ecl_grid_fwrite_EGRID2(ecl_grid, char*, ecl_unit_enum)")
_equal = EclPrototype("bool ecl_grid_compare(ecl_grid, ecl_grid, bool, bool)")
_dual_grid = EclPrototype("bool ecl_grid_dual_grid(ecl_grid)")
_init_actnum = EclPrototype("void ecl_grid_init_actnum_data(ecl_grid, int*)")
_compressed_kw_copy = EclPrototype("void ecl_grid_compressed_kw_copy(ecl_grid, ecl_kw, ecl_kw)")
_global_kw_copy = EclPrototype("void ecl_grid_global_kw_copy(ecl_grid, ecl_kw, ecl_kw)")
_create_volume_keyword = EclPrototype("ecl_kw_obj ecl_grid_alloc_volume_kw(ecl_grid, bool)")
_use_mapaxes = EclPrototype("bool ecl_grid_use_mapaxes(ecl_grid)")
_export_coord = EclPrototype("ecl_kw_obj ecl_grid_alloc_coord_kw(ecl_grid)")
_export_zcorn = EclPrototype("ecl_kw_obj ecl_grid_alloc_zcorn_kw(ecl_grid)")
_export_actnum = EclPrototype("ecl_kw_obj ecl_grid_alloc_actnum_kw(ecl_grid)")
_export_mapaxes = EclPrototype("ecl_kw_obj ecl_grid_alloc_mapaxes_kw(ecl_grid)")
_get_unit_system = EclPrototype("ecl_unit_enum ecl_grid_get_unit_system(ecl_grid)")
_export_index_frame = EclPrototype("void ecl_grid_export_index(ecl_grid, int*, int*, bool)")
_export_data_as_int = EclPrototype("void ecl_grid_export_data_as_int(int, int*, ecl_kw, int*)", bind = False)
_export_data_as_double = EclPrototype("void ecl_grid_export_data_as_double(int, int*, ecl_kw, double*)", bind = False)
_export_volume = EclPrototype("void ecl_grid_export_volume(ecl_grid, int, int*, double*)")
_export_position = EclPrototype("void ecl_grid_export_position(ecl_grid, int, int*, double*)")
_export_corners = EclPrototype("void export_corners(ecl_grid, int, int*, double*)")
@classmethod
def load_from_grdecl(cls, filename):
"""Will create a new EclGrid instance from grdecl file.
This function will scan the input file @filename and look for
the keywords required to build a grid. The following keywords
are required:
SPECGRID ZCORN COORD
In addition the function will look for and use the ACTNUM and
MAPAXES keywords if they are found; if ACTNUM is not found all
cells are assumed to be active.
Slightly more exotic grid concepts like dual porosity, NNC
mapping, LGR and coarsened cells will be completely ignored;
if you need such concepts you must have an EGRID file and use
the default EclGrid() constructor - that is also considerably
faster.
"""
if os.path.isfile(filename):
with copen(filename) as f:
specgrid = EclKW.read_grdecl(f, "SPECGRID", ecl_type=EclDataType.ECL_INT, strict=False)
zcorn = EclKW.read_grdecl(f, "ZCORN")
coord = EclKW.read_grdecl(f, "COORD")
try:
actnum = EclKW.read_grdecl(f, "ACTNUM", ecl_type=EclDataType.ECL_INT)
except ValueError:
actnum = None
try:
mapaxes = EclKW.read_grdecl(f, "MAPAXES")
except ValueError:
mapaxes = None
return EclGrid.create(specgrid, zcorn, coord, actnum, mapaxes)
else:
raise IOError("No such file:%s" % filename)
@classmethod
def load_from_file(cls, filename):
"""
Will inspect the @filename argument and create a new EclGrid instance.
"""
if FortIO.isFortranFile(filename):
return EclGrid(filename)
else:
return EclGrid.loadFromGrdecl(filename)
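# Illustrative usage of the loaders above (the file name is a placeholder):
#
#     grid = EclGrid("ECLIPSE.EGRID")
#     nx, ny, nz, nactive = grid.get_dims()
#     x, y, z = grid.get_xyz(ijk=(0, 0, 0))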
@classmethod
def create(cls, specgrid, zcorn, coord, actnum, mapaxes=None):
"""
Create a new grid instance from existing keywords.
This is a class method which can be used to create an EclGrid
instance based on the EclKW instances @specgrid, @zcorn,
@coord and @actnum. An ECLIPSE EGRID file contains the
SPECGRID, ZCORN, COORD and ACTNUM keywords, so a somewhat
involved way to create a EclGrid instance could be:
file = ecl.EclFile("ECLIPSE.EGRID")
specgrid_kw = file.iget_named_kw("SPECGRID", 0)
zcorn_kw = file.iget_named_kw("ZCORN", 0)
coord_kw = file.iget_named_kw("COORD", 0)
actnum_kw = file.iget_named_kw("ACTNUM", 0)
grid = EclGrid.create(specgrid_kw, zcorn_kw, coord_kw, actnum_kw)
If you are so inclined ...
"""
return cls._grdecl_create(specgrid[0], specgrid[1], specgrid[2], zcorn, coord, actnum, mapaxes)
@classmethod
def create_rectangular(cls, dims, dV, actnum=None):
"""
Will create a new rectangular grid. @dims = (nx,ny,nz) @dV = (dx,dy,dz)
With the default value @actnum == None all cells will be active.
"""
warnings.warn("EclGrid.createRectangular is deprecated. " +
"Please used the similar method in EclGridGenerator!",
DeprecationWarning)
if actnum is None:
ecl_grid = cls._alloc_rectangular(dims[0], dims[1], dims[2], dV[0], dV[1], dV[2], None)
else:
if not isinstance(actnum, IntVector):
tmp = IntVector(initial_size=len(actnum))
for (index, value) in enumerate(actnum):
tmp[index] = value
actnum = tmp
if not len(actnum) == dims[0] * dims[1] * dims[2]:
raise ValueError("ACTNUM size mismatch: len(ACTNUM):%d Expected:%d" % (len(actnum), dims[0] * dims[1] * dims[2]))
ecl_grid = cls._alloc_rectangular(dims[0], dims[1], dims[2], dV[0], dV[1], dV[2], actnum.getDataPtr())
# If we have not succeeded in creating the grid we *assume* the
# error is due to a failed malloc.
if ecl_grid is None:
raise MemoryError("Failed to allocate regular grid")
return ecl_grid
def __init__(self, filename, apply_mapaxes=True):
"""
Will create a grid structure from an EGRID or GRID file.
"""
c_ptr = self._fread_alloc(filename, apply_mapaxes)
if c_ptr:
super(EclGrid, self).__init__(c_ptr)
else:
raise IOError("Loading grid from:%s failed" % filename)
def free(self):
self._free()
def _nicename(self):
"""name is often full path to grid, if so, output basename, else name"""
name = self.getName()
if os.path.isfile(name):
name = os.path.basename(name)
return name
def __repr__(self):
"""Returns, e.g.:
EclGrid("NORNE_ATW2013.EGRID", 46x112x22, global_size=113344, active_size=44431) at 0x28c4a70
"""
name = self._nicename()
if name:
name = '"%s", ' % name
g_size = self.getGlobalSize()
a_size = self.getNumActive()
xyz_s = '%dx%dx%d' % (self.getNX(),self.getNY(),self.getNZ())
return self._create_repr('%s%s, global_size=%d, active_size=%d' % (name, xyz_s, g_size, a_size))
def __len__(self):
"""
len(grid) will return the total number of cells.
"""
return self._get_global_size()
def equal(self, other, include_lgr=True, include_nnc=False, verbose=False):
"""
Compare the current grid with the other grid.
"""
if not isinstance(other, EclGrid):
raise TypeError("The other argument must be an EclGrid instance")
return self._equal(other, include_lgr, include_nnc, verbose)
def dual_grid(self):
"""Is this grid dual porosity model?"""
return self._dual_grid()
def get_dims(self):
"""A tuple of four elements: (nx, ny, nz, nactive)."""
return (self.getNX(),
self.getNY(),
self.getNZ(),
self.getNumActive())
@property
def nx(self):
return self._get_nx()
def get_nx(self):
""" The number of elements in the x direction"""
return self._get_nx()
@property
def ny(self):
return self._get_ny()
def get_ny(self):
""" The number of elements in the y direction"""
return self._get_ny()
@property
def nz(self):
return self._get_nz()
def get_nz(self):
""" The number of elements in the z direction"""
return self._get_nz()
def get_global_size(self):
"""Returns the total number of cells in this grid"""
return self._get_global_size()
def get_num_active(self):
"""The number of active cells in the grid."""
return self._get_active()
def get_num_active_fracture(self):
"""The number of active cells in the grid."""
return self._get_active_fracture()
def get_bounding_box_2d(self, layer=0, lower_left=None, upper_right=None):
if 0 <= layer <= self.getNZ():
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
if lower_left is None:
i1 = 0
j1 = 0
else:
i1,j1 = lower_left
if not 0 < i1 < self.getNX():
raise ValueError("lower_left i coordinate invalid")
if not 0 < j1 < self.getNY():
raise ValueError("lower_left j coordinate invalid")
if upper_right is None:
i2 = self.getNX()
j2 = self.getNY()
else:
i2,j2 = upper_right
if not 1 < i2 <= self.getNX():
raise ValueError("upper_right i coordinate invalid")
if not 1 < j2 <= self.getNY():
raise ValueError("upper_right j coordinate invalid")
if not i1 < i2:
raise ValueError("Must have lower_left < upper_right")
if not j1 < j2:
raise ValueError("Must have lower_left < upper_right")
self._get_corner_xyz(i1, j1, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p0 = (x.value, y.value)
self._get_corner_xyz(i2, j1, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p1 = (x.value, y.value )
self._get_corner_xyz( i2, j2, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p2 = (x.value, y.value )
self._get_corner_xyz(i1, j2, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p3 = (x.value, y.value )
return (p0,p1,p2,p3)
else:
raise ValueError("Invalid layer value:%d Valid range: [0,%d]" % (layer, self.getNZ()))
def get_name(self):
"""
Name of the current grid, returns a string.
For the main grid this is the filename given to the
constructor when loading the grid; for an LGR this is the name
of the LGR. If the grid instance has been created with the
create() classmethod this can be None.
"""
n = self._get_name()
return str(n) if n else ''
def cell(self, global_index=None, active_index=None, i=None, j=None, k=None):
if global_index is not None:
return Cell(self, global_index)
if active_index is not None:
return Cell(self, self.global_index(active_index=active_index))
if i is not None:
return Cell(self, self.global_index(ijk=(i,j,k)))
def __getitem__(self, global_index):
if isinstance(global_index, tuple):
i,j,k = global_index
return self.cell(i=i, j=j, k=k)
return self.cell(global_index=global_index)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def cells(self, active=False):
"""Iterator over all the (active) cells"""
if not active:
for c in self:
yield c
else:
for i in range(self.get_num_active()):
yield self.cell(active_index=i)
def global_index(self, active_index=None, ijk=None):
"""
Will convert either active_index or (i,j,k) to global index.
"""
return self.__global_index(active_index=active_index, ijk=ijk)
def __global_index(self, active_index=None, global_index=None, ijk=None):
"""
Will convert @active_index or @ijk to global_index.
This method will convert @active_index or @ijk to a global
index. Exactly one of the arguments @active_index,
@global_index or @ijk must be supplied.
The method is used extensively internally in the EclGrid
class; most methods which take coordinate input pass through
this method to normalize the coordinate representation.
"""
set_count = 0
if not active_index is None:
set_count += 1
if not global_index is None:
set_count += 1
if ijk:
set_count += 1
if not set_count == 1:
raise ValueError("Exactly one of the kewyord arguments active_index, global_index or ijk must be set")
if not active_index is None:
global_index = self._get_global_index1A( active_index)
elif ijk:
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
i,j,k = ijk
if not 0 <= i < nx:
raise IndexError("Invalid value i:%d Range: [%d,%d)" % (i, 0, nx))
if not 0 <= j < ny:
raise IndexError("Invalid value j:%d Range: [%d,%d)" % (j, 0, ny))
if not 0 <= k < nz:
raise IndexError("Invalid value k:%d Range: [%d,%d)" % (k, 0, nz))
global_index = self._get_global_index3(i,j,k)
else:
if not 0 <= global_index < self.getGlobalSize():
raise IndexError("Invalid value global_index:%d Range: [%d,%d)" % (global_index, 0, self.getGlobalSize()))
return global_index
def get_active_index(self, ijk=None, global_index=None):
"""
Lookup active index based on ijk or global index.
Will determine the active_index of a cell, based on either
@ijk = (i,j,k) or @global_index. If the cell specified by the
input arguments is not active the function will return -1.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
return self._get_active_index1(gi)
def get_active_fracture_index(self, ijk=None, global_index=None):
"""
For dual porosity - get the active fracture index.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
return self._get_active_fracture_index1(gi)
def get_global_index1F(self, active_fracture_index):
"""
Will return the global index corresponding to active fracture index.
"""
return self._get_global_index1F(active_fracture_index)
def cell_invalid(self, ijk=None, global_index=None, active_index=None):
"""
Tries to check if a cell is invalid.
Cells which are used to represent numerical aquifers are
typically located in UTM position (0,0); these cells have
completely whacked up shape and size, and should **NOT** be
used in calculations involving real world coordinates. To
protect against this a heuristic is used to identify such cells
and mark them as invalid. There might be other sources than
numerical aquifers to this problem.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk, active_index=active_index)
return self._invalid_cell(gi)
def valid_cell_geometry(self, ijk=None, global_index=None, active_index=None):
"""Checks if the cell has valid geometry.
There are at least two reasons why a cell might have invalid
geometry:
1. In the case of GRID files it is not necessary to supply
the geometry for all the cells; in that case this
function will return false for cells which do not have
valid coordinates.
2. Cells which are used to represent numerical aquifers are
typically located in UTM position (0,0); these cells have
completely whacked up shape and size; these cells are
identified by a heuristic - which might fail
If the validCellGeometry() returns false for a particular
cell functions which calculate cell volumes, real world
coordinates and so on - should not be used.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk, active_index=active_index)
return self._valid_cell(gi)
def active(self, ijk=None, global_index=None):
"""
Is the cell active?
See documentation of get_xyz() for explanation of parameters
@ijk and @global_index.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
active_index = self._get_active_index1(gi)
if active_index >= 0:
return True
else:
return False
def get_global_index(self, ijk=None, active_index=None):
"""
Lookup global index based on ijk or active index.
"""
gi = self.__global_index(active_index=active_index, ijk=ijk)
return gi
def get_ijk(self, active_index=None, global_index=None):
"""
Lookup (i,j,k) for a cell, based on either active index or global index.
The return value is a tuple with three elements (i,j,k).
"""
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
gi = self.__global_index(active_index=active_index, global_index=global_index)
self._get_ijk1(gi, ctypes.byref(i), ctypes.byref(j), ctypes.byref(k))
return (i.value, j.value, k.value)
def get_xyz(self, active_index=None, global_index=None, ijk=None):
"""
Find true position of cell center.
Will return world position of the center of a cell in the
grid. The return value is a tuple of three elements:
(utm_x, utm_y, depth).
The cells of a grid can be specified in three different ways:
(i,j,k) : As a tuple of i,j,k values.
global_index : A number in the range [0,nx*ny*nz). The
global index is related to (i,j,k) as:
global_index = i + j*nx + k*nx*ny
active_index : A number in the range [0,nactive).
For many of the EclGrid methods a cell can be specified using
any of these three methods. Observe that one and only one method is
allowed:
OK:
pos1 = grid.get_xyz(active_index=100)
pos2 = grid.get_xyz(ijk=(10,20,7))
Crash and burn:
pos3 = grid.get_xyz(ijk=(10,20,7), global_index=10)
pos4 = grid.get_xyz()
All the indices in the EclGrid() class are zero offset, this
is in contrast to ECLIPSE which has an offset 1 interface.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_xyz1(gi, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
def get_node_pos(self, i, j, k):
"""Will return the (x,y,z) for the node given by (i,j,k).
Observe that this method does not consider cells, but the
nodes in the grid. This means that the valid input range for
i,j and k are are upper end inclusive. To get the four
bounding points of the lower layer of the grid:
p0 = grid.getNodePos(0, 0, 0)
p1 = grid.getNodePos(grid.getNX(), 0, 0)
p2 = grid.getNodePos(0, grid.getNY(), 0)
p3 = grid.getNodePos(grid.getNX(), grid.getNY(), 0)
"""
if not 0 <= i <= self.getNX():
raise IndexError("Invalid I value:%d - valid range: [0,%d]" % (i, self.getNX()))
if not 0 <= j <= self.getNY():
raise IndexError("Invalid J value:%d - valid range: [0,%d]" % (j, self.getNY()))
if not 0 <= k <= self.getNZ():
raise IndexError("Invalid K value:%d - valid range: [0,%d]" % (k, self.getNZ()))
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_corner_xyz(i,j,k, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
def get_cell_corner(self, corner_nr, active_index=None, global_index=None, ijk=None):
"""
Will look up xyz of corner nr @corner_nr
lower layer: upper layer
2---3 6---7
| | | |
0---1 4---5
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_cell_corner_xyz1(gi, corner_nr, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
def get_node_xyz(self, i,j,k):
"""
This function returns the position of Vertex (i,j,k).
The coordinates are in the inclusive interval [0,nx] x [0,ny] x [0,nz].
"""
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
corner = 0
if i == nx:
i -= 1
corner += 1
if j == ny:
j -= 1
corner += 2
if k == nz:
k -= 1
corner += 4
if self._ijk_valid(i, j, k):
return self.getCellCorner(corner, global_index=i + j*nx + k*nx*ny)
else:
raise IndexError("Invalid coordinates: (%d,%d,%d) " % (i,j,k))
def get_layer_xyz(self, xy_corner, layer):
nx = self.getNX()
(j, i) = divmod(xy_corner, nx + 1)
k = layer
return self.getNodeXYZ(i,j,k)
def distance(self, global_index1, global_index2):
dx = ctypes.c_double()
dy = ctypes.c_double()
dz = ctypes.c_double()
self._get_distance(global_index1, global_index2, ctypes.byref(dx), ctypes.byref(dy), ctypes.byref(dz))
return (dx.value, dy.value, dz.value)
def depth(self, active_index=None, global_index=None, ijk=None):
"""
Depth of the center of a cell.
Returns the depth of the center of the cell given by
@active_index, @global_index or @ijk. See method get_xyz() for
documentation of @active_index, @global_index and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._get_depth( gi)
def top(self, i, j):
"""
Top of the reservoir; in the column (@i, @j).
Returns average depth of the four top corners.
"""
return self._get_top(i, j)
def top_active(self, i, j):
"""
Top of the active part of the reservoir; in the column (@i, @j).
Raises ValueError if (i,j) column is inactive.
"""
for k in range(self.getNZ()):
a_idx = self.get_active_index(ijk=(i,j,k))
if a_idx >= 0:
return self._get_top1A(a_idx)
raise ValueError('No active cell in column (%d,%d)' % (i,j))
def bottom(self, i, j):
"""
Bottom of the reservoir; in the column (@i, @j).
"""
return self._get_bottom( i, j)
def locate_depth(self, depth, i, j):
"""
Will locate the k value of cell containing specified depth.
Will scan through the grid column specified by the input
arguments @i and @j and search for a cell containing the depth
given by input argument @depth. The return value is the k
value of cell containing @depth.
If @depth is above the top of the reservoir the function will
return -1, and if @depth is below the bottom of the reservoir
the function will return -nz.
"""
return self._locate_depth( depth, i, j)
def find_cell(self, x, y, z, start_ijk=None):
"""
Lookup cell containing true position (x,y,z).
Will locate the cell in the grid which contains the true
position (@x,@y,@z), the return value is a triplet
(i,j,k). The underlying C implementation is not very
efficient, and can potentially take quite a long time. If you
provide a good initial guess with the parameter @start_ijk (a
tuple (i,j,k)) things can speed up quite substantially.
If the location (@x,@y,@z) can not be found in the grid, the
method will return None.
"""
start_index = 0
if start_ijk:
start_index = self.__global_index(ijk=start_ijk)
global_index = self._get_ijk_xyz(x, y, z, start_index)
if global_index >= 0:
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
self._get_ijk1(global_index,
ctypes.byref(i), ctypes.byref(j), ctypes.byref(k))
return (i.value, j.value, k.value)
return None
def cell_contains(self, x, y, z, active_index=None, global_index=None, ijk=None):
"""
Will check if the cell contains point given by world
coordinates (x,y,z).
See method get_xyz() for documentation of @active_index,
@global_index and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._cell_contains(gi, x,y,z)
def find_cell_xy(self, x, y, k):
"""Will find the i,j of cell with utm coordinates x,y.
The @k input is the layer you are interested in, the allowed
values for k are [0,nz]. If the coordinates (x,y) are found to
be outside the grid a ValueError exception is raised.
"""
if 0 <= k <= self.getNZ():
i = ctypes.c_int()
j = ctypes.c_int()
ok = self._get_ij_xy(x,y,k, ctypes.byref(i), ctypes.byref(j))
if ok:
return (i.value, j.value)
else:
raise ValueError("Could not find the point:(%g,%g) in layer:%d" % (x,y,k))
else:
raise IndexError("Invalid layer value:%d" % k)
def find_cell_corner_xy(self, x, y, k):
"""Will find the corner nr of corner closest to utm coordinates x,y.
The @k input is the layer you are interested in, the allowed
values for k are [0,nz]. If the coordinates (x,y) are found to
be outside the grid a ValueError exception is raised.
"""
i,j = self.findCellXY(x,y,k)
if k == self.getNZ():
k -= 1
corner_shift = 4
else:
corner_shift = 0
nx = self.getNX()
x0,y0,z0 = self.getCellCorner(corner_shift, ijk=(i,j,k))
d0 = math.sqrt((x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))
c0 = i + j*(nx + 1)
x1,y1,z1 = self.getCellCorner(1 + corner_shift, ijk=(i,j,k))
d1 = math.sqrt((x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))
c1 = i + 1 + j*(nx + 1)
x2,y2,z2 = self.getCellCorner(2 + corner_shift, ijk=(i,j,k))
d2 = math.sqrt((x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))
c2 = i + (j + 1)*(nx + 1)
x3,y3,z3 = self.getCellCorner(3 + corner_shift, ijk=(i,j,k))
d3 = math.sqrt((x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))
c3 = i + 1 + (j + 1)*(nx + 1)
l = [(d0, c0), (d1,c1), (d2, c2), (d3,c3)]
l.sort(key=lambda k: k[0])
return l[0][1]
def cell_regular(self, active_index=None, global_index=None, ijk=None):
"""
The ECLIPSE grid models often contain various degenerate cells,
which are twisted, have overlapping corners or what not. This
function gives a moderate sanity check on a cell, essentially
what the function does is to check if the cell contains its
own centerpoint - which is actually not as trivial as it
sounds.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._cell_regular( gi)
def cell_volume(self, active_index=None, global_index=None, ijk=None):
"""
Calculate the volume of a cell.
Will calculate the total volume of the cell. See method
get_xyz() for documentation of @active_index, @global_index
and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._get_cell_volume(gi)
def cell_dz(self, active_index=None, global_index=None, ijk=None):
"""
The thickness of a cell.
Will calculate the (average) thickness of the cell. See method
get_xyz() for documentation of @active_index, @global_index
and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._get_cell_thickness( gi)
def get_cell_dims(self, active_index=None, global_index=None, ijk=None):
"""Will return a tuple (dx,dy,dz) for cell dimension.
The dx and dy values are best effort estimates of the cell size
along the i and j directions respectively. The three values
are guaranteed to satisfy:
dx * dy * dz = dV
See method get_xyz() for documentation of @active_index,
@global_index and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
dx = self._get_cell_dx(gi)
dy = self._get_cell_dy(gi)
dz = self._get_cell_thickness( gi)
return (dx,dy,dz)
def get_num_lgr(self):
"""
How many LGRs are attached to this main grid?
How many LGRs are attached to this main grid; the grid
instance doing the query must itself be a main grid.
"""
return self._num_lgr()
def has_lgr(self, lgr_name):
"""
Query if the grid has an LGR with name @lgr_name.
"""
if self._has_named_lgr(lgr_name):
return True
else:
return False
def get_lgr(self, lgr_key):
"""Get EclGrid instance with LGR content.
Return an EclGrid instance based on the LGR @lgr, the input
argument can either be the name of an LGR or the grid number
of the LGR. The LGR grid instance is mostly like an ordinary
grid instance; the only difference is that it can not be used
for further queries about LGRs.
If the grid does not contain an LGR with this name/nr
exception KeyError will be raised.
"""
lgr = None
if isinstance(lgr_key, int):
if self._has_numbered_lgr(lgr_key):
lgr = self._get_numbered_lgr(lgr_key)
else:
if self._has_named_lgr(lgr_key):
lgr = self._get_named_lgr(lgr_key)
if lgr is None:
raise KeyError("No such LGR: %s" % lgr_key)
lgr.setParent(self)
return lgr
def get_cell_lgr(self, active_index=None, global_index=None, ijk=None):
"""
Get EclGrid instance located in cell.
Will query the current grid instance if the cell given by
@active_index, @global_index or @ijk has been refined with an
LGR. Will return None if the cell in question has not been
refined, the return value can be used for further queries.
See get_xyz() for documentation of the input parameters.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
lgr = self._get_cell_lgr(gi)
if lgr:
lgr.setParent(self)
return lgr
else:
raise IndexError("No LGR defined for this cell")
def grid_value(self, kw, i, j, k):
"""
Will evaluate @kw in location (@i,@j,@k).
The ECLIPSE properties and solution vectors are stored in
restart and init files as 1D vectors of length nx*ny*nz or
nactive. The grid_value() method is a minor convenience
function to convert the (@i,@j,@k) input values to an
appropriate 1D index.
Depending on the length of kw the input arguments are
converted either to an active index or to a global index. If
the length of kw does not fit with either the global size of
the grid or the active size of the grid things will fail hard.
"""
return self._grid_value(kw, i, j, k)
def load_column(self, kw, i, j, column):
"""
Load the values of @kw from the column specified by (@i,@j).
The method will scan through all k values of the input field
@kw for fixed values of i and j. The size of @kw must be
either nactive or nx*ny*nz.
The input argument @column should be a DoubleVector instance,
observe that if size of @kw == nactive k values corresponding
to inactive cells will not be modified in the @column
instance; in that case it is important that @column is
initialized with a suitable default value.
"""
self._load_column( kw, i, j, column)
def create_kw(self, array, kw_name, pack):
"""
Creates an EclKW instance based on existing 3D numpy object.
The method create3D() does the inverse operation; creating a
3D numpy object from an EclKW instance. If the argument @pack
is true the resulting keyword will have length 'nactive',
otherwise it will have length nx*ny*nz.
"""
if array.ndim == 3:
dims = array.shape
if dims[0] == self.getNX() and dims[1] == self.getNY() and dims[2] == self.getNZ():
dtype = array.dtype
if dtype == numpy.int32:
type = EclDataType.ECL_INT
elif dtype == numpy.float32:
type = EclDataType.ECL_FLOAT
elif dtype == numpy.float64:
type = EclDataType.ECL_DOUBLE
else:
sys.exit("Do not know how to create ecl_kw from type:%s" % dtype)
if pack:
size = self.getNumActive()
else:
size = self.getGlobalSize()
if len(kw_name) > 8:
# Silently truncate to length 8 - ECLIPSE has its challenges.
kw_name = kw_name[0:8]
kw = EclKW(kw_name, size, type)
active_index = 0
global_index = 0
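# i runs fastest below, so global_index follows ECLIPSE natural ordering (i + j*nx + k*nx*ny)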
for k in range(self.getNZ()):
for j in range(self.getNY()):
for i in range(self.getNX()):
if pack:
if self.active(global_index=global_index):
kw[active_index] = array[i,j,k]
active_index += 1
else:
if dtype == numpy.int32:
kw[global_index] = int(array[i,j,k])
else:
kw[global_index] = array[i,j,k]
global_index += 1
return kw
raise ValueError("Wrong size / dimension on array")
def coarse_groups(self):
"""
Will return the number of coarse groups in this grid.
"""
return self._num_coarse_groups()
def in_coarse_group(self, global_index=None, ijk=None, active_index=None):
"""
Will return True or False if the cell is part of coarse group.
"""
global_index = self.__global_index(active_index=active_index, ijk=ijk, global_index=global_index)
return self._in_coarse_group1(global_index)
def create_3d(self, ecl_kw, default = 0):
"""
Creates a 3D numpy array object with the data from @ecl_kw.
Observe that 3D numpy object is a copy of the data in the
EclKW instance, i.e. modification to the numpy object will not
be reflected in the ECLIPSE keyword.
The methods createKW() does the inverse operation; creating an
EclKW instance from a 3D numpy object.
Alternative: Creating the numpy array object is not very
efficient; if you only need a limited number of elements from
the ecl_kw instance it might be wiser to use the grid_value()
method:
value = grid.grid_value(ecl_kw, i, j, k)
"""
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
array = numpy.ones([ self.getGlobalSize() ], dtype=ecl_kw.dtype) * default
kwa = ecl_kw.array
if len(ecl_kw) == self.getGlobalSize():
for i in range(kwa.size):
array[i] = kwa[i]
else:
for global_index in range(self.getGlobalSize()):
active_index = self._get_active_index1(global_index)
array[global_index] = kwa[active_index]
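# Fortran ordering makes array[i, j, k] line up with the grid's (i, j, k) indexing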
array = array.reshape([self.getNX(), self.getNY(), self.getNZ()], order='F')
return array
else:
err_msg_fmt = 'Keyword "%s" has invalid size %d; must be either nactive=%d or nx*ny*nz=%d'
err_msg = err_msg_fmt % (ecl_kw, len(ecl_kw), self.getNumActive(),
self.getGlobalSize())
raise ValueError(err_msg)
def save_grdecl(self, pyfile, output_unit=EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
Will write the grid content as grdecl formatted keywords.
Will only write the main grid.
"""
cfile = CFILE(pyfile)
self._fprintf_grdecl2(cfile, output_unit)
def save_EGRID(self, filename, output_unit=None):
if output_unit is None:
output_unit = self.unit_system
self._fwrite_EGRID2(filename, output_unit)
def save_GRID(self, filename, output_unit=EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
Will save the current grid as a GRID file.
"""
self._fwrite_GRID2( filename, output_unit)
def write_grdecl(self, ecl_kw, pyfile, special_header=None, default_value=0):
"""
Writes an EclKW instance as an ECLIPSE grdecl formatted file.
The input argument @ecl_kw must be an EclKW instance of size
nactive or nx*ny*nz. If the size is nactive the inactive cells
will be filled with @default_value; hence the function will
always write nx*ny*nz elements.
The data in the @ecl_kw argument can be of type integer,
float, double or bool. In the case of bool the default value
must be specified as 1 (True) or 0 (False).
The input argument @pyfile should be a valid python filehandle
opened for writing; i.e.
pyfile = open("PORO.GRDECL", "w")
grid.write_grdecl(poro_kw , pyfile, default_value=0.0)
grid.write_grdecl(permx_kw, pyfile, default_value=0.0)
pyfile.close()
"""
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
cfile = CFILE(pyfile)
self._fwrite_grdecl(ecl_kw, special_header, cfile, default_value)
else:
raise ValueError("Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d" % (ecl_kw.getName(), len(ecl_kw), self.getNumActive(), self.getGlobalSize()))
def exportACTNUM(self):
actnum = IntVector(initial_size=self.getGlobalSize())
self._init_actnum(actnum.getDataPtr())
return actnum
def compressed_kw_copy(self, kw):
if len(kw) == self.getNumActive():
return kw.copy()
elif len(kw) == self.getGlobalSize():
kw_copy = EclKW(kw.getName(), self.getNumActive(), kw.data_type)
self._compressed_kw_copy(kw_copy, kw)
return kw_copy
else:
raise ValueError("The input keyword must have nx*n*nz or nactive elements. Size:%d invalid" % len(kw))
def global_kw_copy(self, kw, default_value):
if len(kw) == self.getGlobalSize():
return kw.copy()
elif len(kw) == self.getNumActive():
kw_copy = EclKW(kw.getName(), self.getGlobalSize(), kw.data_type)
kw_copy.assign(default_value)
self._global_kw_copy(kw_copy, kw)
return kw_copy
else:
raise ValueError("The input keyword must have nx*n*nz or nactive elements. Size:%d invalid" % len(kw))
def export_ACTNUM_kw(self):
actnum = EclKW("ACTNUM", self.getGlobalSize(), EclDataType.ECL_INT)
self._init_actnum(actnum.getDataPtr())
return actnum
def create_volume_keyword(self, active_size=True):
"""Will create a EclKW initialized with cell volumes.
The purpose of this method is to create a EclKW instance which
is initialized with all the cell volumes, this can then be
used to perform volume summation; i.e. to calculate the total
oil volume:
soil = 1 - sgas - swat
cell_volume = grid.createVolumeKeyword()
tmp = cell_volume * soil
oip = tmp.sum()
The oil in place calculation shown above could easily be
implemented by iterating over the soil kw, however using the
volume keyword has two advantages:
1. The calculation of cell volumes is quite time consuming,
by storing the results in a kw they can be reused.
2. By using the compact form 'oip = cell_volume * soil' the
inner loop iteration will go in C - which is faster.
By default the kw will only have values for the active cells,
but by setting the optional variable @active_size to False you
will get volume values for all cells in the grid.
"""
return self._create_volume_keyword(active_size)
def export_index(self, active_only = False):
"""
Exports a pandas dataframe containing index data of grid cells.
The global_index of the cells is used as index in the pandas frame.
columns 0, 1, 2 are i, j, k, respectively
column 3 contains the active_index
if active_only == True, only active cells are listed,
otherwise all cells are listed.
This index frame should typically be passed to the epxport_data(),
export_volume() and export_corners() functions.
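A typical round trip looks like this (illustrative; poro_kw stands for
any EclKW of size nactive or nx*ny*nz):
    index_frame = grid.export_index()
    poro = grid.export_data(index_frame, poro_kw)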
"""
if active_only:
size = self.get_num_active()
else:
size = self.get_global_size()
indx = numpy.zeros(size, dtype=numpy.int32)
data = numpy.zeros([size, 4], dtype=numpy.int32)
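# the C layer fills indx and data in place through the raw int32 pointers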
self._export_index_frame( indx.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), active_only )
df = pandas.DataFrame(data=data, index=indx, columns=['i', 'j', 'k', 'active'])
return df
def export_data(self, index_frame, kw, default = 0):
"""
Exports keyword data to a numpy vector.
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
kw must have size of either global_size or num_active.
The length of the numpy vector is the number of rows in index_frame.
If kw is of length num_active, values in the output vector
corresponding to inactive cells are set to default.
"""
if not isinstance(index_frame, pandas.DataFrame):
raise TypeError("index_frame must be pandas.DataFrame")
if len(kw) == self.get_global_size():
index = numpy.array( index_frame.index, dtype=numpy.int32 )
elif len(kw) == self.get_num_active():
index = numpy.array( index_frame["active"], dtype=numpy.int32 )
else:
raise ValueError("The keyword must have a 3D compatible length")
if kw.type is EclTypeEnum.ECL_INT_TYPE:
data = numpy.full( len(index), default, dtype=numpy.int32 )
self._export_data_as_int( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
kw,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) )
return data
elif kw.type is EclTypeEnum.ECL_FLOAT_TYPE or kw.type is EclTypeEnum.ECL_DOUBLE_TYPE:
data = numpy.full( len(index), default, dtype=numpy.float64 )
self._export_data_as_double( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
kw,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
else:
raise TypeError("Keyword must be either int, float or double.")
def export_volume(self, index_frame):
"""
Exports cell volume data to a numpy vector.
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
"""
index = numpy.array( index_frame.index, dtype=numpy.int32 )
data = numpy.zeros( len(index ), dtype=numpy.float64 )
self._export_volume( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
def export_position(self, index_frame):
"""Exports cell position coordinates to a numpy vector (matrix), with columns
0, 1, 2 denoting coordinates x, y, and z, respectively.
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
"""
index = numpy.array( index_frame.index, dtype=numpy.int32 )
data = numpy.zeros( [len(index), 3], dtype=numpy.float64 )
self._export_position( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
def export_corners(self, index_frame):
"""Exports cell corner position coordinates to a numpy vector (matrix).
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
Example of a row of the output matrix:
0 1 2 .... 21 22 23
x1 y1 z1 .... x8 y8 z8
In total there are eight corners. They are described as follows:
The corners in a cell are numbered 0 - 7, where corners 0-3 constitute
one layer and the corners 4-7 constitute the other layer. Observe
that the numbering does not follow a consistent rotation around the face:
            j
 6---7     /|\
 |   |      |
 4---5      |
            |
            o---------->  i
 2---3
 |   |
 0---1
Many grids are left-handed, i.e. the direction of increasing z will
point down towards the center of the earth. Hence in the figure above
the layer 4-7 will be deeper down in the reservoir than layer 0-3, and
also have higher z-value.
Warning: The main author of this code suspects that the coordinate
system can be right-handed as well, giving a z axis which will
increase 'towards the sky'; the safest way is probably to check this
explicitly if it matters for the case at hand.
"""
index = numpy.array( index_frame.index, dtype=numpy.int32 )
data = numpy.zeros( [len(index), 24], dtype=numpy.float64 )
self._export_corners( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
def export_coord(self):
return self._export_coord()
def export_zcorn(self):
return self._export_zcorn()
def export_actnum(self):
return self._export_actnum()
def export_mapaxes(self):
if not self._use_mapaxes():
return None
return self._export_mapaxes()
@property
def unit_system(self):
return self._get_unit_system()
monkey_the_camel(EclGrid, 'loadFromGrdecl', EclGrid.load_from_grdecl, classmethod)
monkey_the_camel(EclGrid, 'loadFromFile', EclGrid.load_from_file, classmethod)
monkey_the_camel(EclGrid, 'createRectangular', EclGrid.create_rectangular, classmethod)
monkey_the_camel(EclGrid, 'dualGrid', EclGrid.dual_grid)
monkey_the_camel(EclGrid, 'getDims', EclGrid.get_dims)
monkey_the_camel(EclGrid, 'getNX', EclGrid.get_nx)
monkey_the_camel(EclGrid, 'getNY', EclGrid.get_ny)
monkey_the_camel(EclGrid, 'getNZ', EclGrid.get_nz)
monkey_the_camel(EclGrid, 'getGlobalSize', EclGrid.get_global_size)
monkey_the_camel(EclGrid, 'getNumActive', EclGrid.get_num_active)
monkey_the_camel(EclGrid, 'getNumActiveFracture', EclGrid.get_num_active_fracture)
monkey_the_camel(EclGrid, 'getBoundingBox2D', EclGrid.get_bounding_box_2d)
monkey_the_camel(EclGrid, 'getName', EclGrid.get_name)
monkey_the_camel(EclGrid, 'validCellGeometry', EclGrid.valid_cell_geometry)
monkey_the_camel(EclGrid, 'getNodePos', EclGrid.get_node_pos)
monkey_the_camel(EclGrid, 'getCellCorner', EclGrid.get_cell_corner)
monkey_the_camel(EclGrid, 'getNodeXYZ', EclGrid.get_node_xyz)
monkey_the_camel(EclGrid, 'getLayerXYZ', EclGrid.get_layer_xyz)
monkey_the_camel(EclGrid, 'findCellXY', EclGrid.find_cell_xy)
monkey_the_camel(EclGrid, 'findCellCornerXY', EclGrid.find_cell_corner_xy)
monkey_the_camel(EclGrid, 'getCellDims', EclGrid.get_cell_dims)
monkey_the_camel(EclGrid, 'getNumLGR', EclGrid.get_num_lgr)
monkey_the_camel(EclGrid, 'createKW', EclGrid.create_kw)
monkey_the_camel(EclGrid, 'create3D', EclGrid.create_3d)
monkey_the_camel(EclGrid, 'compressedKWCopy', EclGrid.compressed_kw_copy)
monkey_the_camel(EclGrid, 'globalKWCopy', EclGrid.global_kw_copy)
monkey_the_camel(EclGrid, 'exportACTNUMKw', EclGrid.export_ACTNUM_kw)
monkey_the_camel(EclGrid, 'createVolumeKeyword', EclGrid.create_volume_keyword)
| gpl-3.0 | -7,425,087,570,827,664,000 | 39.764256 | 184 | 0.584401 | false | 3.580005 | false | false | false |
dankolbman/NumericalAnalysis | Homeworks/HW2/Problem5ii.py | 1 | 3007 | import math
import scipy.interpolate as intrp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
font = {'family' : 'normal',
'size' : 24}
rc('font', **font)
### The function
def f(t):
return 1/(1+t**2)
# Spline
def spline(xpts, ypts):
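# Build and solve the tridiagonal system for the second derivatives of a
# natural cubic spline (the end conditions pin them to zero).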
n = len(xpts)
mat = np.zeros(( n, n))
rhs = np.zeros(( n,1 ))
for i in range(1,n-1):
rhs[i] = 6 * ( (ypts[i+1]-ypts[i]) / (xpts[i+1]-xpts[i]) \
-(ypts[i]-ypts[i-1]) / (xpts[i]-xpts[i-1]) )
for j in range(0,n-1):
# Set tridiagonal elements
if(j==i-1): mat[i][j] += xpts[i] - xpts[i-1]
elif(j==i): mat[i][j] += 2*(xpts[i+1]-xpts[i-1])
elif(j==i+1): mat[i][j] += xpts[i+1]-xpts[i]
# BCs
mat[0][0] = 1
mat[-1][-1] = 1
rhs[0] = 0
rhs[-1] = 0
# Solve it
x_vec = np.linalg.solve(mat, rhs)
return x_vec
#######
# The function
x = [ i/100 for i in range(-500,500) ]
fx = [ f(i) for i in x ]
plt.plot(x,fx, 'k--',label='f(t)', linewidth=5)
### 5 points
xpts = np.linspace(-5, 5, 5)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
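# evaluate the cubic on each knot interval at t equally spaced points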
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'r', label='5 Points')
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 5 Points:', rmse)
### 10 points
xpts = np.linspace(-5, 5, 10)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'b', label='10 Points')
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 10 Points:', rmse)
### 15 points
xpts = np.linspace(-5, 5, 15)
ypts = [ f(t) for t in xpts ]
sol = spline(xpts, ypts)
n = len(xpts)
x = []
fx = []
t = 1000
for i in range(0,n-1):
dx = xpts[i+1]-xpts[i]
for j in range(t):
bb = 1*j/(t)
aa = 1 - bb
x.append(xpts[i]+bb*dx)
cc = dx**2*aa*(aa**2-1)/6
dd = dx**2*bb*(bb**2-1)/6
fx.append(aa*ypts[i]+bb*ypts[i+1]+cc*sol[i]+dd*sol[i+1])
plt.plot(x,fx, 'g', label='15 Points',linewidth=3)
diffs = [ f( x[i] ) - fx[i] for i in range(len(x)) ]
rmse=np.linalg.norm( diffs )/np.sqrt(len(fx))
print('Error for 15 Points:', rmse)
plt.legend(fontsize=16)
plt.ylim( [-0.2, 1.1] )
plt.title('Natural Cubic Splines for $f(t)$')
plt.savefig('Problem5ii.png')
plt.show()
| mit | 68,324,508,218,959,170 | 22.492188 | 63 | 0.55005 | false | 2.194891 | false | false | false |
Exa-Networks/scavengerexa | lib/scavenger/policy/factory.py | 1 | 7596 | #!/usr/bin/env python
# encoding: utf-8
"""
factory.py
Created by Thomas Mangin on 2009-01-10.
Copyright (c) 2008 Exa Networks. All rights reserved.
See LICENSE for details.
"""
import time
from zope.interface import Interface, implements
from plugin import response
class IMailPolicyFactory (Interface):
def policeMessage (message):
"""returns a list of what the plugin are saying about the message"""
def sanitiseMessage (message):
"""return what the version of the protocol of the request"""
def getPlugins (state):
"""returns a list of plugin which can run at this level"""
def validateMessage (message):
"""check that the message have the key we need"""
from twisted.python import log
from twisted.internet import protocol
from twisted.internet import defer
from scavenger.policy.protocol import PostfixPolicyProtocol,ScavengerPolicyProtocol
from scavenger.policy.service import IMailPolicyService
message = "[policy server reports] message %(msg)s\ncode %(code)s"
class MailPolicyFactoryFromService (protocol.ServerFactory):
implements(IMailPolicyFactory)
debug = False
postfix_21 = ['request','protocol_state','protocol_name','helo_name','queue_id','sender','recipient','recipient_count','client_address','client_name','reverse_client_name','instance',]
postfix_22 = ['sasl_method','sasl_username','sasl_sender','size','ccert_subject','ccert_issuer','ccert_fingerprint',]
postfix_23 = ['encryption_protocol','encryption_cipher','encryption_keysize','etrn_domain',]
postfix_25 = ['stress',]
scavenger_10 = ['server_address','code','origin']
states = ['VRFY','ETRN','CONNECT','EHLO','HELO','MAIL','RCPT','DATA','END-OF-DATA',]
def __init__ (self,service):
if service.getType() == 'scavenger':
self.protocol = ScavengerPolicyProtocol
elif service.getType() == 'postfix':
self.protocol = PostfixPolicyProtocol
else:
raise ValueError('unknown protocol option (scavenger,postfix)')
log.msg('+'*80)
self.plugins = {}
self.service = service
self.template = self.service.configuration.get('message',message)
self.type = self.service.getType()
self.version = {'postfix':{},'scavenger':{}}
for kv,ver in ((self.postfix_21,'2.1'),(self.postfix_22,'2.2'),(self.postfix_23,'2.3'),(self.postfix_25,'2.5')):
for k in kv:
self.version['postfix'][k] = ver
for kv,ver in ((self.postfix_21,'2.1'),(self.scavenger_10,'1.0')):
for k in kv:
self.version['scavenger'][k] = ver
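# index the configured plugins by the protocol states they want to inspect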
for state in self.states:
self.plugins[state] = []
for plugin in self.service.getPlugins():
states = plugin.getStates()
for state in states:
self.plugins[state].append(plugin)
def getPlugins (self,message):
protocol = message['request']
state = message['protocol_state']
for plugin in self.plugins[state]:
yield plugin
def policeMessage (self,message):
self._storeMessage(message)
response = self._checkMessage(message)
print "%-15s %4s : %s" % (message['client_address'],message['protocol_state'],str(response))
return response
def _storeMessage (self,message):
# Perform database storage functions
for plugin in self.getPlugins(message):
try:
plugin.store(message)
# Errors
except response.InternalError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'plugin had an internal error',str(r)))
continue
except response.DataError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'the plugin does not like the data provided',str(r)))
continue
except response.UncheckableError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'uncheckable',str(r)))
continue
except response.NoResponseError, r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'no answer from the plugin',str(r)))
continue
# Uncaught Exception
except response.PluginError,r:
log.msg('plugin %s : %s' % (plugin.getName(),'no response'))
continue
except Exception, r:
log.msg('plugin %s : %s' % (plugin.getName(),'unknown response '+str(r)))
continue
def _checkMessage (self,message):
# Run all the plugin in order and act depending on the response returned
for plugin in self.getPlugins(message):
if self.debug: log.msg('running plugin ' + plugin.getName())
try:
r = plugin.police(message)
except Exception, e:
if plugin.debug:
import traceback
traceback.print_exc()
else:
log.msg("Plugin %s is raising an error - %s %s" % (plugin.getName(),str(type(e)), e.message))
continue
try:
raise r
# Nothing can be said about the data
except response.ResponseContinue:
if self.debug: log.msg('plugin %s : %s' % (plugin.getName(),'continue'))
continue
# Allow or Block the mail
except response.PostfixResponse, r:
# XXX: Need to create a dict class which replies '' to every unknown key
log.msg('plugin %s : %s' % (plugin.getName(),r.message))
if r.delay: log.msg('plugin %s : forcing a time of %d' % (plugin.getName(), r.delay))
return r
except response.ScavengerResponse, r:
# XXX: Need to create a dict class which replies '' to every unknown key
log.msg('plugin %s : %s' % (plugin.getName(),r.message))
if r.duration: log.msg('plugin %s : forcing a duration of %d' % (plugin.getName(), r.duration))
return r
# Nothing can be said about the data
except response.ResponseContinue:
log.msg('plugin %s : %s' % (plugin.getName(),'continue'))
continue
# Errors
except response.InternalError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'plugin had an internal error',str(r)))
continue
except response.DataError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'the plugin does not like the data provided',str(r)))
continue
except response.UncheckableError,r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'uncheckable',str(r)))
continue
except response.NoResponseError, r:
log.msg('plugin %s : %s (%s)' % (plugin.getName(),'no answer from the plugin',str(r)))
continue
# Uncaught Exception
except response.PluginError,r:
log.msg('plugin %s : %s' % (plugin.getName(),'no response'))
continue
except Exception, r:
log.msg('plugin %s : %s' % (plugin.getName(),'unknown response '+str(r)))
continue
if self.debug: log.msg('plugins could not say anything about this message')
return response.ResponseUndetermined(self.type)
def sanitiseMessage (self,message):
r = {}
for k in self.version[self.type].keys():
if not message.has_key(k):
r[k]=''
else:
r[k] = message[k]
then = time.time()
if message.has_key('timestamp'):
try:
then = float(message['timestamp'])
except (TypeError, ValueError):
pass
r['timestamp'] = int(then)
return r
def validateMessage (self,message):
for k in ['client_address','protocol_state']:
if not message.has_key(k):
return False
if message['protocol_state'] not in self.states:
log.msg('invalid protocol state %s' % message['protocol_state'])
return False
if message['request'] not in ['smtpd_access_policy','scavenger_access_policy']:
log.msg('invalid request type %s' % message['request'])
return False
if message['request'] != 'scavenger_access_policy':
return True
for k in ['server_address','code','origin']:
if not message.has_key(k):
log.msg('scavenger message must have key %s' % k)
return False
return True
from twisted.python import components
components.registerAdapter(MailPolicyFactoryFromService,
IMailPolicyService,
IMailPolicyFactory)
| agpl-3.0 | 507,496,064,263,646,660 | 30.38843 | 185 | 0.674039 | false | 3.312691 | false | false | false |
mvs-org/metaverse | test/test-rpc-v3/utils/bitcoin_script.py | 3 | 1920 | from pybitcoin.transactions.scripts import script_to_hex
from pybitcoin.hash import bin_double_sha256, bin_hash160
from pybitcoin.address import bin_hash160_to_address
template = '''
OP_IF
OP_2 %(Alice)s %(Bob)s OP_2 OP_CHECKMULTISIG
OP_ELSE
%(Sequence)s OP_CHECKSEQUENCEVERIFY OP_DROP
%(Alice)s OP_CHECKSIG
OP_ENDIF
'''
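# The redeem script has two branches: OP_IF takes a 2-of-2 multisig spend
# (Alice and Bob); OP_ELSE lets Alice spend alone once the relative
# locktime enforced by OP_CHECKSEQUENCEVERIFY has expired.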
contract = template % {'Bob' : '02578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c64846573',
'Alice' : '0380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d11',
'Sequence' : '0a000000'}
template2 = '''
OP_0
3045022100ff054d83e4f376b6b47705b8186fd1e2b61cabe70e717f052b6bf0fd00d883ec02203adaf168c7e4b32fbd66dd2adfdd42aaf6268f5e4c736978ab6c86d4e13bfcf401
304402200eab2db325b0c95dcfed00a4554b59d3422d2eef3eed50a341da55cd83e8e06302203fc97b96df2e803dfc3113cc6ee0dd5728ced316b63dfda72c808ab48826f7e601
OP_1
OP_PUSHDATA1
63522102578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c64846573210380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d1152ae670164b3752102578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c64846573ac68
'''
template3 = '''
304402204d21c19216cad74e780bd70e04518cf8f1a20108dc3bf79f7b218865524661ac022049b5de8a05d9b524ae6de3b4b221c856d16d4e3a51f7f19e685e7fc33b51abac01
OP_1
OP_PUSHDATA1
6352210380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d112102578ad340083e85c739f379bbe6c6937c5da2ced52e09ac1eec43dc4c6484657352ae67040a000000b375210380990a7312b87abda80e5857ee6ebf798a2bf62041b07111287d19926c429d11ac68
'''
def compile(ct):
ct = ct.split()
#for i in ct:
# print i, '->', script_to_hex(i)
return script_to_hex(' '.join(ct))
if __name__ == '__main__':
script = compile(contract)
script_hash = bin_hash160(script, hex_format=True)
p2sh = bin_hash160_to_address(script_hash, 5)
print (p2sh)
print (script)
print ('-' * 80)
print (compile(template3)) | agpl-3.0 | -3,748,611,327,729,627,000 | 39.87234 | 232 | 0.822396 | false | 2.209436 | false | false | false |
i3visio/osrframework | osrframework/wrappers/pending/bebee.py | 1 | 4625 | # !/usr/bin/python
# -*- coding: cp1252 -*-
#
##################################################################################
#
# Copyright 2016-2017 Félix Brezo and Yaiza Rubio (i3visio, [email protected])
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
__author__ = "John Doe <[email protected]>"
__version__ = "1.0"
import argparse
import json
import re
import sys
import urllib2
import osrframework.utils.browser as browser
from osrframework.utils.platforms import Platform
class Bebee(Platform):
"""
A <Platform> object for Bebee.
"""
def __init__(self):
"""
Constructor...
"""
self.platformName = "Bebee"
self.tags = ["jobs"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "https://bebee.com/bee/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
# Strings that will imply that the query number is not appearing
self.validQuery = {}
# The regular expression '.+' will match any query
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ['<link rel="canonical" href="https://.bebee.com/bees/search">']
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
self.fieldsRegExp["usufy"]["i3visio.fullname"] = {"start": '<title>', "end": '- beBee</title>'}
self.fieldsRegExp["usufy"]["i3visio.location"] = {"start": '<span itemprop="addressRegion">', "end": '</span>'}
self.fieldsRegExp["usufy"]["i3visio.alias.googleplus"] = {"start": '<div><a rel="nofollow" class="color_corp_three" href="https://plus.google.com/u/0/', "end": '"'}
self.fieldsRegExp["usufy"]["i3visio.alias.linkedin"] = {"start": '<div><a rel="nofollow" class="color_corp_three" href="http://br.linkedin.com/in/', "end": '"'}
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
# This attribute will be feeded when running the program.
self.foundFields = {}
| agpl-3.0 | 7,676,508,236,301,960,000 | 38.862069 | 172 | 0.535035 | false | 4.066843 | false | false | false |
josiah-wolf-oberholtzer/supriya | supriya/enums.py | 1 | 12624 | from collections.abc import Sequence
from uqbar.enums import IntEnumeration, StrictEnumeration
class AddAction(IntEnumeration):
"""
An enumeration of scsynth node add actions.
"""
### CLASS VARIABLES ###
ADD_TO_HEAD = 0
ADD_TO_TAIL = 1
ADD_BEFORE = 2
ADD_AFTER = 3
REPLACE = 4
class BinaryOperator(IntEnumeration):
### CLASS VARIABLES ###
ABSOLUTE_DIFFERENCE = 38 # |a - b|
ADDITION = 0
AMCLIP = 40
ATAN2 = 22
BIT_AND = 14
BIT_OR = 15
BIT_XOR = 16
CLIP2 = 42
DIFFERENCE_OF_SQUARES = 34 # a*a - b*b
EQUAL = 6
EXCESS = 43
EXPRANDRANGE = 48
FLOAT_DIVISION = 4
FILL = 29
FIRST_ARG = 46
FOLD2 = 44
GREATEST_COMMON_DIVISOR = 18
GREATER_THAN_OR_EQUAL = 11
GREATER_THAN = 9
HYPOT = 23
HYPOTX = 24
INTEGER_DIVISION = 3
LEAST_COMMON_MULTIPLE = 17
LESS_THAN_OR_EQUAL = 10
LESS_THAN = 8
MAXIMUM = 13
MINIMUM = 12
MODULO = 5
MULTIPLICATION = 2
NOT_EQUAL = 7
POWER = 25
RANDRANGE = 47
RING1 = 30 # a * (b + 1) == a * b + a
RING2 = 31 # a * b + a + b
RING3 = 32 # a*a*b
RING4 = 33 # a*a*b - a*b*b
ROUND = 19
ROUND_UP = 20
SCALE_NEG = 41
SHIFT_LEFT = 26
SHIFT_RIGHT = 27
SQUARE_OF_DIFFERENCE = 37 # (a - b)^2
SQUARE_OF_SUM = 36 # (a + b)^2
SUBTRACTION = 1
SUM_OF_SQUARES = 35 # a*a + b*b
THRESHOLD = 39
TRUNCATION = 21
UNSIGNED_SHIFT = 28
WRAP2 = 45
class CalculationRate(IntEnumeration):
"""
An enumeration of scsynth calculation-rates.
::
>>> import supriya.synthdefs
>>> supriya.CalculationRate.AUDIO
CalculationRate.AUDIO
::
>>> supriya.CalculationRate.from_expr("demand")
CalculationRate.DEMAND
"""
### CLASS VARIABLES ###
AUDIO = 2
CONTROL = 1
DEMAND = 3
SCALAR = 0
### PUBLIC METHODS ###
@classmethod
def from_expr(cls, expr):
"""
Gets calculation-rate.
::
>>> import supriya.synthdefs
>>> import supriya.ugens
::
>>> supriya.CalculationRate.from_expr(1)
CalculationRate.SCALAR
::
>>> supriya.CalculationRate.from_expr("demand")
CalculationRate.DEMAND
::
>>> collection = []
>>> collection.append(supriya.ugens.DC.ar(0))
>>> collection.append(supriya.ugens.DC.kr(1))
>>> collection.append(2.0)
>>> supriya.CalculationRate.from_expr(collection)
CalculationRate.AUDIO
::
>>> collection = []
>>> collection.append(supriya.ugens.DC.kr(1))
>>> collection.append(2.0)
>>> supriya.CalculationRate.from_expr(collection)
CalculationRate.CONTROL
Return calculation-rate.
"""
import supriya.synthdefs
import supriya.ugens
if isinstance(expr, (int, float)) and not isinstance(expr, cls):
return CalculationRate.SCALAR
elif isinstance(expr, (supriya.synthdefs.OutputProxy, supriya.synthdefs.UGen)):
return expr.calculation_rate
elif isinstance(expr, supriya.synthdefs.Parameter):
name = expr.parameter_rate.name
if name == "TRIGGER":
return CalculationRate.CONTROL
return CalculationRate.from_expr(name)
elif isinstance(expr, str):
return super().from_expr(expr)
elif isinstance(expr, Sequence):
return max(CalculationRate.from_expr(item) for item in expr)
elif hasattr(expr, "calculation_rate"):
return cls.from_expr(expr.calculation_rate)
return super().from_expr(expr)
### PUBLIC PROPERTIES ###
@property
def token(self):
if self == CalculationRate.SCALAR:
return "ir"
elif self == CalculationRate.CONTROL:
return "kr"
elif self == CalculationRate.AUDIO:
return "ar"
return "new"
class DoneAction(IntEnumeration):
"""
An enumeration of ``scsynth`` UGen "done" actions.
::
>>> import supriya.synthdefs
>>> supriya.DoneAction(2)
DoneAction.FREE_SYNTH
::
>>> supriya.DoneAction.from_expr("pause synth")
DoneAction.PAUSE_SYNTH
"""
### CLASS VARIABLES ###
NOTHING = 0
PAUSE_SYNTH = 1
FREE_SYNTH = 2
FREE_SYNTH_AND_PRECEDING_NODE = 3
FREE_SYNTH_AND_FOLLOWING_NODE = 4
FREE_SYNTH_AND_FREEALL_PRECEDING_NODE = 5
FREE_SYNTH_AND_FREEALL_FOLLOWING_NODE = 6
FREE_SYNTH_AND_ALL_PRECEDING_NODES_IN_GROUP = 7
FREE_SYNTH_AND_ALL_FOLLOWING_NODES_IN_GROUP = 8
FREE_SYNTH_AND_PAUSE_PRECEDING_NODE = 9
FREE_SYNTH_AND_PAUSE_FOLLOWING_NODE = 10
FREE_SYNTH_AND_DEEPFREE_PRECEDING_NODE = 11
FREE_SYNTH_AND_DEEPFREE_FOLLOWING_NODE = 12
FREE_SYNTH_AND_ALL_SIBLING_NODES = 13
FREE_SYNTH_AND_ENCLOSING_GROUP = 14
class EnvelopeShape(IntEnumeration):
### CLASS VARIABLES ###
CUBED = 7
CUSTOM = 5
EXPONENTIAL = 2
LINEAR = 1
SINE = 3
SQUARED = 6
STEP = 0
WELCH = 4
class HeaderFormat(IntEnumeration):
"""
An enumeration of soundfile header formats.
::
>>> supriya.HeaderFormat.AIFF
HeaderFormat.AIFF
::
>>> supriya.HeaderFormat.from_expr("wav")
HeaderFormat.WAV
::
>>> header_format = supriya.HeaderFormat.from_expr("wav")
>>> header_format.name.lower()
'wav'
"""
### CLASS VARIABLES ###
AIFF = 0
IRCAM = 1
NEXT = 2
RAW = 3
WAV = 4
class NodeAction(IntEnumeration):
### CLASS VARIABLES ###
NODE_CREATED = 0
NODE_REMOVED = 1
NODE_ACTIVATED = 2
NODE_DEACTIVATED = 3
NODE_MOVED = 4
NODE_QUERIED = 5
### PUBLIC METHODS ###
@classmethod
def from_address(cls, address):
addresses = {
"/n_end": cls.NODE_REMOVED,
"/n_go": cls.NODE_CREATED,
"/n_info": cls.NODE_QUERIED,
"/n_move": cls.NODE_MOVED,
"/n_off": cls.NODE_DEACTIVATED,
"/n_on": cls.NODE_ACTIVATED,
}
action = addresses[address]
return action
class ParameterRate(IntEnumeration):
"""
An enumeration of synthdef control rates.
"""
### CLASS VARIABLES ###
AUDIO = 2
CONTROL = 3
SCALAR = 0
TRIGGER = 1
class RequestId(IntEnumeration):
"""
An enumeration of scsynth request ids.
"""
### CLASS VARIABLES ###
BUFFER_ALLOCATE = 28
BUFFER_ALLOCATE_READ = 29
BUFFER_ALLOCATE_READ_CHANNEL = 54
BUFFER_CLOSE = 33
BUFFER_FILL = 37
BUFFER_FREE = 32
BUFFER_GENERATE = 38
BUFFER_GET = 42
BUFFER_GET_CONTIGUOUS = 43
BUFFER_QUERY = 47
BUFFER_READ = 30
BUFFER_READ_CHANNEL = 55
BUFFER_SET = 35
BUFFER_SET_CONTIGUOUS = 36
BUFFER_WRITE = 31
BUFFER_ZERO = 34
CLEAR_SCHEDULE = 51
COMMAND = 4
CONTROL_BUS_FILL = 27
CONTROL_BUS_GET = 40
CONTROL_BUS_GET_CONTIGUOUS = 41
CONTROL_BUS_SET = 25
CONTROL_BUS_SET_CONTIGUOUS = 26
DUMP_OSC = 39
ERROR = 58
GROUP_DEEP_FREE = 50
GROUP_DUMP_TREE = 56
GROUP_FREE_ALL = 24
GROUP_HEAD = 22
GROUP_NEW = 21
GROUP_QUERY_TREE = 57
GROUP_TAIL = 23
NODE_AFTER = 19
NODE_BEFORE = 18
NODE_COMMAND = 13
NODE_FILL = 17
NODE_FREE = 11
NODE_MAP_TO_CONTROL_BUS = 14
NODE_MAP_TO_AUDIO_BUS = 60
NODE_MAP_TO_AUDIO_BUS_CONTIGUOUS = 61
NODE_MAP_TO_CONTROL_BUS_CONTIGUOUS = 48
NODE_ORDER = 62
NODE_QUERY = 46
NODE_RUN = 12
NODE_SET = 15
NODE_SET_CONTIGUOUS = 16
NODE_TRACE = 10
NOTHING = 0
NOTIFY = 1
PARALLEL_GROUP_NEW = 63
QUIT = 3
STATUS = 2
SYNC = 52
SYNTHDEF_FREE = 53
SYNTHDEF_FREE_ALL = 8
SYNTHDEF_LOAD = 6
SYNTHDEF_LOAD_DIR = 7
SYNTHDEF_RECEIVE = 5
SYNTH_GET = 44
SYNTH_GET_CONTIGUOUS = 45
SYNTH_NEW = 9
SYNTH_NEWARGS = 59
SYNTH_NOID = 49
SYNTH_QUERY = 65
UGEN_COMMAND = 20
VERSION = 64
@property
def request_name(self):
return RequestName.from_expr(self.name)
class RequestName(StrictEnumeration):
"""
An enumeration of scsynth request names.
"""
### CLASS VARIABLES ###
BUFFER_ALLOCATE = "/b_alloc"
BUFFER_ALLOCATE_READ = "/b_allocRead"
BUFFER_ALLOCATE_READ_CHANNEL = "/b_allocReadChannel"
BUFFER_CLOSE = "/b_close"
BUFFER_FILL = "/b_fill"
BUFFER_FREE = "/b_free"
BUFFER_GENERATE = "/b_gen"
BUFFER_GET = "/b_get"
BUFFER_GET_CONTIGUOUS = "/b_getn"
BUFFER_QUERY = "/b_query"
BUFFER_READ = "/b_read"
BUFFER_READ_CHANNEL = "/b_readChannel"
BUFFER_SET = "/b_set"
BUFFER_SET_CONTIGUOUS = "/b_setn"
BUFFER_WRITE = "/b_write"
BUFFER_ZERO = "/b_zero"
CLEAR_SCHEDULE = "/clearSched"
COMMAND = "/cmd"
CONTROL_BUS_FILL = "/c_fill"
CONTROL_BUS_GET = "/c_get"
CONTROL_BUS_GET_CONTIGUOUS = "/c_getn"
CONTROL_BUS_SET = "/c_set"
CONTROL_BUS_SET_CONTIGUOUS = "/c_setn"
DUMP_OSC = "/dumpOSC"
ERROR = "/error"
GROUP_DEEP_FREE = "/g_deepFree"
GROUP_DUMP_TREE = "/g_dumpTree"
GROUP_FREE_ALL = "/g_freeAll"
GROUP_HEAD = "/g_head"
GROUP_NEW = "/g_new"
GROUP_QUERY_TREE = "/g_queryTree"
GROUP_TAIL = "/g_tail"
NODE_AFTER = "/n_after"
NODE_BEFORE = "/n_before"
# NODE_COMMAND = None
NODE_FILL = "/n_fill"
NODE_FREE = "/n_free"
NODE_MAP_TO_AUDIO_BUS = "/n_mapa"
NODE_MAP_TO_AUDIO_BUS_CONTIGUOUS = "/n_mapan"
NODE_MAP_TO_CONTROL_BUS = "/n_map"
NODE_MAP_TO_CONTROL_BUS_CONTIGUOUS = "/n_mapn"
NODE_ORDER = "/n_order"
NODE_QUERY = "/n_query"
NODE_RUN = "/n_run"
NODE_SET = "/n_set"
NODE_SET_CONTIGUOUS = "/n_setn"
NODE_TRACE = "/n_trace"
# NOTHING = None
NOTIFY = "/notify"
PARALLEL_GROUP_NEW = "/p_new"
QUIT = "/quit"
STATUS = "/status"
SYNC = "/sync"
SYNTHDEF_FREE = "/d_free"
# SYNTHDEF_FREE_ALL = None
SYNTHDEF_LOAD = "/d_load"
SYNTHDEF_LOAD_DIR = "/d_loadDir"
SYNTHDEF_RECEIVE = "/d_recv"
SYNTH_GET = "/s_get"
SYNTH_GET_CONTIGUOUS = "/s_getn"
SYNTH_NEW = "/s_new"
SYNTH_QUERY = "/s_query"
# SYNTH_NEWARGS = None
SYNTH_NOID = "/s_noid"
UGEN_COMMAND = "/u_cmd"
VERSION = "/version"
### PUBLIC PROPERTIES ###
@property
def request_id(self):
return RequestId.from_expr(self.name)
class SampleFormat(IntEnumeration):
"""
An enumeration of soundfile sample formats.
::
>>> supriya.SampleFormat.INT24
SampleFormat.INT24
::
>>> supriya.SampleFormat.from_expr("float")
SampleFormat.FLOAT
::
>>> sample_format = supriya.SampleFormat.INT24
>>> sample_format.name.lower()
'int24'
"""
### CLASS VARIABLES ###
INT24 = 0
ALAW = 1
DOUBLE = 2
FLOAT = 3
INT8 = 4
INT16 = 5
INT32 = 6
MULAW = 7
class SignalRange(IntEnumeration):
"""
An enumeration of scsynth UGen signal ranges.
::
>>> supriya.SignalRange.UNIPOLAR
SignalRange.UNIPOLAR
::
>>> supriya.SignalRange.from_expr("bipolar")
SignalRange.BIPOLAR
"""
### CLASS VARIABLES ###
UNIPOLAR = 0
BIPOLAR = 1
class UnaryOperator(IntEnumeration):
### CLASS VARIABLES ###
ABSOLUTE_VALUE = 5
AMPLITUDE_TO_DB = 22
ARCCOS = 32
ARCSIN = 31
ARCTAN = 33
AS_FLOAT = 6
AS_INT = 7
BILINRAND = 40
BIT_NOT = 4
CEILING = 8
COIN = 44
COS = 29
COSH = 35
CUBED = 13
DB_TO_AMPLITUDE = 21
DIGIT_VALUE = 45
DISTORT = 42
EXPONENTIAL = 15
FLOOR = 9
FRACTIONAL_PART = 10
HZ_TO_MIDI = 18
HZ_TO_OCTAVE = 24
HANNING_WINDOW = 49
IS_NIL = 2
LINRAND = 39
LOG = 25
LOG10 = 27
LOG2 = 26
MIDI_TO_HZ = 17
SEMITONES_TO_RATIO = 19
NEGATIVE = 0
NOT = 1
NOT_NIL = 3
OCTAVE_TO_HZ = 23
RAMP = 52
RAND = 37
RAND2 = 38
RATIO_TO_SEMITONES = 20
RECIPROCAL = 16
RECTANGLE_WINDOW = 48
S_CURVE = 53
SIGN = 11
SILENCE = 46
SIN = 28
SINH = 34
SOFTCLIP = 43
SQUARE_ROOT = 14
SQUARED = 12
SUM3RAND = 41
TAN = 30
TANH = 36
THRU = 47
TRIANGLE_WINDOW = 51
WELCH_WINDOW = 50
class Unit(IntEnumeration):
### CLASS VARIABLES ###
UNDEFINED = 0
DECIBELS = 1
AMPLITUDE = 2
SECONDS = 3
MILLISECONDS = 4
HERTZ = 5
SEMITONES = 6
| mit | 8,999,538,610,394,238,000 | 20.803109 | 87 | 0.570501 | false | 3.079024 | false | false | false |
eldie1984/Scripts | MPF/pentaho/daemon_barrio.py | 1 | 2521 | #coding: utf-8
import logging
import time
from sys import argv,exit,path
import threading
from os import environ
path.insert(0, environ['SCRIPTS_HOME'])
path.insert(1, environ['SCRIPTS_HOME'][:-8])
from commons import *
logging.basicConfig(format=FORMAT,level=logging.INFO)
from daemon import runner
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty'
self.stderr_path = '/dev/tty'
self.pidfile_path = '/tmp/mydaemon.pid'
self.pidfile_timeout = 5
def run(self):
logging.debug("Seteo variables")
threads = list()
logging.info("--------- Inicio del Script ----------------")
commitQuery("""update ft_modalidad set cod_bahra=null where cod_bahra=0;""")
datosSinCorregirModalidad="""select id_mod_ori,longitud,latitud from ft_modalidad
where cod_bahra is null
and longitud is not null
order by id_mod_ori asc
limit 500
"""
while True:
threads = list()
rows = executeQuery(datosSinCorregirModalidad)
barrio=""
print len(rows)
cantidad=len(rows)/10
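# split the pending rows into chunks of `cantidad` and process each chunk in its own thread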
for rows_sub in [rows[x:x+cantidad] for x in xrange(0, len(rows), cantidad)]:
print len(rows_sub)
t = threading.Thread(target=Modalidad, args=(rows_sub,))
threads.append(t)
t.start()
for t in threads:
t.join()
if cantidad < 500 :
time.sleep(10000)
def Modalidad(rows):
logging.info("Thread %s iniciado" %threading.currentThread().getName())
query=""
for row in rows:
try:
barrio=getBarrio(row[2],row[1])
except Exception,e:
logging.error(str(e))
barrio=0
if barrio == 0:
try:
barrio=getBarrio(row[2],row[1],8)
except Exception , e:
logging.error(str(e))
query=query+"""update ft_modalidad set
cod_bahra=%s
where id_mod_ori= '%s'; \n""" % (barrio,row[0])
commitQuery(query)
logging.debug("Se finalizo la carga en la base de modalidad")
logging.info("Thread %s finalizado" %threading.currentThread().getName())
#####################################
#        Database queries           #
#####################################
app = App()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action() | gpl-2.0 | -7,568,935,834,134,904,000 | 28.670588 | 89 | 0.550179 | false | 3.601429 | false | false | false |
jualjiman/knowledge-base | src/knowledge_base/utils/signals.py | 1 | 1415 | # -*- coding: utf-8 -*-
from knowledge_base.utils.decorators import skip_signal
from knowledge_base.utils.string_representations import make_slug
from knowledge_base.utils.thumbnails import make_thumbnail
@skip_signal()
def generate_slug(sender, instance, created, **kwargs):
"""
Generates a slug for every given instance.
"""
instance.slug = make_slug(instance, 'name')
instance.skip_signal = True
instance.save()
@skip_signal()
def generate_thumbnail(sender, instance, created, *args, **kwargs):
"""
Generates a thumbnail, using the values configured in the
thumbnail_settings property of the desired model.
The format of these settings should be as follows (for example):
@property
def thumbnail_settings(self):
return {
"dimension": "100x100",
"original_field": "image",
"thumbnail_field": "thumbnail"
}
"""
thumbnail_settings = instance.thumbnail_settings
original_field = getattr(
instance,
thumbnail_settings.get('original_field')
)
if original_field:
make_thumbnail(
instance,
thumbnail_settings.get('original_field'),
thumbnail_settings.get('thumbnail_field'),
thumbnail_settings.get('dimension')
)
instance.skip_signal = True
instance.save()
del instance.skip_signal
| apache-2.0 | 8,079,996,806,884,068,000 | 26.745098 | 79 | 0.650177 | false | 4.340491 | false | false | false |
insiderr/insiderr-app | app/screens/welcome.py | 1 | 5622 | from kivy.logger import Logger
from screens import InteractiveScreen
from config import tutorial_path
from kivy.properties import ObjectProperty, OptionProperty, BooleanProperty, StringProperty, NumericProperty
from kivy.uix.image import Image
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.label import Label
from os import listdir
from os.path import join
from authentication import user_authenticated
from widgets.linkedin import LinkedIn
from kivy.utils import platform
from theme import anonymous_nick
class TutorialImage(FloatLayout):
source = StringProperty()
class TutorialProgressImage(Image):
status = OptionProperty('dark', options=('dark', 'light'))
class TutorialSkipButton(ButtonBehavior, Label):
hide = BooleanProperty(False)
class HiddenButton(ButtonBehavior, Label):
__events__ = ('on_hidden_press',)
hold_threshold = NumericProperty(10.)
def on_hidden_press(self):
pass
def on_touch_down(self, touch):
if super(HiddenButton, self).on_touch_down(touch):
from time import time
self.ts = time()
def on_touch_up(self, touch):
if super(HiddenButton, self).on_touch_up(touch):
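# a double tap fires immediately on non-android platforms; otherwise the
# press must have been held longer than hold_threshold seconds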
if platform != 'android' and touch.is_double_tap:
self.dispatch('on_hidden_press')
else:
from time import time
self.ts = time()-self.ts
if self.ts > self.hold_threshold:
self.dispatch('on_hidden_press')
class WelcomeScreen(InteractiveScreen):
__events__ = ('on_complete',)
carousel = ObjectProperty()
progress_indicator = ObjectProperty()
skip_button = ObjectProperty()
def set_index(self, index):
if self.progress_indicator:
pis = list(reversed(self.progress_indicator.children))
for c in pis[:index + 1]:
c.status = 'dark'
for c in pis[index + 1:]:
c.status = 'light'
self.update_skip_button(index=index)
def update_skip_button(self, index=None):
index = index or self.carousel.index
self.skip_button.hide = (index == len(self.carousel.slides) - 1)
if self.skip_button.hide:
from modules.core.android_utils import LogTestFairy
LogTestFairy('Login Screen')
def _linkedin_login_completed(self, *largs):
user_profile = largs[1] if len(largs) >1 else None
if user_profile:
from config import linkedin_ds
industry = user_profile.get('industry','unknown')
expertise = 'unknown'
if user_profile.get('skills',None):
try:
skills = user_profile.get('skills').get('values', None)
expertise = skills[0]['skill']['name']
except:
print 'Error parsing linkedin skills -- %s' % user_profile
company = 'unknown'
position = 'unknown'
if user_profile.get('threeCurrentPositions',None):
try:
positions = user_profile.get('threeCurrentPositions').get('values', None)
company = positions[0]['company']['name']
position = positions[0]['title']
except:
print 'Error parsing linkedin company/position -- %s' % user_profile
def update(ds):
ds.update({
'anonymous': anonymous_nick,
'industry': industry,
'company': company,
'position': position,
'expertise': expertise
})
linkedin_ds.update(update)
self.dispatch('on_complete')
def on_pre_enter(self):
from modules.core.android_utils import LogTestFairy
LogTestFairy('Tutorial')
if self.carousel:
self.populate()
else:
self.bind(
carousel=self._populate_when_ready,
progress_indicator=self._populate_when_ready)
def _populate_when_ready(self, *largs):
if self.carousel and self.progress_indicator:
self.populate()
def populate(self):
if not self.carousel.slides:
self.populate_slides()
self.populate_progress()
def populate_slides(self):
for file in sorted(listdir(tutorial_path)):
self.carousel.add_widget(
TutorialImage(
source=join(tutorial_path, file)))
if not user_authenticated():
linkedin = LinkedIn()
linkedin.bind(on_complete=self._linkedin_login_completed)
self.carousel.add_widget(linkedin)
self.update_skip_button()
def populate_progress(self):
first = True
for c in self.carousel.slides:
self.progress_indicator.add_widget(
TutorialProgressImage(status='dark' if first else 'light'))
first = False
def on_leave(self, *args):
# Note: a bug in kivy will cause this to throw an index exception
if self.carousel:
self.carousel.clear_widgets()
def skip_to_last(self):
try:
self.carousel.load_slide(self.carousel.slides[-1])
self.set_index(len(self.carousel.slides) - 1)
except Exception as ex:
pass
def on_complete(self):
# store the login keys only when we complete the linkedin authentication
from utilities.auth_store import store_keys
store_keys()
| gpl-3.0 | -7,147,254,144,436,556,000 | 34.1375 | 108 | 0.59534 | false | 4.149077 | false | false | false |
flightcom/freqtrade | freqtrade/fiat_convert.py | 1 | 6182 | import logging
import time
from pymarketcap import Pymarketcap
logger = logging.getLogger(__name__)
class CryptoFiat():
# Constants
CACHE_DURATION = 6 * 60 * 60 # 6 hours
def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None:
"""
Create an object that will contain the price for a crypto-currency in fiat
:param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
:param fiat_symbol: FIAT currency you want to convert to (e.g USD)
:param price: Price in FIAT
"""
# Public attributes
self.crypto_symbol = None
self.fiat_symbol = None
self.price = 0.0
# Private attributes
self._expiration = 0
self.crypto_symbol = crypto_symbol.upper()
self.fiat_symbol = fiat_symbol.upper()
self.set_price(price=price)
def set_price(self, price: float) -> None:
"""
Set the price of the Crypto-currency in FIAT and set the expiration time
:param price: Price of the current Crypto currency in the fiat
:return: None
"""
self.price = price
self._expiration = time.time() + self.CACHE_DURATION
def is_expired(self) -> bool:
"""
Return if the current price is still valid or needs to be refreshed
:return: bool, true the price is expired and needs to be refreshed, false the price is
still valid
"""
return self._expiration - time.time() <= 0
class CryptoToFiatConverter(object):
__instance = None
_coinmarketcap = None
# Constants
SUPPORTED_FIAT = [
"AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK",
"EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY",
"KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN",
"RUB", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", "USD"
]
def __new__(cls):
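# singleton: every caller shares one converter instance (and one Pymarketcap client)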
if CryptoToFiatConverter.__instance is None:
CryptoToFiatConverter.__instance = object.__new__(cls)
try:
CryptoToFiatConverter._coinmarketcap = Pymarketcap()
except BaseException:
CryptoToFiatConverter._coinmarketcap = None
return CryptoToFiatConverter.__instance
def __init__(self) -> None:
self._pairs = []
def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float:
"""
Convert an amount of crypto-currency to fiat
:param crypto_amount: amount of crypto-currency to convert
:param crypto_symbol: crypto-currency used
:param fiat_symbol: fiat to convert to
:return: float, value in fiat of the crypto-currency amount
"""
price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)
return float(crypto_amount) * float(price)
def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float:
"""
Return the price of the Crypto-currency in Fiat
:param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
:param fiat_symbol: FIAT currency you want to convert to (e.g USD)
:return: Price in FIAT
"""
crypto_symbol = crypto_symbol.upper()
fiat_symbol = fiat_symbol.upper()
# Check if the fiat conversion you want is supported
if not self._is_supported_fiat(fiat=fiat_symbol):
raise ValueError('The fiat {} is not supported.'.format(fiat_symbol))
# Get the pair that interests us and return the price in fiat
for pair in self._pairs:
if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol:
# If the price is expired we refresh it, avoid to call the API all the time
if pair.is_expired():
pair.set_price(
price=self._find_price(
crypto_symbol=pair.crypto_symbol,
fiat_symbol=pair.fiat_symbol
)
)
# return the last price we have for this pair
return pair.price
# The pair does not exist, so we create it and return the price
return self._add_pair(
crypto_symbol=crypto_symbol,
fiat_symbol=fiat_symbol,
price=self._find_price(
crypto_symbol=crypto_symbol,
fiat_symbol=fiat_symbol
)
)
def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float:
"""
:param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
:param fiat_symbol: FIAT currency you want to convert to (e.g USD)
:return: price in FIAT
"""
self._pairs.append(
CryptoFiat(
crypto_symbol=crypto_symbol,
fiat_symbol=fiat_symbol,
price=price
)
)
return price
def _is_supported_fiat(self, fiat: str) -> bool:
"""
Check if the FIAT your want to convert to is supported
:param fiat: FIAT to check (e.g USD)
:return: bool, True supported, False not supported
"""
fiat = fiat.upper()
return fiat in self.SUPPORTED_FIAT
def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float:
"""
Call CoinMarketCap API to retrieve the price in the FIAT
:param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
:param fiat_symbol: FIAT currency you want to convert to (e.g USD)
:return: float, price of the crypto-currency in Fiat
"""
# Check if the fiat conversion you want is supported
if not self._is_supported_fiat(fiat=fiat_symbol):
raise ValueError('The fiat {} is not supported.'.format(fiat_symbol))
try:
return float(
self._coinmarketcap.ticker(
currency=crypto_symbol,
convert=fiat_symbol
)['price_' + fiat_symbol.lower()]
)
except BaseException:
return 0.0
| gpl-3.0 | -8,155,509,983,424,285,000 | 35.364706 | 98 | 0.574571 | false | 3.935073 | false | false | false |
CodigoSur/cyclope | cyclope/apps/articles/migrations/0016_remove_picture_article.py | 2 | 17075 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
# depends_on = (
# ("cyclope", "0015_fk_2_M2M_pictures_data"),
# )
def forwards(self, orm):
# Deleting field 'Article.picture'
db.delete_column('articles_article', 'picture_id')
def backwards(self, orm):
# Adding field 'Article.picture'
db.add_column('articles_article', 'picture',
self.gf('django.db.models.fields.related.ForeignKey')(related_name='picture', null=True, to=orm['medialibrary.Picture'], on_delete=models.SET_NULL, blank=True),
keep_default=False)
models = {
'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': "orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'articles.article': {
'Meta': {'ordering': "('-creation_date', 'name')", 'object_name': 'Article'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '4'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Author']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'pictures': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'pictures'", 'symmetrical': 'False', 'to': "orm['medialibrary.Picture']"}),
'pretitle': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '6'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Source']", 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'collections.categorization': {
'Meta': {'ordering': "('order', '-id')", 'object_name': 'Categorization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categorizations'", 'to': "orm['collections.Category']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'collections.category': {
'Meta': {'unique_together': "(('collection', 'name'),)", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': "orm['collections.Collection']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['collections.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'collections.collection': {
'Meta': {'object_name': 'Collection'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'default_list_view': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'navigation_root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'blank': 'True'}),
'view_options': ('jsonfield.fields.JSONField', [], {'default': "'{}'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cyclope.author': {
'Meta': {'ordering': "['name']", 'object_name': 'Author'},
'content_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['contenttypes.ContentType']", 'db_index': 'True', 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'})
},
'cyclope.relatedcontent': {
'Meta': {'ordering': "['order']", 'object_name': 'RelatedContent'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'other_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'other_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_rt'", 'to': "orm['contenttypes.ContentType']"}),
'self_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'self_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_contents_lt'", 'to': "orm['contenttypes.ContentType']"})
},
'cyclope.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'db_index': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'medialibrary.picture': {
'Meta': {'object_name': 'Picture'},
'allow_comments': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '4'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Author']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '100'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_author': ('django.db.models.fields.CharField', [], {'default': "'SITE'", 'max_length': '6'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cyclope.Source']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['articles']
| gpl-3.0 | -768,111,009,741,101,000 | 84.80402 | 204 | 0.553382 | false | 3.666524 | false | false | false |
ytsvetkov/TuringMachine | parsing/input_parser.py | 1 | 1622 | import sys
import re
class SyntacticError(Exception):
def __init__(self, message):
self.message = message
user_tape_regex = r'^\s*\(.*,.,.*\)\s*$'
user_states_regex = r'^([0-9]*,)*[0-9]+$'
user_initial_regex = r'^[0-9]+$'
user_rule_regex = r'^\([0-9]{1,},.,.,.,[0-9]{1,},(Left|None|Right)\)$'
def parse_tape_from_terminal(input_tape):
tape = re.match(user_tape_regex, input_tape.strip('\n '))
if tape is None:
raise SyntacticError('There is syntactic error with this tape !')
else:
return tape.group().strip(')(').split(',')
def parse_states_from_terminal(input_states):
states = re.match(user_states_regex, input_states.strip('\n'))
if states is None:
raise SyntacticError('There is syntactic error with these states !')
else:
machine_states = set()
for state in states.group().strip('}').split(','):
machine_states.add(int(state))
return machine_states
def parse_initial_from_terminal(input_initial_state):
initial = re.match(user_initial_regex, input_initial_state.strip('\n'))
if initial is None:
raise SyntacticError('There is syntactic error with the'
'initial state !')
else:
return int(initial.group())
def parse_rule_from_terminal(input_rule):
input_rule = re.match(user_rule_regex, input_rule)
if input_rule is None:
raise SyntacticError('There is syntactic error with this rule !')
else:
rule = input_rule.group().strip('\n)(').split(',')
rule[0] = int(rule[0])
rule[4] = int(rule[4])
return rule
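# Illustrative example (added, not in the original file): a rule accepted by
# user_rule_regex has six comma-separated fields inside parentheses, e.g.
#   parse_rule_from_terminal('(1,a,b,c,2,Right)')  ->  [1, 'a', 'b', 'c', 2, 'Right']
# The first and fifth fields are converted to int; the meaning of the
# single-character fields is defined by the Turing machine code elsewhere
# in this project.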
| gpl-3.0 | 3,111,628,886,738,354,700 | 29.603774 | 76 | 0.606658 | false | 3.290061 | false | false | false |
macosforge/ccs-calendarserver | twistedcaldav/test/test_stdconfig.py | 1 | 6138 | # -*- coding: utf-8 -*-
##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from cStringIO import StringIO
from twext.python.filepath import CachingFilePath as FilePath
from twisted.trial.unittest import TestCase
from twistedcaldav.config import Config, ConfigDict
from twistedcaldav.stdconfig import NoUnicodePlistParser, PListConfigProvider,\
_updateDataStore, _updateMultiProcess, _updateUtilityLog
import twistedcaldav.stdconfig
import sys
import os
nonASCIIValue = "→←"
nonASCIIPlist = "<plist version='1.0'><string>%s</string></plist>" % (
nonASCIIValue,
)
nonASCIIConfigPList = """
<plist version="1.0">
<dict>
<key>DataRoot</key>
<string>%s</string>
</dict>
</plist>
""" % (nonASCIIValue,)
class ConfigParsingTests(TestCase):
"""
Tests to verify the behavior of the configuration parser.
"""
def test_noUnicodePListParser(self):
"""
L{NoUnicodePlistParser.parse} retrieves non-ASCII property list values
as (UTF-8 encoded) 'str' objects, so that a single type is consistently
used regardless of the input data.
"""
parser = NoUnicodePlistParser()
self.assertEquals(parser.parse(StringIO(nonASCIIPlist)),
nonASCIIValue)
def test_parseNonASCIIConfig(self):
"""
Non-ASCII <string>s found as part of a configuration file will be
retrieved as UTF-8 encoded 'str' objects, as parsed by
L{NoUnicodePlistParser}.
"""
cfg = Config(PListConfigProvider({"DataRoot": ""}))
tempfile = FilePath(self.mktemp())
tempfile.setContent(nonASCIIConfigPList)
cfg.load(tempfile.path)
self.assertEquals(cfg.DataRoot, nonASCIIValue)
def test_relativeDefaultPaths(self):
"""
The paths specified in the default configuration should be interpreted
as relative to the paths specified in the configuration file.
"""
cfg = Config(PListConfigProvider(
{"AccountingLogRoot": "some-path",
"LogRoot": "should-be-ignored"}))
cfg.addPostUpdateHooks([_updateDataStore])
tempfile = FilePath(self.mktemp())
tempfile.setContent("<plist version='1.0'><dict>"
"<key>LogRoot</key><string>/some/root</string>"
"</dict></plist>")
cfg.load(tempfile.path)
self.assertEquals(cfg.AccountingLogRoot, "/some/root/some-path")
tempfile.setContent("<plist version='1.0'><dict>"
"<key>LogRoot</key><string>/other/root</string>"
"</dict></plist>")
cfg.load(tempfile.path)
self.assertEquals(cfg.AccountingLogRoot, "/other/root/some-path")
def test_includes(self):
plist1 = """
<plist version="1.0">
<dict>
<key>ServerRoot</key>
<string>/root</string>
<key>DocumentRoot</key>
<string>defaultdoc</string>
<key>DataRoot</key>
<string>defaultdata</string>
<key>ConfigRoot</key>
<string>defaultconfig</string>
<key>LogRoot</key>
<string>defaultlog</string>
<key>RunRoot</key>
<string>defaultrun</string>
<key>Includes</key>
<array>
<string>%s</string>
</array>
</dict>
</plist>
"""
plist2 = """
<plist version="1.0">
<dict>
<key>DataRoot</key>
<string>overridedata</string>
</dict>
</plist>
"""
tempfile2 = FilePath(self.mktemp())
tempfile2.setContent(plist2)
tempfile1 = FilePath(self.mktemp())
tempfile1.setContent(plist1 % (tempfile2.path,))
cfg = Config(PListConfigProvider({
"ServerRoot": "",
"DocumentRoot": "",
"DataRoot": "",
"ConfigRoot": "",
"LogRoot": "",
"RunRoot": "",
"Includes": [],
}))
cfg.addPostUpdateHooks([_updateDataStore])
cfg.load(tempfile1.path)
self.assertEquals(cfg.DocumentRoot, "/root/overridedata/defaultdoc")
self.assertEquals(cfg.DataRoot, "/root/overridedata")
def test_updateDataStore(self):
configDict = {
"ServerRoot": "/a/b/c/",
}
_updateDataStore(configDict)
self.assertEquals(configDict["ServerRoot"], "/a/b/c")
def test_updateMultiProcess(self):
def stubProcessCount(*args):
return 3
self.patch(twistedcaldav.stdconfig, "computeProcessCount", stubProcessCount)
configDict = ConfigDict({
"MultiProcess": {
"ProcessCount": 0,
"MinProcessCount": 2,
"PerCPU": 1,
"PerGB": 1,
},
"Postgres": {
"ExtraConnections": 5,
"BuffersToConnectionsRatio": 1.5,
},
"SharedConnectionPool": False,
"MaxDBConnectionsPerPool": 10,
})
_updateMultiProcess(configDict)
self.assertEquals(45, configDict.Postgres.MaxConnections)
self.assertEquals(67, configDict.Postgres.SharedBuffers)
def test_updateUtilityLog(self):
configDict = {
"ServerRoot": "/a/b/c/",
"LogRoot": "Logs",
"UtilityLogFile": "util.txt",
}
_updateUtilityLog(configDict)
self.assertEquals(configDict["UtilityLogFile"], "{}.log".format(os.path.basename(sys.argv[0])))
_updateDataStore(configDict)
_updateUtilityLog(configDict)
self.assertEquals(configDict["UtilityLogFile"], "/a/b/c/Logs/{}.log".format(os.path.basename(sys.argv[0])))
| apache-2.0 | 1,329,087,385,722,438,100 | 31.802139 | 115 | 0.614607 | false | 3.957419 | true | false | false |
longmazhanfeng/interface_web | interface_platform/rfit/HttpLibrary.py | 1 | 18018 | # -*- coding: UTF-8 -*-
import urllib2
import cookielib
import urllib
import httplib2
import poster
import json
import sys
import base64
import requests
reload(sys)
sys.setdefaultencoding('utf-8')
class HttpLibrary(object):
"""Http Client"""
global host
# host = 'https://my.qiye.yixin.im'
host = 'https://super.qiye.yixin.im'
# host = 'http://10.164.96.78'
global port
port = "0"
# port = "8184"
global ticket_path
ticket_path = "http://10.164.96.78:8184/app/system/getAppAuthTicketFromWeb"
def __init__(self):
pass
def get_cookie(self, username, password):
"""Get cookie from username and password.
Examples:
| Get Cookie | username | password |
"""
print 'start to getcookie'
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'email': username, 'password': password})
# url = self.__checkport() + '/login/in' + '?' + postdata
url = self.__checkport() + '/login/in'
print 'HttpPost url is ' + url
try:
# response = opener.open(url)
response = opener.open(url, postdata)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'getcookie failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'getcookie failed!'
                print 'reason is ' + str(e.reason) + ', error code is ' + str(e.code)
else:
print 'getcookie failed! the error is not URLError and HTTPError'
else:
content = response.read()
            print 'get cookie successful, getcookie response is ' + str(content).decode('utf-8')
return response.info()['Set-Cookie']
def get_admin_cookie(self, username, password):
"""Get admin cookie from username and password.
Examples:
| Get Admin Cookie | username | password |
"""
print 'start to getadmincookie'
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'account': username, 'password': password})
url = 'https://super.qiye.yixin.im' + '/checkLogin?'
print 'HttpPost url is ' + url
try:
response = opener.open(url, postdata)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'getadmincookie failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'getadmincookie failed!'
                print 'reason is ' + str(e.reason) + ', error code is ' + str(e.code)
else:
print 'getadmincookie failed! the error is not URLError and HTTPError'
else:
content = response.read()
            print 'get admin cookie successful, get admin cookie response is ' + str(content).decode('utf-8')
return response.info()['Set-Cookie']
def web_get(self, path, parameter, cookie):
"""Issues a HTTP GET request,parameter should be a python dict,this method return a string object.
Examples:
| ${res} | WEB Get | /foo/bar.do | {'foo': '1','bar': '2'} | cookie |
"""
if parameter == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(parameter))
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
print 'HttpGet request url is ' + url
res = urllib2.Request(url)
res.add_header('Accept', 'application/json')
res.add_header('Content-Type', 'application/x-www-form-urlencoded')
res.add_header('Cookie', cookie)
try:
response = urllib2.urlopen(res)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'send HttpGet failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'send HttpGet failed!'
                print 'reason is ' + str(e.reason) + ', error code is ' + str(e.code)
else:
print 'send HttpGet failed! the error is not URLError and HTTPError'
else:
info = self.__replace_null(response.read())
print 'HttpGet response is ' + str(info)
return info.decode('utf-8')
def web_post(self, path, para, data, cookie, uid=''):
"""Issues a HTTP POST request,parameter should be a python dict,data is post entity, this method return a string object.
Examples:
| ${res} | WEB POST | /foo/bar.do | {'foo': '1','bar': '2'} | {"foo": {"bar": [1,2,3]}} | cookie |
| ${res} | WEB POST | /foo/bar.do | {'foo': '1','bar': '2'} | None | cookie |
"""
http = httplib2.Http()
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie,
'uid': uid}
headers1 = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': cookie, 'uid': uid}
if para == 'None':
if "http" in path:
url = path
print "chenyazhi test url: ", url
else:
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(para))
url = self.__checkport() + path + '?' + str(self.__encodepara(para))
print 'HttpPost url is ' + url
try:
if data == 'None':
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'POST', headers=headers)
res_content = self.__replace_null(content)
print 'send HttpPost successful! content is ' + res_content
return res_content.decode('utf-8')
else:
if type(eval(data)) == dict:
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'POST', headers=headers1, body=json.dumps(eval(data)))
res_content = self.__replace_null(content)
print 'send HttpPost successful! content is ' + res_content
return res_content.decode('utf-8')
else:
print 'please confirm data type,data is not json'
except Exception, e:
raise e
def __generate_url(self, parameter):
"""generate url from parameter"""
parameter = eval(parameter)
para = ''
for key in parameter.keys():
para = str(para) + key + '=' + parameter.get(key) + '&'
url = para[:-1]
return url
def web_delete(self, path, parameter, data, cookie):
"""Issues a HTTP DELETE request,parameter should be a python dict,data is delete entity, this method return a string object.
Examples:
| ${res} | WEB DELETE | /foo/bar.do | {'foo': '1','bar': '2'} | {"foo": {"bar": [1,2,3]}} | cookie |
| ${res} | WEB DELETE | /foo/bar.do | None | {"foo": {"bar": [1,2,3]}} | cookie |
"""
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}
headers1 = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': cookie}
if parameter == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(parameter))
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
print 'HttpDelete url is ' + url
if data == 'None':
request = urllib2.Request(url, headers=headers)
else:
            if type(eval(data)) == dict:
                request = urllib2.Request(url, data=json.dumps(eval(data)), headers=headers1)
            else:
                print 'please confirm data type, data is not json'
                return
request.get_method = lambda: 'DELETE'
opener = urllib2.build_opener()
try:
# response = urllib2.urlopen(request)
response = opener.open(request)
except Exception, e:
raise e
else:
info = self.__replace_null(response.read())
print 'HttpDelete response is ' + info
return info.decode('utf-8')
def web_put(self, path, parameter, data, cookie):
"""Issues a HTTP PUT request,parameter should be a python dict,data is put entity, this method return a string object.
Examples:
| ${res} | WEB PUT | /foo/bar.do | {'foo': '1','bar': '2'} | {"foo": {"bar": [1,2,3]}} | cookie |
| ${res} | WEB PUT | /foo/bar.do | {'foo': '1','bar': '2'} | None | cookie |
"""
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}
headers1 = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': cookie}
if parameter == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(parameter))
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
print 'HttpPut url is ' + url
http = httplib2.Http()
try:
if data == 'None':
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'PUT', headers=headers)
            else:
                if type(eval(data)) == dict:
                    http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
                    response, content = http.request(url, 'PUT', headers=headers1, body=json.dumps(eval(data)))
                else:
                    print 'please confirm data type, data is not json'
                    return
            info = self.__replace_null(str(content))
            print 'Send HttpPut successful, content is ' + info
            return info.decode('utf-8')
except Exception, e:
raise e
def web_post_file(self, path, parameter, entity, cookie):
"""Issues a HTTP POST FILE request,url is the URL relative to the server root,parameter should be a python dict,this method return a string object.
Examples:
| ${res} | WEB POST FILE | https://b.yixin.im/addCodeConf.p | {'file':open('Resources/Material/codeConf.csv','rb'),'name':'text码活动ffd'}| cookie |
"""
if parameter == 'None':
url = self.__checkport() + path
else:
url = self.__checkport() + path + '?' + str(self.__encodepara(parameter))
opener = poster.streaminghttp.register_openers()
datagen, headers = poster.encode.multipart_encode(eval(entity))
res = urllib2.Request(url, datagen, headers)
res.add_header('Cookie', cookie)
try:
response = urllib2.urlopen(res)
except Exception, e:
raise e
else:
info = self.__replace_null(response.read())
print 'send file successful,http response is ' + info
return info.decode('utf-8')
def web_post_filebyte(self, path, para, entity):
"""this keyword is for openplatform to post file.
Examples:
| ${res} | WEB POST FILEBYTE | /cgi-bin/file/upload | {'access_token':'ACCESS_TOKEN'} | {'content':'Resources/Material/logo.jpg','type':'jpg'}
"""
if type(eval(entity)) != dict:
print 'entity must be dict'
return
else:
entitydict = eval(entity)
filename = entitydict['content']
f = open(filename, 'rb')
fbyte = f.read()
enbyte = base64.b64encode(fbyte)
entitydict['content'] = enbyte
res = self.web_post(path, para, str(entitydict), 'None')
return res
def __replace_null(self, response):
strres = json.dumps(response, ensure_ascii=False)
return eval(strres.replace('null', '\\"null\\"').replace('false', '\\"false\\"').replace('true', '\\"true\\"'))
def web_environment_config(self, h, p):
"""Set HTTP Request host and port,host and port is global variable.
host default value is https://b.yixin.im,port default value is 0.
Examples:
| WEB Environment Config| host | port |
"""
global host
global port
host = h
port = p
print 'host is ' + h
print 'port is ' + str(p)
def __checkport(self):
global host
global port
if port == "0":
url = host
else:
url = host + ':' + str(port)
return url
def __encodepara(self, para):
encodepara = urllib.urlencode(para)
return encodepara
def web_formdatapost(self, path, para, data, cookie):
http = httplib2.Http()
headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}
if para == 'None':
url = self.__checkport() + path
else:
# url = self.__checkport() + path + '?' + str(self.__generate_url(para))
url = self.__checkport() + path + '?' + str(self.__encodepara(para))
print 'HttpPost url is ' + url
try:
http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
response, content = http.request(url, 'POST', headers=headers, body=data)
res_content = self.__replace_null(content)
print 'send HttpPost successful! content is ' + res_content
return res_content.decode('utf-8')
except Exception, e:
raise e
def web_get_oauth(self, my_qiye_url, cookie, appKey, uid):
'''
        OAuth single sign-on for an internal system accessing an external application.
        A VPN connection must be enabled when using this keyword.
        :param my_qiye_url: the url before redirection, e.g. the important-notice app: "https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index"
        :param cookie: cookie of an ordinary administrator of the management console
        :param appKey: appKey, looked up in the super admin console
        :param uid: uid of the ordinary administrator, the same one whose cookie was obtained
        :return: the url carrying the OAuth authorization code
Examples:
| Web Get Oauth| my_qiye_url | cookie | appKey | uid |
'''
global ticket_path
        ### get the target url value ###
redirect_uri = self.get_id_from_url(my_qiye_url, "url")
        ### get the internal Web single sign-on ticket ###
# path = "http://10.164.96.78:8184/app/system/getAppAuthTicketFromWeb"
para = 'None'
data = '{"appid": "' + appKey + '"}'
res_ticket = self.web_post(ticket_path, 'None', data, cookie, uid)
st = json.loads(res_ticket).get('result').get('st')
oauth_url = "https://oauth-test.qiye.yixin.im/authorize?response_type=code&client_id=" + appKey + "&st=" + st + "&redirect_uri=" + redirect_uri
url_code = requests.get(oauth_url).url
return url_code
def get_id_from_url(self, url, id):
"""
        Get the value of a given key inside a url's query string.
        For example, with url="https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index"
        pass "appId" as the id argument to obtain the value of appId.
"""
if "?" not in url:
print "The url is indissociable "
else:
spliturl = url.split("?")
url_body = spliturl[1].split("&")
print url_body
for r in url_body:
if id in r:
id_long = len(id)
print r[id_long + 1:]
return r[id_long + 1:]
print "There have not " + id
def get_url_cookie(self, url):
print 'start to getcookie'
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
print 'HttpPost url is ' + url
try:
response = opener.open(url)
except urllib2.URLError as e:
if hasattr(e, 'reason'):
print 'getcookie failed!'
print 'reason is ' + e.reason
elif hasattr(e, 'code'):
print 'getcookie failed!'
                print 'reason is ' + str(e.reason) + ', error code is ' + str(e.code)
else:
print 'getcookie failed! the error is not URLError and HTTPError'
else:
content = response.read()
            print 'get cookie successful, getcookie response is ' + str(content).decode('utf-8')
return response.info()['Set-Cookie']
if __name__ == '__main__':
pass
# h = HttpLibrary()
# cookie = h.get_admin_cookie("[email protected]", "Admin123")
# print cookie
# cookie = h.get_cookie("[email protected]", "Abc123456")
#
# r = h.web_get_oauth("https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index", cookie, "bossnotice", "130")
#
    # ### admin login ###
# r = h.web_post("/checkLogin", "{'account':'[email protected]','password':'Abc123456'}", "None", "NTESkolibri-adminSI=1658DE8D79232165A1E7A4AD47C77A79.hzabj-kolibri-1.server.163.org-8016; Path=/; HttpOnly")
# print r
# web_get(self,path,parameter,cookie):
# print "!!!!!!!!!!!!!!!!!!!!!"
# r = h.web_get('/smsquota/getCompanyInfo','{"domain":"yixin.im"}',cookie)
# print r
# h.get_id_from_url("https://my.qiye.yixin.im/app/manageUrl?appId=613&url=https://inotice.qiye.yixin.im/manage/index", "appId")
# cookie = h.get_cookie('[email protected]','Admin123')
# h.web_post_filebyte('/cgi-bin/file/upload','{"access_token":"718ad40d0fbc4eba89621f86e0d23313"}','{"content":"Resources/Material/logo.jpg","type":"jpg"}')
| mit | -2,289,345,511,508,745,700 | 40.896471 | 208 | 0.556779 | false | 3.696492 | false | false | false |
chimeno/wagtail | wagtail/tests/migrations/0005_auto_20141008_0122.py | 3 | 1084 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0002_initial_data'),
('tests', '0004_auto_20141008_0420'),
]
operations = [
migrations.CreateModel(
name='PageChooserModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('page', models.ForeignKey(help_text=b'help text', to='wagtailcore.Page')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SnippetChooserModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('advert', models.ForeignKey(help_text=b'help text', to='tests.Advert')),
],
options={
},
bases=(models.Model,),
),
]
| bsd-3-clause | -4,484,322,285,801,456,600 | 29.971429 | 114 | 0.531365 | false | 4.388664 | false | false | false |
hankcs/HanLP | hanlp/components/parsers/parse_alg.py | 1 | 10872 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-04-02 23:20
from collections import defaultdict
from hanlp.components.parsers.chu_liu_edmonds import decode_mst
import numpy as np
class Tarjan:
"""Computes Tarjan's algorithm for finding strongly connected components (cycles) of a graph"""
def __init__(self, prediction, tokens):
"""
Parameters
----------
prediction : numpy.ndarray
a predicted dependency tree where prediction[dep_idx] = head_idx
tokens : numpy.ndarray
the tokens we care about (i.e. exclude _GO, _EOS, and _PAD)
"""
self._edges = defaultdict(set)
self._vertices = set((0,))
for dep, head in enumerate(prediction[tokens]):
self._vertices.add(dep + 1)
self._edges[head].add(dep + 1)
self._indices = {}
self._lowlinks = {}
self._onstack = defaultdict(lambda: False)
self._SCCs = []
index = 0
stack = []
for v in self.vertices:
if v not in self.indices:
self.strongconnect(v, index, stack)
# =============================================================
def strongconnect(self, v, index, stack):
"""
        Tarjan's recursive strongconnect step: assigns DFS indices and lowlink
        values and pops completed strongly connected components off the stack.
        Args:
          v: vertex currently being visited
          index: next DFS index to assign
          stack: stack of vertices on the current DFS path
        Returns:
          None. Completed components are appended to ``self._SCCs``.
"""
self._indices[v] = index
self._lowlinks[v] = index
index += 1
stack.append(v)
self._onstack[v] = True
for w in self.edges[v]:
if w not in self.indices:
self.strongconnect(w, index, stack)
self._lowlinks[v] = min(self._lowlinks[v], self._lowlinks[w])
elif self._onstack[w]:
self._lowlinks[v] = min(self._lowlinks[v], self._indices[w])
if self._lowlinks[v] == self._indices[v]:
self._SCCs.append(set())
while stack[-1] != v:
w = stack.pop()
self._onstack[w] = False
self._SCCs[-1].add(w)
w = stack.pop()
self._onstack[w] = False
self._SCCs[-1].add(w)
return
# ======================
@property
def edges(self):
return self._edges
@property
def vertices(self):
return self._vertices
@property
def indices(self):
return self._indices
@property
def SCCs(self):
return self._SCCs
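# Illustrative sketch (not part of the original module): detecting a head cycle
# in a predicted dependency tree with the Tarjan helper above.
#   import numpy as np
#   parse_preds = np.array([0, 2, 3, 1])      # parse_preds[i] = predicted head of token i
#   tokens = np.arange(1, len(parse_preds))   # skip the artificial ROOT at index 0
#   cycles = [scc for scc in Tarjan(parse_preds, tokens).SCCs if len(scc) > 1]
#   # -> [{1, 2, 3}]: tokens 1, 2 and 3 point at each other and form a cycle.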
class UnionFind(object):
def __init__(self, n) -> None:
super().__init__()
self.parent = [x for x in range(n)]
self.height = [0] * n
def find(self, x):
if self.parent[x] == x:
return x
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def unite(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.height[x] < self.height[y]:
self.parent[x] = y
else:
self.parent[y] = x
if self.height[x] == self.height[y]:
self.height[x] += 1
def same(self, x, y):
return self.find(x) == self.find(y)
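# Quick usage sketch for the disjoint-set helper above (illustrative only):
#   uf = UnionFind(5)
#   uf.unite(0, 1)
#   uf.unite(1, 2)
#   uf.same(0, 2)   # -> True, 0 and 2 are now in the same set
#   uf.same(0, 3)   # -> False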
def tarjan(parse_probs, length, tokens_to_keep, ensure_tree=True):
"""Adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py
Args:
parse_probs(NDArray): seq_len x seq_len, the probability of arcs
length(NDArray): sentence length including ROOT
tokens_to_keep(NDArray): mask matrix
ensure_tree: (Default value = True)
    Returns:
      the predicted head index for each token as an NDArray, with any cycles broken
"""
if ensure_tree:
parse_preds, parse_probs, tokens = unique_root(parse_probs, tokens_to_keep, length)
# remove cycles
tarjan = Tarjan(parse_preds, tokens)
for SCC in tarjan.SCCs:
if len(SCC) > 1:
dependents = set()
to_visit = set(SCC)
while len(to_visit) > 0:
node = to_visit.pop()
if not node in dependents:
dependents.add(node)
to_visit.update(tarjan.edges[node])
# The indices of the nodes that participate in the cycle
cycle = np.array(list(SCC))
# The probabilities of the current heads
old_heads = parse_preds[cycle]
old_head_probs = parse_probs[cycle, old_heads]
# Set the probability of depending on a non-head to zero
non_heads = np.array(list(dependents))
parse_probs[np.repeat(cycle, len(non_heads)), np.repeat([non_heads], len(cycle), axis=0).flatten()] = 0
# Get new potential heads and their probabilities
new_heads = np.argmax(parse_probs[cycle][:, tokens], axis=1) + 1
new_head_probs = parse_probs[cycle, new_heads] / old_head_probs
# Select the most probable change
change = np.argmax(new_head_probs)
changed_cycle = cycle[change]
old_head = old_heads[change]
new_head = new_heads[change]
# Make the change
parse_preds[changed_cycle] = new_head
tarjan.edges[new_head].add(changed_cycle)
tarjan.edges[old_head].remove(changed_cycle)
return parse_preds
else:
# block and pad heads
parse_probs = parse_probs * tokens_to_keep
parse_preds = np.argmax(parse_probs, axis=1)
return parse_preds
def chu_liu_edmonds(parse_probs, length):
tree = decode_mst(parse_probs.T, length, False)[0]
tree[0] = 0
return tree
def unique_root(parse_probs, tokens_to_keep: np.ndarray, length):
I = np.eye(len(tokens_to_keep))
# block loops and pad heads
if tokens_to_keep.ndim == 1:
tokens_to_keep = np.expand_dims(tokens_to_keep, -1)
parse_probs = parse_probs * tokens_to_keep * (1 - I)
parse_preds = np.argmax(parse_probs, axis=1)
tokens = np.arange(1, length)
roots = np.where(parse_preds[tokens] == 0)[0] + 1
# ensure at least one root
if len(roots) < 1:
# The current root probabilities
root_probs = parse_probs[tokens, 0]
# The current head probabilities
old_head_probs = parse_probs[tokens, parse_preds[tokens]]
# Get new potential root probabilities
new_root_probs = root_probs / old_head_probs
# Select the most probable root
new_root = tokens[np.argmax(new_root_probs)]
# Make the change
parse_preds[new_root] = 0
# ensure at most one root
elif len(roots) > 1:
# The probabilities of the current heads
root_probs = parse_probs[roots, 0]
# Set the probability of depending on the root zero
parse_probs[roots, 0] = 0
# Get new potential heads and their probabilities
new_heads = np.argmax(parse_probs[roots][:, tokens], axis=1) + 1
new_head_probs = parse_probs[roots, new_heads] / root_probs
# Select the most probable root
new_root = roots[np.argmin(new_head_probs)]
# Make the change
parse_preds[roots] = new_heads
parse_preds[new_root] = 0
return parse_preds, parse_probs, tokens
def dfs(graph, start, end):
fringe = [(start, [])]
while fringe:
state, path = fringe.pop()
if path and state == end:
yield path
continue
for next_state in graph[state]:
if next_state in path:
continue
fringe.append((next_state, path + [next_state]))
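# Illustrative example (added for clarity): dfs yields each simple path between
# two nodes of an adjacency-list graph, as the list of nodes visited after start.
#   graph = [[1], [2], [0]]        # 0 -> 1 -> 2 -> 0
#   next(dfs(graph, 0, 0))         # -> [1, 2, 0]
# add_secondary_arcs_by_preds uses it only to check whether a path already
# exists before adding a secondary arc, which would otherwise create a cycle.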
def mst_then_greedy(arc_scores, rel_scores, mask, root_rel_idx, rel_idx=None):
from scipy.special import softmax
from scipy.special import expit as sigmoid
length = sum(mask) + 1
mask = mask[:length]
arc_scores = arc_scores[:length, :length]
arc_pred = arc_scores > 0
arc_probs = sigmoid(arc_scores)
rel_scores = rel_scores[:length, :length, :]
rel_probs = softmax(rel_scores, -1)
if not any(arc_pred[:, 0][1:]): # no root
root = np.argmax(rel_probs[1:, 0, root_rel_idx]) + 1
arc_probs[root, 0] = 1
parse_preds, parse_probs, tokens = unique_root(arc_probs, mask, length)
root = adjust_root_score(arc_scores, parse_preds, root_rel_idx, rel_scores)
tree = chu_liu_edmonds(arc_scores, length)
if rel_idx is not None: # Unknown DEPREL label: 'ref'
rel_scores[np.arange(len(tree)), tree, rel_idx] = -float('inf')
return tree, add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx)
def adjust_root_score(arc_scores, parse_preds, root_rel_idx, rel_scores=None):
root = np.where(parse_preds[1:] == 0)[0] + 1
arc_scores[:, 0] = min(np.min(arc_scores), -1000)
arc_scores[root, 0] = max(np.max(arc_scores), 1000)
if rel_scores is not None:
rel_scores[:, :, root_rel_idx] = -float('inf')
rel_scores[root, 0, root_rel_idx] = float('inf')
return root
def add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx, arc_preds=None):
if not isinstance(tree, np.ndarray):
tree = np.array(tree)
if arc_preds is None:
arc_preds = arc_scores > 0
rel_pred = np.argmax(rel_scores, axis=-1)
return add_secondary_arcs_by_preds(arc_scores, arc_preds, rel_pred, tree, root_rel_idx)
def add_secondary_arcs_by_preds(arc_scores, arc_preds, rel_preds, tree, root_rel_idx=None):
dh = np.argwhere(arc_preds)
sdh = sorted([(arc_scores[x[0], x[1]], list(x)) for x in dh], reverse=True)
graph = [[] for _ in range(len(tree))]
for d, h in enumerate(tree):
if d:
graph[h].append(d)
for s, (d, h) in sdh:
if not d or not h or d in graph[h]:
continue
try:
path = next(dfs(graph, d, h))
except StopIteration:
# no path from d to h
graph[h].append(d)
parse_graph = [[] for _ in range(len(tree))]
num_root = 0
for h in range(len(tree)):
for d in graph[h]:
rel = rel_preds[d, h]
if h == 0 and root_rel_idx is not None:
rel = root_rel_idx
assert num_root == 0
num_root += 1
parse_graph[d].append((h, rel))
parse_graph[d] = sorted(parse_graph[d])
return parse_graph
def adjust_root_score_then_add_secondary_arcs(arc_scores, rel_scores, tree, root_rel_idx):
    if len(arc_scores) != len(tree):
arc_scores = arc_scores[:len(tree), :len(tree)]
rel_scores = rel_scores[:len(tree), :len(tree), :]
parse_preds = arc_scores > 0
# adjust_root_score(arc_scores, parse_preds, rel_scores)
parse_preds[:, 0] = False # set heads to False
rel_scores[:, :, root_rel_idx] = -float('inf')
return add_secondary_arcs_by_scores(arc_scores, rel_scores, tree, root_rel_idx, parse_preds)
| apache-2.0 | -5,322,751,009,263,247,000 | 34.070968 | 119 | 0.560706 | false | 3.537911 | false | false | false |
grochmal/capybara | src/util/graph-school.py | 1 | 2297 | #!/usr/bin/env python
import os,sys
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
__doc__ = '''
Usage: graph-school.py <dataset> <index column> <feature out> <sorted out>
'''
def avgs(dat, idx, fout, sout):
schools = [ 'renaissance'
, 'baroque'
, 'neoclassicism'
, 'romanticism'
, 'impressionism'
, 'mughal'
]
data = pd.read_table( dat, delim_whitespace=True
, skipinitialspace=True, index_col=idx )
df = data[data.columns[3:]]
df.index = data.school
d = (df - df.min()) / (df.max() - df.min())
d['school'] = d.index
grp = d.groupby('school')
means = grp.mean().transpose()[schools]
fe = range(192)
ren = means['renaissance'].tolist()
bar = means['baroque'].tolist()
neo = means['neoclassicism'].tolist()
rom = means['romanticism'].tolist()
imp = means['impressionism'].tolist()
mug = means['mughal'].tolist()
plt.plot(fe, ren, '-r', label='renaissance')
plt.plot(fe, bar, '-g', label='baroque')
plt.plot(fe, neo, '-b', label='neoclassicism')
plt.plot(fe, rom, '-c', label='romanticism')
plt.plot(fe, imp, '-m', label='impressionism')
plt.plot(fe, mug, '-y', label='mughal')
plt.legend(loc='upper left')
plt.xlabel('feature')
plt.ylabel('mean value')
#plt.show()
plt.savefig(fout, dpi=150)
plt.close()
ren.sort()
bar.sort()
neo.sort()
rom.sort()
imp.sort()
mug.sort()
plt.plot(fe, ren, '-r', label='renaissance')
plt.plot(fe, bar, '-g', label='baroque')
plt.plot(fe, neo, '-b', label='neoclassicism')
plt.plot(fe, rom, '-c', label='romanticism')
plt.plot(fe, imp, '-m', label='impressionism')
plt.plot(fe, mug, '-y', label='mughal')
plt.legend(loc='upper left')
plt.ylabel('feature mean value')
#plt.show()
plt.savefig(sout, dpi=150)
if '__main__' == __name__:
if 5 != len(sys.argv):
print __doc__
exit(0)
if not os.path.isfile(sys.argv[1]):
print sys.argv[1], ': no such file'
exit(1)
avgs(*sys.argv[1:])
| gpl-3.0 | 6,906,900,306,652,100,000 | 30.040541 | 78 | 0.542011 | false | 3.108254 | false | false | false |
veger/ansible | lib/ansible/modules/network/f5/bigip_hostname.py | 3 | 9323 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_hostname
short_description: Manage the hostname of a BIG-IP
description:
- Manage the hostname of a BIG-IP.
version_added: 2.3
options:
hostname:
description:
- Hostname of the BIG-IP host.
required: True
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Matthew Lam (@mryanlam)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set the hostname of the BIG-IP
bigip_hostname:
hostname: bigip.localhost.localdomain
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
hostname:
description: The new hostname of the device
returned: changed
type: string
sample: big-ip01.internal
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
class Parameters(AnsibleF5Parameters):
api_attributes = ['hostname']
updatables = ['hostname']
returnables = ['hostname']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
@property
def hostname(self):
if self._values['hostname'] is None:
return None
return str(self._values['hostname'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
pass
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.have = ApiParameters()
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
changed = self.update()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _read_global_settings_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/global-settings/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
def read_current_from_device(self):
result = self._read_global_settings_from_device()
uri = "https://{0}:{1}/mgmt/tm/cm/device/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
self_device = next((x['name'] for x in response['items'] if x['selfDevice'] == "true"), None)
result['self_device'] = self_device
return ApiParameters(params=result)
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.want.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/global-settings/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.have.self_device:
uri = "https://{0}:{1}/mgmt/tm/cm/device".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='mv',
name=self.have.self_device,
target=self.want.hostname
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
hostname=dict(
required=True
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,529,788,985,031,791,000 | 28.410095 | 101 | 0.598091 | false | 4.107048 | false | false | false |
newmediamedicine/indivo_server_1_0 | indivo/models/fact_objects/vitals.py | 1 | 1155 | """
Indivo Model for Vitals
"""
from fact import Fact
from django.db import models
from django.conf import settings
class Vitals(Fact):
name = models.CharField(max_length=100)
name_type = models.CharField(max_length=80, null=True)
name_value = models.CharField(max_length=40, null=True)
name_abbrev = models.CharField(max_length=20, null=True)
measured_by=models.CharField(max_length=200, null=True)
date_measured_start=models.DateTimeField(null=True)
date_measured_end=models.DateTimeField(null=True)
result_unit=models.CharField(max_length=100, null=True)
result_textvalue=models.CharField(max_length=5000, null=True)
result_value=models.CharField(max_length=200, null=True)
result_unit_type=models.CharField(max_length=200, null=True)
result_unit_value=models.CharField(max_length=200, null=True)
result_unit_abbrev=models.CharField(max_length=200, null=True)
site = models.CharField(max_length=40, null=True)
position = models.CharField(max_length=40, null=True)
technique=models.CharField(max_length=200, null=True)
comments = models.TextField(null=True)
def __unicode__(self):
return 'Vitals %s' % self.id
| gpl-3.0 | 4,515,700,823,577,375,000 | 38.827586 | 64 | 0.758442 | false | 3.147139 | false | false | false |
jonparrott/gcloud-python | bigtable/google/cloud/bigtable/table.py | 2 | 34708 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User-friendly container for Google Cloud Bigtable Table."""
from grpc import StatusCode
from google.api_core.exceptions import RetryError
from google.api_core.exceptions import NotFound
from google.api_core.retry import if_exception_type
from google.api_core.retry import Retry
from google.api_core.gapic_v1.method import wrap_method
from google.cloud._helpers import _to_bytes
from google.cloud.bigtable.column_family import _gc_rule_from_pb
from google.cloud.bigtable.column_family import ColumnFamily
from google.cloud.bigtable.batcher import MutationsBatcher
from google.cloud.bigtable.batcher import (FLUSH_COUNT, MAX_ROW_BYTES)
from google.cloud.bigtable.row import AppendRow
from google.cloud.bigtable.row import ConditionalRow
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.row_data import PartialRowsData
from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS
from google.cloud.bigtable.row_set import RowSet
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable import enums
from google.cloud.bigtable_v2.proto import (
bigtable_pb2 as data_messages_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
table_pb2 as admin_messages_v2_pb2)
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2)
# Maximum number of mutations in bulk (MutateRowsRequest message):
# (https://cloud.google.com/bigtable/docs/reference/data/rpc/
# google.bigtable.v2#google.bigtable.v2.MutateRowRequest)
_MAX_BULK_MUTATIONS = 100000
VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY
class _BigtableRetryableError(Exception):
"""Retry-able error expected by the default retry strategy."""
DEFAULT_RETRY = Retry(
predicate=if_exception_type(_BigtableRetryableError),
initial=1.0,
maximum=15.0,
multiplier=2.0,
deadline=120.0, # 2 minutes
)
"""The default retry strategy to be used on retry-able errors.
Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`.
"""
class TableMismatchError(ValueError):
"""Row from another table."""
class TooManyMutationsError(ValueError):
"""The number of mutations for bulk request is too big."""
class Table(object):
"""Representation of a Google Cloud Bigtable Table.
.. note::
We don't define any properties on a table other than the name.
The only other fields are ``column_families`` and ``granularity``,
The ``column_families`` are not stored locally and
``granularity`` is an enum with only one value.
We can use a :class:`Table` to:
* :meth:`create` the table
* :meth:`delete` the table
* :meth:`list_column_families` in the table
:type table_id: str
:param table_id: The ID of the table.
:type instance: :class:`~google.cloud.bigtable.instance.Instance`
:param instance: The instance that owns the table.
:type app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
"""
def __init__(self, table_id, instance, app_profile_id=None):
self.table_id = table_id
self._instance = instance
self._app_profile_id = app_profile_id
@property
def name(self):
"""Table name used in requests.
.. note::
This property will not change if ``table_id`` does not, but the
return value is not cached.
The table name is of the form
``"projects/../instances/../tables/{table_id}"``
:rtype: str
:returns: The table name.
"""
project = self._instance._client.project
instance_id = self._instance.instance_id
table_client = self._instance._client.table_data_client
return table_client.table_path(
project=project, instance=instance_id, table=self.table_id)
def column_family(self, column_family_id, gc_rule=None):
"""Factory to create a column family associated with this table.
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type gc_rule: :class:`.GarbageCollectionRule`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
:rtype: :class:`.ColumnFamily`
:returns: A column family owned by this table.
"""
return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
def row(self, row_key, filter_=None, append=False):
"""Factory to create a row associated with this table.
.. warning::
At most one of ``filter_`` and ``append`` can be used in a
:class:`~google.cloud.bigtable.row.Row`.
:type row_key: bytes
:param row_key: The key for the row being created.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) Filter to be used for conditional mutations.
See :class:`.ConditionalRow` for more details.
:type append: bool
:param append: (Optional) Flag to determine if the row should be used
for append mutations.
:rtype: :class:`~google.cloud.bigtable.row.Row`
:returns: A row owned by this table.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``filter_`` and ``append`` are used.
"""
if append and filter_ is not None:
raise ValueError('At most one of filter_ and append can be set')
if append:
return AppendRow(row_key, self)
elif filter_ is not None:
return ConditionalRow(row_key, self, filter_=filter_)
else:
return DirectRow(row_key, self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (other.table_id == self.table_id and
other._instance == self._instance)
def __ne__(self, other):
return not self == other
def create(self, initial_split_keys=[], column_families={}):
"""Creates this table.
.. note::
A create request returns a
:class:`._generated.table_pb2.Table` but we don't use
this response.
:type initial_split_keys: list
:param initial_split_keys: (Optional) list of row keys in bytes that
will be used to initially split the table
into several tablets.
:type column_families: dict
:param column_failies: (Optional) A map columns to create. The key is
the column_id str and the value is a
:class:`GarbageCollectionRule`
"""
table_client = self._instance._client.table_admin_client
instance_name = self._instance.name
families = {id: ColumnFamily(id, self, rule).to_pb()
for (id, rule) in column_families.items()}
table = admin_messages_v2_pb2.Table(column_families=families)
split = table_admin_messages_v2_pb2.CreateTableRequest.Split
splits = [split(key=_to_bytes(key)) for key in initial_split_keys]
table_client.create_table(parent=instance_name, table_id=self.table_id,
table=table, initial_splits=splits)
def exists(self):
"""Check whether the table exists.
:rtype: bool
:returns: True if the table exists, else False.
"""
table_client = self._instance._client.table_admin_client
try:
table_client.get_table(name=self.name, view=VIEW_NAME_ONLY)
return True
except NotFound:
return False
def delete(self):
"""Delete this table."""
table_client = self._instance._client.table_admin_client
table_client.delete_table(name=self.name)
def list_column_families(self):
"""List the column families owned by this table.
:rtype: dict
:returns: Dictionary of column families attached to this table. Keys
are strings (column family names) and values are
:class:`.ColumnFamily` instances.
:raises: :class:`ValueError <exceptions.ValueError>` if the column
family name from the response does not agree with the computed
name from the column family ID.
"""
table_client = self._instance._client.table_admin_client
table_pb = table_client.get_table(self.name)
result = {}
for column_family_id, value_pb in table_pb.column_families.items():
gc_rule = _gc_rule_from_pb(value_pb.gc_rule)
column_family = self.column_family(column_family_id,
gc_rule=gc_rule)
result[column_family_id] = column_family
return result
def get_cluster_states(self):
"""List the cluster states owned by this table.
:rtype: dict
:returns: Dictionary of cluster states for this table.
Keys are cluster ids and values are
:class: 'ClusterState' instances.
"""
REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW
table_client = self._instance._client.table_admin_client
table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW)
return {cluster_id: ClusterState(value_pb.replication_state)
for cluster_id, value_pb in table_pb.cluster_states.items()}
def read_row(self, row_key, filter_=None):
"""Read a single row from this table.
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
"""
row_set = RowSet()
row_set.add_row_key(row_key)
result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set))
row = next(result_iter, None)
if next(result_iter, None) is not None:
raise ValueError('More than one row was returned.')
return row
def read_rows(self, start_key=None, end_key=None, limit=None,
filter_=None, end_inclusive=False, row_set=None,
retry=DEFAULT_RETRY_READ_ROWS):
"""Read rows from this table.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads every column in
each row.
:type end_inclusive: bool
:param end_inclusive: (Optional) Whether the ``end_key`` should be
considered inclusive. The default is False (exclusive).
:type row_set: :class:`row_set.RowSet`
:param filter_: (Optional) The row set containing multiple row keys and
row_ranges.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry:
(Optional) Retry delay and deadline arguments. To override, the
default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and
modified with the :meth:`~google.api_core.retry.Retry.with_delay`
method or the :meth:`~google.api_core.retry.Retry.with_deadline`
method.
:rtype: :class:`.PartialRowsData`
:returns: A :class:`.PartialRowsData` a generator for consuming
the streamed results.
"""
request_pb = _create_row_request(
self.name, start_key=start_key, end_key=end_key,
filter_=filter_, limit=limit, end_inclusive=end_inclusive,
app_profile_id=self._app_profile_id, row_set=row_set)
data_client = self._instance._client.table_data_client
return PartialRowsData(
data_client.transport.read_rows,
request_pb, retry)
def yield_rows(self, **kwargs):
"""Read rows from this table.
.. warning::
This method will be removed in future releases. Please use
``read_rows`` instead.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads every column in
each row.
:type row_set: :class:`row_set.RowSet`
:param filter_: (Optional) The row set containing multiple row keys and
row_ranges.
:rtype: :class:`.PartialRowData`
:returns: A :class:`.PartialRowData` for each row returned
"""
return self.read_rows(**kwargs)
def mutate_rows(self, rows, retry=DEFAULT_RETRY):
"""Mutates multiple rows in bulk.
The method tries to update all specified rows.
If some of the rows weren't updated, it would not remove mutations.
They can be applied to the row separately.
If row mutations finished successfully, they would be cleaned up.
Optionally, a ``retry`` strategy can be specified to re-attempt
mutations on rows that return transient errors. This method will retry
until all rows succeed or until the request deadline is reached. To
specify a ``retry`` strategy of "do-nothing", a deadline of ``0.0``
can be specified.
:type rows: list
:param rows: List or other iterable of :class:`.DirectRow` instances.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry:
(Optional) Retry delay and deadline arguments. To override, the
default value :attr:`DEFAULT_RETRY` can be used and modified with
the :meth:`~google.api_core.retry.Retry.with_delay` method or the
:meth:`~google.api_core.retry.Retry.with_deadline` method.
:rtype: list
:returns: A list of response statuses (`google.rpc.status_pb2.Status`)
corresponding to success or failure of each row mutation
sent. These will be in the same order as the `rows`.
"""
retryable_mutate_rows = _RetryableMutateRowsWorker(
self._instance._client, self.name, rows,
app_profile_id=self._app_profile_id)
return retryable_mutate_rows(retry=retry)
def sample_row_keys(self):
"""Read a sample of row keys in the table.
The returned row keys will delimit contiguous sections of the table of
approximately equal size, which can be used to break up the data for
distributed tasks like mapreduces.
The elements in the iterator are a SampleRowKeys response and they have
the properties ``offset_bytes`` and ``row_key``. They occur in sorted
order. The table might have contents before the first row key in the
list and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given, if
present.
.. note::
Row keys in this list may not have ever been written to or read
from, and users should therefore not make any assumptions about the
row key structure that are specific to their use case.
The ``offset_bytes`` field on a response indicates the approximate
total storage space used by all rows in the table which precede
``row_key``. Buffering the contents of all rows between two subsequent
samples would require space roughly equal to the difference in their
``offset_bytes`` fields.
:rtype: :class:`~google.cloud.exceptions.GrpcRendezvous`
:returns: A cancel-able iterator. Can be consumed by calling ``next()``
or by casting to a :class:`list` and can be cancelled by
calling ``cancel()``.
"""
data_client = self._instance._client.table_data_client
response_iterator = data_client.sample_row_keys(
self.name, app_profile_id=self._app_profile_id)
return response_iterator
def truncate(self, timeout=None):
"""Truncate the table
:type timeout: float
:param timeout: (Optional) The amount of time, in seconds, to wait
for the request to complete.
:raise: google.api_core.exceptions.GoogleAPICallError: If the
request failed for any reason.
google.api_core.exceptions.RetryError: If the request failed
due to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
client = self._instance._client
table_admin_client = client.table_admin_client
if timeout:
table_admin_client.drop_row_range(
self.name, delete_all_data_from_table=True, timeout=timeout)
else:
table_admin_client.drop_row_range(
self.name, delete_all_data_from_table=True)
def drop_by_prefix(self, row_key_prefix, timeout=None):
"""
:type row_prefix: bytes
:param row_prefix: Delete all rows that start with this row key
prefix. Prefix cannot be zero length.
:type timeout: float
:param timeout: (Optional) The amount of time, in seconds, to wait
for the request to complete.
:raise: google.api_core.exceptions.GoogleAPICallError: If the
request failed for any reason.
google.api_core.exceptions.RetryError: If the request failed
due to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
client = self._instance._client
table_admin_client = client.table_admin_client
if timeout:
table_admin_client.drop_row_range(
self.name, row_key_prefix=_to_bytes(row_key_prefix),
timeout=timeout)
else:
table_admin_client.drop_row_range(
self.name, row_key_prefix=_to_bytes(row_key_prefix))
def mutations_batcher(self, flush_count=FLUSH_COUNT,
max_row_bytes=MAX_ROW_BYTES):
"""Factory to create a mutation batcher associated with this instance.
:type table: class
:param table: class:`~google.cloud.bigtable.table.Table`.
:type flush_count: int
:param flush_count: (Optional) Maximum number of rows per batch. If it
reaches the max number of rows it calls finish_batch() to
mutate the current row batch. Default is FLUSH_COUNT (1000
rows).
:type max_row_bytes: int
:param max_row_bytes: (Optional) Max number of row mutations size to
flush. If it reaches the max number of row mutations size it
calls finish_batch() to mutate the current row batch.
Default is MAX_ROW_BYTES (5 MB).
"""
return MutationsBatcher(self, flush_count, max_row_bytes)
class _RetryableMutateRowsWorker(object):
"""A callable worker that can retry to mutate rows with transient errors.
This class is a callable that can retry mutating rows that result in
transient errors. After all rows are successful or none of the rows
are retryable, any subsequent call on this callable will be a no-op.
"""
# pylint: disable=unsubscriptable-object
RETRY_CODES = (
StatusCode.DEADLINE_EXCEEDED.value[0],
StatusCode.ABORTED.value[0],
StatusCode.UNAVAILABLE.value[0],
)
# pylint: enable=unsubscriptable-object
def __init__(self, client, table_name, rows, app_profile_id=None):
self.client = client
self.table_name = table_name
self.rows = rows
self.app_profile_id = app_profile_id
self.responses_statuses = [None] * len(self.rows)
def __call__(self, retry=DEFAULT_RETRY):
"""Attempt to mutate all rows and retry rows with transient errors.
Will retry the rows with transient errors until all rows succeed or
``deadline`` specified in the `retry` is reached.
:rtype: list
:returns: A list of response statuses (`google.rpc.status_pb2.Status`)
corresponding to success or failure of each row mutation
sent. These will be in the same order as the ``rows``.
"""
mutate_rows = self._do_mutate_retryable_rows
if retry:
mutate_rows = retry(self._do_mutate_retryable_rows)
try:
mutate_rows()
except (_BigtableRetryableError, RetryError):
# - _BigtableRetryableError raised when no retry strategy is used
# and a retryable error on a mutation occurred.
# - RetryError raised when retry deadline is reached.
# In both cases, just return current `responses_statuses`.
pass
return self.responses_statuses
@staticmethod
def _is_retryable(status):
return (status is None or
status.code in _RetryableMutateRowsWorker.RETRY_CODES)
def _do_mutate_retryable_rows(self):
"""Mutate all the rows that are eligible for retry.
A row is eligible for retry if it has not been tried or if it resulted
in a transient error in a previous call.
:rtype: list
:return: The responses statuses, which is a list of
:class:`~google.rpc.status_pb2.Status`.
:raises: One of the following:
* :exc:`~.table._BigtableRetryableError` if any
row returned a transient error.
* :exc:`RuntimeError` if the number of responses doesn't
match the number of rows that were retried
"""
retryable_rows = []
index_into_all_rows = []
for index, status in enumerate(self.responses_statuses):
if self._is_retryable(status):
retryable_rows.append(self.rows[index])
index_into_all_rows.append(index)
if not retryable_rows:
# All mutations are either successful or non-retryable now.
return self.responses_statuses
mutate_rows_request = _mutate_rows_request(
self.table_name, retryable_rows,
app_profile_id=self.app_profile_id)
data_client = self.client.table_data_client
inner_api_calls = data_client._inner_api_calls
if 'mutate_rows' not in inner_api_calls:
default_retry = data_client._method_configs['MutateRows'].retry,
default_timeout = data_client._method_configs['MutateRows'].timeout
data_client._inner_api_calls[
'mutate_rows'] = wrap_method(
data_client.transport.mutate_rows,
default_retry=default_retry,
default_timeout=default_timeout,
client_info=data_client._client_info,
)
responses = data_client._inner_api_calls['mutate_rows'](
mutate_rows_request, retry=None)
num_responses = 0
num_retryable_responses = 0
for response in responses:
for entry in response.entries:
num_responses += 1
index = index_into_all_rows[entry.index]
self.responses_statuses[index] = entry.status
if self._is_retryable(entry.status):
num_retryable_responses += 1
if entry.status.code == 0:
self.rows[index].clear()
if len(retryable_rows) != num_responses:
raise RuntimeError(
'Unexpected number of responses', num_responses,
'Expected', len(retryable_rows))
if num_retryable_responses:
raise _BigtableRetryableError
return self.responses_statuses
class ClusterState(object):
"""Representation of a Cluster State.
:type replication_state: int
:param replication_state: enum value for cluster state
Possible replications_state values are
0 for STATE_NOT_KNOWN: The replication state of the table is
unknown in this cluster.
1 for INITIALIZING: The cluster was recently created, and the
table must finish copying
over pre-existing data from other clusters before it can
begin receiving live replication updates and serving
``Data API`` requests.
2 for PLANNED_MAINTENANCE: The table is temporarily unable to
serve
``Data API`` requests from this
cluster due to planned internal maintenance.
3 for UNPLANNED_MAINTENANCE: The table is temporarily unable
to serve
``Data API`` requests from this
cluster due to unplanned or emergency maintenance.
4 for READY: The table can serve
``Data API`` requests from this
cluster. Depending on replication delay, reads may not
immediately reflect the state of the table in other clusters.
"""
def __init__(self, replication_state):
self.replication_state = replication_state
def __repr__(self):
"""Representation of cluster state instance as string value
for cluster state.
:rtype: ClusterState instance
:returns: ClusterState instance as representation of string
value for cluster state.
"""
replication_dict = {
enums.Table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
enums.Table.ReplicationState.INITIALIZING: "INITIALIZING",
enums.Table.ReplicationState.PLANNED_MAINTENANCE:
"PLANNED_MAINTENANCE",
enums.Table.ReplicationState.UNPLANNED_MAINTENANCE:
"UNPLANNED_MAINTENANCE",
enums.Table.ReplicationState.READY: "READY"
}
return replication_dict[self.replication_state]
def __eq__(self, other):
"""Checks if two ClusterState instances(self and other) are
equal on the basis of instance variable 'replication_state'.
:type other: ClusterState
:param other: ClusterState instance to compare with.
:rtype: Boolean value
:returns: True if two cluster state instances have same
replication_state.
"""
if not isinstance(other, self.__class__):
return False
return self.replication_state == other.replication_state
def __ne__(self, other):
"""Checks if two ClusterState instances(self and other) are
not equal.
:type other: ClusterState.
:param other: ClusterState instance to compare with.
:rtype: Boolean value.
:returns: True if two cluster state instances are not equal.
"""
return not self == other
def _create_row_request(table_name, start_key=None, end_key=None,
filter_=None, limit=None, end_inclusive=False,
app_profile_id=None, row_set=None):
"""Creates a request to read rows in a table.
:type table_name: str
:param table_name: The name of the table to read from.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads the entire table.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:type end_inclusive: bool
:param end_inclusive: (Optional) Whether the ``end_key`` should be
considered inclusive. The default is False (exclusive).
:type: app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
:type row_set: :class:`row_set.RowSet`
:param filter_: (Optional) The row set containing multiple row keys and
row_ranges.
:rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
:returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``row_set`` and one of ``start_key`` or ``end_key`` are set
"""
request_kwargs = {'table_name': table_name}
if ((start_key is not None or end_key is not None) and
row_set is not None):
raise ValueError('Row range and row set cannot be '
'set simultaneously')
if filter_ is not None:
request_kwargs['filter'] = filter_.to_pb()
if limit is not None:
request_kwargs['rows_limit'] = limit
if app_profile_id is not None:
request_kwargs['app_profile_id'] = app_profile_id
message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)
if start_key is not None or end_key is not None:
row_set = RowSet()
row_set.add_row_range(RowRange(start_key, end_key,
end_inclusive=end_inclusive))
if row_set is not None:
row_set._update_message_request(message)
return message
def _mutate_rows_request(table_name, rows, app_profile_id=None):
"""Creates a request to mutate rows in a table.
:type table_name: str
:param table_name: The name of the table to write to.
:type rows: list
:param rows: List or other iterable of :class:`.DirectRow` instances.
:type: app_profile_id: str
:param app_profile_id: (Optional) The unique name of the AppProfile.
:rtype: :class:`data_messages_v2_pb2.MutateRowsRequest`
:returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs.
:raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is
greater than 100,000
"""
request_pb = data_messages_v2_pb2.MutateRowsRequest(
table_name=table_name, app_profile_id=app_profile_id)
mutations_count = 0
for row in rows:
_check_row_table_name(table_name, row)
_check_row_type(row)
mutations = row._get_mutations()
request_pb.entries.add(row_key=row.row_key, mutations=mutations)
mutations_count += len(mutations)
if mutations_count > _MAX_BULK_MUTATIONS:
raise TooManyMutationsError('Maximum number of mutations is %s' %
(_MAX_BULK_MUTATIONS,))
return request_pb
def _check_row_table_name(table_name, row):
"""Checks that a row belongs to a table.
:type table_name: str
:param table_name: The name of the table.
:type row: :class:`~google.cloud.bigtable.row.Row`
:param row: An instance of :class:`~google.cloud.bigtable.row.Row`
subclasses.
:raises: :exc:`~.table.TableMismatchError` if the row does not belong to
the table.
"""
if row.table is not None and row.table.name != table_name:
raise TableMismatchError(
'Row %s is a part of %s table. Current table: %s' %
(row.row_key, row.table.name, table_name))
def _check_row_type(row):
"""Checks that a row is an instance of :class:`.DirectRow`.
:type row: :class:`~google.cloud.bigtable.row.Row`
:param row: An instance of :class:`~google.cloud.bigtable.row.Row`
subclasses.
:raises: :class:`TypeError <exceptions.TypeError>` if the row is not an
instance of DirectRow.
"""
if not isinstance(row, DirectRow):
raise TypeError('Bulk processing can not be applied for '
'conditional or append mutations.')
| apache-2.0 | -2,126,930,044,904,904,200 | 39.171296 | 79 | 0.620116 | false | 4.232167 | false | false | false |
openchordcharts/openchordcharts-api | openchordcharts_api/controllers/charts.py | 2 | 9218 | # -*- coding: utf-8 -*-
# Open Chord Charts -- Database of free chord charts
# By: Christophe Benz <[email protected]>
#
# Copyright (C) 2012 Christophe Benz
# https://github.com/openchordcharts/
#
# This file is part of Open Chord Charts.
#
# Open Chord Charts is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Open Chord Charts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Charts controller functions."""
import re
import webob
from webob.dec import wsgify
from .. import conf, contexts, conv, model, urls, wsgihelpers
# Routes
def route_api1(environ, start_response):
req = webob.Request(environ)
ctx = contexts.Ctx(req)
chart, error = conv.pipe(
conv.input_to_slug,
conv.not_none,
model.Chart.make_id_or_slug_to_instance(),
)(req.urlvars.get('id_or_slug'), state=ctx)
if error is not None:
return wsgihelpers.not_found(ctx, message=error)(environ, start_response)
ctx.node = chart
router = urls.make_router(
('GET', '^$', api1_view),
('POST', '^$', api1_create_or_edit),
(('GET', 'POST'), '^/delete$', api1_delete),
)
return router(environ, start_response)
def route_api1_class(environ, start_response):
router = urls.make_router(
('GET', '^$', api1_search),
('POST', '^$', api1_create_or_edit),
(None, '^/(?P<id_or_slug>[^/]+)(?=/|$)', route_api1),
)
return router(environ, start_response)
# Controllers
@wsgify
def api1_create_or_edit(req):
ctx = contexts.Ctx(req)
user = model.get_user(ctx, check=True)
is_create_mode = ctx.node is None
chart_attributes, errors = conv.pipe(
conv.input_to_json_dict,
conv.struct(
{
'composers': conv.validate_list_of_strings,
'compositionYear': conv.test_isinstance(int),
'genre': conv.pipe(
conv.test_isinstance(unicode),
conv.cleanup_line,
),
'interpretations': conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.pipe(
conv.test_isinstance(dict),
conv.struct(
{
'externalLinks': conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.pipe(
conv.test_isinstance(unicode),
conv.make_str_to_url(full=True),
conv.not_none,
),
),
),
'interpreterName': conv.empty_to_none,
'year': conv.test_isinstance(int),
},
default=None, # Fail if unexpected item.
),
conv.empty_to_none,
conv.not_none,
),
),
conv.empty_to_none,
),
'key': conv.pipe(
conv.test_isinstance(unicode),
conv.cleanup_line,
conv.str_to_chart_key,
conv.not_none,
),
'parts': conv.pipe(
conv.test_isinstance(dict),
conv.uniform_mapping(
conv.cleanup_line,
conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.pipe(
conv.test_isinstance(dict),
conv.struct(
{
'alterations': conv.pipe(
conv.test_isinstance(list),
conv.uniform_sequence(
conv.pipe(
conv.test_isinstance(unicode),
conv.empty_to_none,
conv.not_none,
),
),
),
'degree': conv.pipe(
conv.test_isinstance(int),
conv.test_between(0, 11),
),
'duration': conv.anything_to_float,
},
default=None, # Fail if unexpected item.
),
conv.empty_to_none,
conv.not_none,
),
),
),
),
),
'structure': conv.validate_list_of_strings,
'title': conv.pipe(
conv.test_isinstance(unicode),
conv.cleanup_line,
conv.not_none,
),
},
default=None, # Fail if unexpected item.
),
conv.validate_structure_and_parts,
)(req.body, state=ctx)
if errors is not None:
return wsgihelpers.bad_request(ctx, errors=errors, message=ctx._(u'Invalid JSON'))
chart_already_exists = lambda ctx, slug: \
wsgihelpers.bad_request(ctx, message=ctx._(u'Chart with slug "{}" already exists'.format(slug)))
if is_create_mode:
slug = conv.slugify(chart_attributes['title'])
existing_chart = model.Chart.find_one({'slug': slug})
if existing_chart is not None:
return chart_already_exists(ctx, slug)
chart_attributes['owner_account_id'] = user._id
chart = model.Chart(**chart_attributes)
else:
chart = ctx.node
model.check_owner(ctx, user, chart)
slug = conv.slugify(chart_attributes['title'])
existing_chart = model.Chart.find_one({'_id': {'$ne': chart._id}, 'slug': slug})
if existing_chart is not None:
return chart_already_exists(ctx, slug)
chart.set_attributes(**chart_attributes)
chart.compute_attributes()
chart.save(safe=True)
return wsgihelpers.respond_json(ctx, {'chart': chart.to_json(state=ctx)})
@wsgify
def api1_delete(req):
ctx = contexts.Ctx(req)
user = model.get_user(ctx, check=True)
chart = ctx.node
model.check_owner(ctx, user, chart)
chart.delete(safe=True)
return wsgihelpers.respond_json(ctx, {'delete': 'ok'})
@wsgify
def api1_search(req):
ctx = contexts.Ctx(req)
data, errors = conv.struct(
{
'ownerSlug': conv.cleanup_line,
'q': conv.cleanup_line,
},
default=None, # Fail if unexpected item.
)(req.params, state=conv.default_state)
if errors is not None:
return wsgihelpers.bad_request(ctx, errors=errors)
spec = {}
keywords = None
if data['q'] is not None:
keywords = data['q'].strip().split()
spec['keywords'] = {'$all': [re.compile(u'^{0}'.format(re.escape(keyword))) for keyword in keywords]}
if data['ownerSlug']:
owner_account = model.Account.find_one({'username': data['ownerSlug']})
if owner_account is None:
return wsgihelpers.bad_request(ctx, message=ctx._(u'Invalid account: {}'.format(data['ownerSlug'])))
spec['owner_account_id'] = owner_account._id
charts_cursor = model.Chart.find(spec).sort('slug').limit(conf['charts.limit'])
return wsgihelpers.respond_json(ctx, {
'charts': [chart.to_json(state=ctx, with_owner=True) for chart in charts_cursor],
})
@wsgify
def api1_view(req):
ctx = contexts.Ctx(req)
chart = ctx.node
return wsgihelpers.respond_json(ctx, {'chart': chart.to_json(state=ctx, with_owner=True)})
| agpl-3.0 | 2,694,175,666,760,201,700 | 38.393162 | 112 | 0.466913 | false | 4.58607 | true | false | false |
spring-week-topos/cinder-week | cinder/volume/drivers/zadara.py | 3 | 26060 | # Copyright (c) 2012 Zadara Storage, Inc.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Zadara Virtual Private Storage Array (VPSA).
This driver requires VPSA with API ver.13.07 or higher.
"""
import httplib
from lxml import etree
from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.volume import driver
LOG = logging.getLogger(__name__)
zadara_opts = [
cfg.StrOpt('zadara_vpsa_ip',
default=None,
help='Management IP of Zadara VPSA'),
cfg.StrOpt('zadara_vpsa_port',
default=None,
help='Zadara VPSA port number'),
cfg.BoolOpt('zadara_vpsa_use_ssl',
default=False,
help='Use SSL connection'),
cfg.StrOpt('zadara_user',
default=None,
help='User name for the VPSA'),
cfg.StrOpt('zadara_password',
default=None,
help='Password for the VPSA',
secret=True),
cfg.StrOpt('zadara_vpsa_poolname',
default=None,
help='Name of VPSA storage pool for volumes'),
cfg.BoolOpt('zadara_vol_thin',
default=True,
help='Default thin provisioning policy for volumes'),
cfg.BoolOpt('zadara_vol_encrypt',
default=False,
help='Default encryption policy for volumes'),
cfg.StrOpt('zadara_vol_name_template',
default='OS_%s',
help='Default template for VPSA volume names'),
cfg.BoolOpt('zadara_vpsa_auto_detach_on_delete',
default=True,
help="Automatically detach from servers on volume delete"),
cfg.BoolOpt('zadara_vpsa_allow_nonexistent_delete',
default=True,
help="Don't halt on deletion of non-existing volumes"), ]
CONF = cfg.CONF
CONF.register_opts(zadara_opts)
class ZadaraVPSAConnection(object):
"""Executes volume driver commands on VPSA."""
def __init__(self, conf):
self.conf = conf
self.access_key = None
self.ensure_connection()
def _generate_vpsa_cmd(self, cmd, **kwargs):
"""Generate command to be sent to VPSA."""
def _joined_params(params):
param_str = []
for k, v in params.items():
param_str.append("%s=%s" % (k, v))
return '&'.join(param_str)
# Dictionary of applicable VPSA commands in the following format:
# 'command': (method, API_URL, {optional parameters})
vpsa_commands = {
'login': ('POST',
'/api/users/login.xml',
{'user': self.conf.zadara_user,
'password': self.conf.zadara_password}),
# Volume operations
'create_volume': ('POST',
'/api/volumes.xml',
{'name': kwargs.get('name'),
'capacity': kwargs.get('size'),
'pool': self.conf.zadara_vpsa_poolname,
'thin': 'YES'
if self.conf.zadara_vol_thin else 'NO',
'crypt': 'YES'
if self.conf.zadara_vol_encrypt else 'NO'}),
'delete_volume': ('DELETE',
'/api/volumes/%s.xml' % kwargs.get('vpsa_vol'),
{}),
'expand_volume': ('POST',
'/api/volumes/%s/expand.xml'
% kwargs.get('vpsa_vol'),
{'capacity': kwargs.get('size')}),
# Snapshot operations
'create_snapshot': ('POST',
'/api/consistency_groups/%s/snapshots.xml'
% kwargs.get('cg_name'),
{'display_name': kwargs.get('snap_name')}),
'delete_snapshot': ('DELETE',
'/api/snapshots/%s.xml'
% kwargs.get('snap_id'),
{}),
'create_clone_from_snap': ('POST',
'/api/consistency_groups/%s/clone.xml'
% kwargs.get('cg_name'),
{'name': kwargs.get('name'),
'snapshot': kwargs.get('snap_id')}),
'create_clone': ('POST',
'/api/consistency_groups/%s/clone.xml'
% kwargs.get('cg_name'),
{'name': kwargs.get('name')}),
# Server operations
'create_server': ('POST',
'/api/servers.xml',
{'display_name': kwargs.get('initiator'),
'iqn': kwargs.get('initiator')}),
# Attach/Detach operations
'attach_volume': ('POST',
'/api/servers/%s/volumes.xml'
% kwargs.get('vpsa_srv'),
{'volume_name[]': kwargs.get('vpsa_vol'),
'force': 'NO'}),
'detach_volume': ('POST',
'/api/volumes/%s/detach.xml'
% kwargs.get('vpsa_vol'),
{'server_name[]': kwargs.get('vpsa_srv'),
'force': 'NO'}),
# Get operations
'list_volumes': ('GET',
'/api/volumes.xml',
{}),
'list_pools': ('GET',
'/api/pools.xml',
{}),
'list_controllers': ('GET',
'/api/vcontrollers.xml',
{}),
'list_servers': ('GET',
'/api/servers.xml',
{}),
'list_vol_attachments': ('GET',
'/api/volumes/%s/servers.xml'
% kwargs.get('vpsa_vol'),
{}),
'list_vol_snapshots': ('GET',
'/api/consistency_groups/%s/snapshots.xml'
% kwargs.get('cg_name'),
{})}
if cmd not in vpsa_commands.keys():
raise exception.UnknownCmd(cmd=cmd)
else:
(method, url, params) = vpsa_commands[cmd]
if method == 'GET':
# For GET commands add parameters to the URL
params.update(dict(access_key=self.access_key,
page=1, start=0, limit=0))
url += '?' + _joined_params(params)
body = ''
elif method == 'DELETE':
# For DELETE commands add parameters to the URL
params.update(dict(access_key=self.access_key))
url += '?' + _joined_params(params)
body = ''
elif method == 'POST':
if self.access_key:
params.update(dict(access_key=self.access_key))
body = _joined_params(params)
else:
raise exception.UnknownCmd(cmd=method)
return (method, url, body)
def ensure_connection(self, cmd=None):
"""Retrieve access key for VPSA connection."""
if self.access_key or cmd == 'login':
return
cmd = 'login'
xml_tree = self.send_cmd(cmd)
user = xml_tree.find('user')
if user is None:
raise exception.MalformedResponse(cmd=cmd,
reason='no "user" field')
access_key = user.findtext('access-key')
if access_key is None:
raise exception.MalformedResponse(cmd=cmd,
reason='no "access-key" field')
self.access_key = access_key
def send_cmd(self, cmd, **kwargs):
"""Send command to VPSA Controller."""
self.ensure_connection(cmd)
(method, url, body) = self._generate_vpsa_cmd(cmd, **kwargs)
LOG.debug(_('Sending %(method)s to %(url)s. Body "%(body)s"'),
{'method': method, 'url': url, 'body': body})
if self.conf.zadara_vpsa_use_ssl:
connection = httplib.HTTPSConnection(self.conf.zadara_vpsa_ip,
self.conf.zadara_vpsa_port)
else:
connection = httplib.HTTPConnection(self.conf.zadara_vpsa_ip,
self.conf.zadara_vpsa_port)
connection.request(method, url, body)
response = connection.getresponse()
if response.status != 200:
connection.close()
raise exception.BadHTTPResponseStatus(status=response.status)
data = response.read()
connection.close()
xml_tree = etree.fromstring(data)
status = xml_tree.findtext('status')
if status != '0':
raise exception.FailedCmdWithDump(status=status, data=data)
if method in ['POST', 'DELETE']:
LOG.debug(_('Operation completed. %(data)s'), {'data': data})
return xml_tree
class ZadaraVPSAISCSIDriver(driver.ISCSIDriver):
"""Zadara VPSA iSCSI volume driver."""
VERSION = '13.07'
def __init__(self, *args, **kwargs):
super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(zadara_opts)
def do_setup(self, context):
"""Any initialization the volume driver does while starting.
Establishes initial connection with VPSA and retrieves access_key.
"""
self.vpsa = ZadaraVPSAConnection(self.configuration)
def check_for_setup_error(self):
"""Returns an error (exception) if prerequisites aren't met."""
self.vpsa.ensure_connection()
def local_path(self, volume):
"""Return local path to existing local volume."""
raise NotImplementedError()
def _xml_parse_helper(self, xml_tree, first_level, search_tuple,
first=True):
"""Helper for parsing VPSA's XML output.
Returns single item if first==True or list for multiple selection.
If second argument in search_tuple is None - returns all items with
appropriate key.
"""
objects = xml_tree.find(first_level)
if objects is None:
return None
result_list = []
(key, value) = search_tuple
for object in objects.getchildren():
found_value = object.findtext(key)
if found_value and (found_value == value or value is None):
if first:
return object
else:
result_list.append(object)
return result_list if result_list else None
def _get_vpsa_volume_name_and_size(self, name):
"""Return VPSA's name & size for the volume."""
xml_tree = self.vpsa.send_cmd('list_volumes')
volume = self._xml_parse_helper(xml_tree, 'volumes',
('display-name', name))
if volume is not None:
return (volume.findtext('name'),
int(volume.findtext('virtual-capacity')))
return (None, None)
def _get_vpsa_volume_name(self, name):
"""Return VPSA's name for the volume."""
(vol_name, size) = self._get_vpsa_volume_name_and_size(name)
return vol_name
def _get_volume_cg_name(self, name):
"""Return name of the consistency group for the volume."""
xml_tree = self.vpsa.send_cmd('list_volumes')
volume = self._xml_parse_helper(xml_tree, 'volumes',
('display-name', name))
if volume is not None:
return volume.findtext('cg-name')
return None
def _get_snap_id(self, cg_name, snap_name):
"""Return snapshot ID for particular volume."""
xml_tree = self.vpsa.send_cmd('list_vol_snapshots',
cg_name=cg_name)
snap = self._xml_parse_helper(xml_tree, 'snapshots',
('display-name', snap_name))
if snap is not None:
return snap.findtext('name')
return None
def _get_pool_capacity(self, pool_name):
"""Return pool's total and available capacities."""
xml_tree = self.vpsa.send_cmd('list_pools')
pool = self._xml_parse_helper(xml_tree, 'pools',
('name', pool_name))
if pool is not None:
total = int(pool.findtext('capacity'))
free = int(float(pool.findtext('available-capacity')))
LOG.debug(_('Pool %(name)s: %(total)sGB total, %(free)sGB free'),
{'name': pool_name, 'total': total, 'free': free})
return (total, free)
return ('infinite', 'infinite')
def _get_active_controller_details(self):
"""Return details of VPSA's active controller."""
xml_tree = self.vpsa.send_cmd('list_controllers')
ctrl = self._xml_parse_helper(xml_tree, 'vcontrollers',
('state', 'active'))
if ctrl is not None:
return dict(target=ctrl.findtext('target'),
ip=ctrl.findtext('iscsi-ip'),
chap_user=ctrl.findtext('chap-username'),
chap_passwd=ctrl.findtext('chap-target-secret'))
return None
def _get_server_name(self, initiator):
"""Return VPSA's name for server object with given IQN."""
xml_tree = self.vpsa.send_cmd('list_servers')
server = self._xml_parse_helper(xml_tree, 'servers',
('iqn', initiator))
if server is not None:
return server.findtext('name')
return None
def _create_vpsa_server(self, initiator):
"""Create server object within VPSA (if doesn't exist)."""
vpsa_srv = self._get_server_name(initiator)
if not vpsa_srv:
xml_tree = self.vpsa.send_cmd('create_server', initiator=initiator)
vpsa_srv = xml_tree.findtext('server-name')
return vpsa_srv
def create_volume(self, volume):
"""Create volume."""
self.vpsa.send_cmd(
'create_volume',
name=self.configuration.zadara_vol_name_template % volume['name'],
size=volume['size'])
def delete_volume(self, volume):
"""Delete volume.
Return ok if doesn't exist. Auto detach from all servers.
"""
# Get volume name
name = self.configuration.zadara_vol_name_template % volume['name']
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
msg = _('Volume %(name)s could not be found. '
'It might be already deleted') % {'name': name}
LOG.warning(msg)
if self.configuration.zadara_vpsa_allow_nonexistent_delete:
return
else:
raise exception.VolumeNotFound(volume_id=name)
# Check attachment info and detach from all
xml_tree = self.vpsa.send_cmd('list_vol_attachments',
vpsa_vol=vpsa_vol)
servers = self._xml_parse_helper(xml_tree, 'servers',
('iqn', None), first=False)
if servers:
if not self.configuration.zadara_vpsa_auto_detach_on_delete:
raise exception.VolumeAttached(volume_id=name)
for server in servers:
vpsa_srv = server.findtext('name')
if vpsa_srv:
self.vpsa.send_cmd('detach_volume',
vpsa_srv=vpsa_srv,
vpsa_vol=vpsa_vol)
# Delete volume
self.vpsa.send_cmd('delete_volume', vpsa_vol=vpsa_vol)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug(_('Create snapshot: %s'), snapshot['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
% snapshot['volume_name']
cg_name = self._get_volume_cg_name(volume_name)
if not cg_name:
msg = _('Volume %(name)s not found') % {'name': volume_name}
LOG.error(msg)
raise exception.VolumeNotFound(volume_id=volume_name)
self.vpsa.send_cmd('create_snapshot',
cg_name=cg_name,
snap_name=snapshot['name'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug(_('Delete snapshot: %s'), snapshot['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
% snapshot['volume_name']
cg_name = self._get_volume_cg_name(volume_name)
if not cg_name:
# If the volume isn't present, then don't attempt to delete
LOG.warning(_("snapshot: original volume %s not found, "
"skipping delete operation")
% snapshot['volume_name'])
return True
snap_id = self._get_snap_id(cg_name, snapshot['name'])
if not snap_id:
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_("snapshot: snapshot %s not found, "
"skipping delete operation")
% snapshot['name'])
return True
self.vpsa.send_cmd('delete_snapshot',
snap_id=snap_id)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug(_('Creating volume from snapshot: %s') % snapshot['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
% snapshot['volume_name']
cg_name = self._get_volume_cg_name(volume_name)
if not cg_name:
msg = _('Volume %(name)s not found') % {'name': volume_name}
LOG.error(msg)
raise exception.VolumeNotFound(volume_id=volume_name)
snap_id = self._get_snap_id(cg_name, snapshot['name'])
if not snap_id:
msg = _('Snapshot %(name)s not found') % {'name': snapshot['name']}
LOG.error(msg)
raise exception.VolumeNotFound(volume_id=snapshot['name'])
self.vpsa.send_cmd('create_clone_from_snap',
cg_name=cg_name,
name=self.configuration.zadara_vol_name_template
% volume['name'],
snap_id=snap_id)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.debug(_('Creating clone of volume: %s') % src_vref['name'])
# Retrieve the CG name for the base volume
volume_name = self.configuration.zadara_vol_name_template\
% src_vref['name']
cg_name = self._get_volume_cg_name(volume_name)
if not cg_name:
msg = _('Volume %(name)s not found') % {'name': volume_name}
LOG.error(msg)
raise exception.VolumeNotFound(volume_id=volume_name)
self.vpsa.send_cmd('create_clone',
cg_name=cg_name,
name=self.configuration.zadara_vol_name_template
% volume['name'])
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
# Get volume name
name = self.configuration.zadara_vol_name_template % volume['name']
(vpsa_vol, size) = self._get_vpsa_volume_name_and_size(name)
if not vpsa_vol:
msg = _('Volume %(name)s could not be found. '
'It might be already deleted') % {'name': name}
LOG.error(msg)
raise exception.VolumeNotFound(volume_id=name)
if new_size < size:
raise exception.InvalidInput(
reason='%s < current size %s' % (new_size, size))
expand_size = new_size - size
self.vpsa.send_cmd('expand_volume',
vpsa_vol=vpsa_vol,
size=expand_size)
def create_export(self, context, volume):
"""Irrelevant for VPSA volumes. Export created during attachment."""
pass
def ensure_export(self, context, volume):
"""Irrelevant for VPSA volumes. Export created during attachment."""
pass
def remove_export(self, context, volume):
"""Irrelevant for VPSA volumes. Export removed during detach."""
pass
def initialize_connection(self, volume, connector):
"""Attach volume to initiator/host.
During this call VPSA exposes volume to particular Initiator. It also
creates a 'server' entity for Initiator (if it was not created before)
All necessary connection information is returned, including auth data.
Connection data (target, LUN) is not stored in the DB.
"""
# Get/Create server name for IQN
initiator_name = connector['initiator']
vpsa_srv = self._create_vpsa_server(initiator_name)
if not vpsa_srv:
raise exception.ZadaraServerCreateFailure(name=initiator_name)
# Get volume name
name = self.configuration.zadara_vol_name_template % volume['name']
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
raise exception.VolumeNotFound(volume_id=name)
# Get Active controller details
ctrl = self._get_active_controller_details()
if not ctrl:
raise exception.ZadaraVPSANoActiveController()
# Attach volume to server
self.vpsa.send_cmd('attach_volume',
vpsa_srv=vpsa_srv,
vpsa_vol=vpsa_vol)
# Get connection info
xml_tree = self.vpsa.send_cmd('list_vol_attachments',
vpsa_vol=vpsa_vol)
server = self._xml_parse_helper(xml_tree, 'servers',
('iqn', initiator_name))
if server is None:
raise exception.ZadaraAttachmentsNotFound(name=name)
target = server.findtext('target')
lun = server.findtext('lun')
if target is None or lun is None:
raise exception.ZadaraInvalidAttachmentInfo(
name=name,
reason='target=%s, lun=%s' % (target, lun))
properties = {}
properties['target_discovered'] = False
properties['target_portal'] = '%s:%s' % (ctrl['ip'], '3260')
properties['target_iqn'] = target
properties['target_lun'] = lun
properties['volume_id'] = volume['id']
properties['auth_method'] = 'CHAP'
properties['auth_username'] = ctrl['chap_user']
properties['auth_password'] = ctrl['chap_passwd']
LOG.debug(_('Attach properties: %(properties)s'),
{'properties': properties})
return {'driver_volume_type': 'iscsi',
'data': properties}
def terminate_connection(self, volume, connector, **kwargs):
"""Detach volume from the initiator."""
# Get server name for IQN
initiator_name = connector['initiator']
vpsa_srv = self._get_server_name(initiator_name)
if not vpsa_srv:
raise exception.ZadaraServerNotFound(name=initiator_name)
# Get volume name
name = self.configuration.zadara_vol_name_template % volume['name']
vpsa_vol = self._get_vpsa_volume_name(name)
if not vpsa_vol:
raise exception.VolumeNotFound(volume_id=name)
# Detach volume from server
self.vpsa.send_cmd('detach_volume',
vpsa_srv=vpsa_srv,
vpsa_vol=vpsa_vol)
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug(_("Updating volume stats"))
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'Zadara Storage'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
(total, free) = self._get_pool_capacity(self.configuration.
zadara_vpsa_poolname)
data['total_capacity_gb'] = total
data['free_capacity_gb'] = free
self._stats = data
| apache-2.0 | -2,693,120,071,967,204,000 | 38.129129 | 79 | 0.526669 | false | 4.231206 | true | false | false |
chitrangpatel/presto | lib/python/psr_utils.py | 2 | 69236 | import numpy as Num
import numpy.fft as FFT
import Pgplot, ppgplot, bisect, sinc_interp, parfile
from scipy.stats import histogram
from scipy.special import ndtr, ndtri, chdtrc, chdtri, fdtr, i0, kolmogorov
from scipy.optimize import leastsq
import scipy.optimize.zeros as zeros
from psr_constants import *
isintorlong = lambda x: type(x) == type(0) or type(x) == type(0L)
def span(Min, Max, Number):
"""
span(Min, Max, Number):
        Create a range of 'Number' floats given inclusive 'Min' and 'Max' values.
"""
assert isintorlong(Number)
if isintorlong(Min) and isintorlong(Max) and \
(Max-Min) % (Number-1) != 0:
Max = float(Max) # force floating points
return Min+(Max-Min)*Num.arange(Number)/(Number-1)
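# Illustrative usage (interactive example, not part of the library API):
#   span(0, 1, 5) -> [0.0, 0.25, 0.5, 0.75, 1.0]
# Integer endpoints are promoted to float when needed, so both 'Min' and
# 'Max' are always included exactly, unlike Num.arange.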
def distance(width):
"""
distance(width):
        Return a 'width' x 'width' NumPy array with each
point set to the geometric distance from the array's center.
"""
x = Num.arange(-width/2.0+0.5, width/2.0+0.5, 1.0)**2
x = Num.resize(x, (width,width))
return Num.sqrt(x + Num.transpose(x))
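# Illustrative usage: distance(3) gives a 3x3 array with 0.0 at the center
# pixel, 1.0 at the edge-centers, and sqrt(2) at the corners.  For even
# 'width' the center falls between pixels, so no element is exactly 0.0.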
def choose_N(orig_N):
"""
choose_N(orig_N):
Choose a time series length that is larger than
the input value but that is highly factorable.
Note that the returned value must be divisible
by at least the maximum downsample factor * 2.
Currently, this is 8 * 2 = 16.
"""
# A list of 4-digit numbers that are highly factorable by small primes
goodfactors = [1008, 1024, 1056, 1120, 1152, 1200, 1232, 1280, 1296,
1344, 1408, 1440, 1536, 1568, 1584, 1600, 1680, 1728,
1760, 1792, 1920, 1936, 2000, 2016, 2048, 2112, 2160,
2240, 2304, 2352, 2400, 2464, 2560, 2592, 2640, 2688,
2800, 2816, 2880, 3024, 3072, 3136, 3168, 3200, 3360,
3456, 3520, 3584, 3600, 3696, 3840, 3872, 3888, 3920,
4000, 4032, 4096, 4224, 4320, 4400, 4480, 4608, 4704,
4752, 4800, 4928, 5040, 5120, 5184, 5280, 5376, 5488,
5600, 5632, 5760, 5808, 6000, 6048, 6144, 6160, 6272,
6336, 6400, 6480, 6720, 6912, 7040, 7056, 7168, 7200,
7392, 7680, 7744, 7776, 7840, 7920, 8000, 8064, 8192,
8400, 8448, 8624, 8640, 8800, 8960, 9072, 9216, 9408,
9504, 9600, 9680, 9856]
if orig_N < 10000:
return 0
# Get the number represented by the first 4 digits of orig_N
first4 = int(str(orig_N)[:4])
# Now get the number that is just bigger than orig_N
# that has its first 4 digits equal to "factor"
for factor in goodfactors:
if factor > first4: break
new_N = factor
while new_N < orig_N:
new_N *= 10
# Finally, compare new_N to the closest power_of_two
# greater than orig_N. Take the closest.
two_N = 2
while two_N < orig_N:
two_N *= 2
return min(two_N, new_N)
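# Illustrative example of the logic above: choose_N(1000000) compares the
# highly-factorable candidate 1008000 (1008 scaled up by powers of 10) with
# the next power of two (2**20 = 1048576) and returns the smaller, 1008000.
# Inputs below 10000 simply return 0.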
def running_avg(arr, navg):
"""
running_avg(arr, navg):
Return an array of the running average of 'navg' bins from the
input array 'arr'.
"""
a = Num.asarray(arr, 'd')
a.shape = (len(a) / navg, navg)
return Num.add.reduce(Num.transpose(a)) / navg
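# Illustrative usage: running_avg(Num.arange(8), 4) -> [1.5, 5.5].
# Note that len(arr) must be an integer multiple of navg for the reshape
# above to succeed.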
def hist(data, bins, range=None, laby="Number", **kwargs):
"""
hist(data, bins, range=None, laby="Number", **kwargs):
Return and plot a histogram in one variable.
data -- a sequence of data points
bins -- the number of bins into which the data is to be sorted
range -- a tuple of two values, specifying the lower and
the upper end of the interval spanned by the bins.
Any data point outside this interval will be ignored.
If no range is given, the smallest and largest
data values are used to define the interval.
    Note: This command also accepts all the keyword args of plotbinned().
"""
(ys, lox, dx, out) = histogram(data, bins, range)
xs = Num.arange(bins, dtype='d')*dx + lox + 0.5*dx
maxy = int(1.1*max(ys))
if maxy < max(ys):
maxy = max(ys) + 1.0
if 'rangey' not in kwargs.keys():
kwargs['rangey']=[0,maxy]
Pgplot.plotbinned(ys, xs, laby=laby, **kwargs)
return (xs, ys)
def KS_test(data, cumdist, output=0):
"""
KS_test(data, cumdist, output=0):
Perform a Kolmogorov-Smirnov test on data compared to the
cumulative-distribution function cumdist.
"""
nn = len(data)
sdata = Num.sort(Num.asarray(data))
D1 = Num.maximum.reduce(Num.absolute(cumdist(sdata)-
Num.arange(nn, dtype='d')/nn))
D2 = Num.maximum.reduce(Num.absolute(cumdist(sdata)-
Num.arange(1,nn+1, dtype='d')/nn))
D = max((D1, D2))
P = kolmogorov(Num.sqrt(nn)*D)
if (output):
print "Max distance between the cumulative distributions (D) = %.5g" % D
print "Prob the data is from the specified distrbution (P) = %.3g" % P
return (D, P)
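# Illustrative usage (values vary since the deviates are random):
#   u = Num.random.random(1000)
#   D, P = KS_test(u, lambda x: x, output=1)
# Uniform deviates tested against the uniform CDF should typically give a
# probability P of order unity rather than << 1.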
def MJD_to_JD(MJD):
"""
MJD_to_JD(MJD):
Convert Modified Julian Date (MJD) to Julian Date (JD)
"""
return MJD+2400000.5
def JD_to_MJD(JD):
"""
JD_to_MJD(JD):
Convert Julian Date (JD) to Modified Julian Date (MJD)
"""
return JD-2400000.5
def MJD_to_Julian_Epoch(MJD):
"""
MJD_to_Julian_Epoch(MJD):
Convert Modified Julian Date (MJD) to Julian Epoch
"""
return 2000.0 + (MJD-51544.5)/365.25
def Julian_Epoch_to_MJD(jepoch):
"""
Julian_Epoch_to_MJD(jepoch):
Convert Julian Epoch to Modified Julian Date (MJD)
"""
return 51544.5 + (jepoch-2000.0)*365.25
def MJD_to_Besselian_Epoch(MJD):
"""
MJD_to_Besselian_Epoch(MJD):
Convert Modified Julian Date (MJD) to Besselian Epoch
"""
return 1900.0 + (MJD-15019.81352)/365.242198781
def Besselian_Epoch_to_MJD(bepoch):
"""
Besselian_Epoch_to_MJD(bepoch):
Convert Besselian Epoch to Modified Julian Date (MJD)
"""
return 15019.81352 + (bepoch-1900.0)*365.242198781
def rad_to_dms(rad):
"""
rad_to_dms(rad):
Convert radians to degrees, minutes, and seconds of arc.
"""
if (rad < 0.0): sign = -1
else: sign = 1
arc = RADTODEG * Num.fmod(Num.fabs(rad), PI)
d = int(arc)
arc = (arc - d) * 60.0
m = int(arc)
s = (arc - m) * 60.0
if sign==-1 and d==0:
return (sign * d, sign * m, sign * s)
else:
return (sign * d, m, s)
def dms_to_rad(deg, min, sec):
"""
dms_to_rad(deg, min, sec):
Convert degrees, minutes, and seconds of arc to radians.
"""
if (deg < 0.0):
sign = -1
elif (deg==0.0 and (min < 0.0 or sec < 0.0)):
sign = -1
else:
sign = 1
return sign * ARCSECTORAD * \
(60.0 * (60.0 * Num.fabs(deg) +
Num.fabs(min)) + Num.fabs(sec))
def dms_to_deg(deg, min, sec):
"""
dms_to_deg(deg, min, sec):
Convert degrees, minutes, and seconds of arc to degrees.
"""
return RADTODEG * dms_to_rad(deg, min, sec)
def rad_to_hms(rad):
"""
rad_to_hms(rad):
Convert radians to hours, minutes, and seconds of arc.
"""
rad = Num.fmod(rad, TWOPI)
if (rad < 0.0): rad = rad + TWOPI
arc = RADTOHRS * rad
h = int(arc)
arc = (arc - h) * 60.0
m = int(arc)
s = (arc - m) * 60.0
return (h, m, s)
def hms_to_rad(hour, min, sec):
"""
hms_to_rad(hour, min, sec):
Convert hours, minutes, and seconds of arc to radians
"""
if (hour < 0.0): sign = -1
else: sign = 1
return sign * SECTORAD * \
(60.0 * (60.0 * Num.fabs(hour) +
Num.fabs(min)) + Num.fabs(sec))
def hms_to_hrs(hour, min, sec):
"""
hms_to_hrs(hour, min, sec):
Convert hours, minutes, and seconds of arc to hours.
"""
return RADTOHRS * hms_to_rad(hour, min, sec)
def coord_to_string(h_or_d, m, s):
"""
coord_to_string(h_or_d, m, s):
Return a formatted string of RA or DEC values as
'hh:mm:ss.ssss' if RA, or 'dd:mm:ss.ssss' if DEC.
"""
retstr = ""
if h_or_d < 0:
retstr = "-"
elif abs(h_or_d)==0:
if (m < 0.0) or (s < 0.0):
retstr = "-"
h_or_d, m, s = abs(h_or_d), abs(m), abs(s)
if (s >= 9.9995):
return retstr+"%.2d:%.2d:%.4f" % (h_or_d, m, s)
else:
return retstr+"%.2d:%.2d:0%.4f" % (h_or_d, m, s)
def ra_to_rad(ra_string):
"""
    ra_to_rad(ra_string):
Given a string containing RA information as
'hh:mm:ss.ssss', return the equivalent decimal
radians.
"""
h, m, s = ra_string.split(":")
return hms_to_rad(int(h), int(m), float(s))
def dec_to_rad(dec_string):
"""
dec_to_rad(dec_string):
Given a string containing DEC information as
'dd:mm:ss.ssss', return the equivalent decimal
radians.
"""
d, m, s = dec_string.split(":")
if "-" in d and int(d)==0:
m, s = '-'+m, '-'+s
return dms_to_rad(int(d), int(m), float(s))
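# Illustrative example: dec_to_rad("-00:30:00.0") returns roughly -0.00873 rad.
# The sign handling above is needed because int("-00") == 0, so a negative
# declination with zero whole degrees would otherwise lose its sign.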
def delta_m(flux_factor):
"""
delta_m(flux_factor):
Return the change in magnitudes caused by a change
in flux of flux_factor.
"""
return -2.5*Num.log10(flux_factor)
def flux_factor(delta_m):
"""
flux_factor(delta_m):
Return the change in flux caused by a change
in magnitude of delta_m magnitudes
"""
return 10.0**(delta_m/-2.5)
def distance_modulus_to_distance(dm, absorption=0.0):
"""
distance_modulus_to_distance(dm, absorption=0.0):
Return the distance (kpc) given a distance modulus dm and
an optional absorption.
"""
return 10.0**(((dm-absorption)+5.0)/5.0)/1000.0
def distance_to_distance_modulus(d, absorption=0.0):
"""
distance_to_distance_modulus(d, absorption=0.0):
Return the distance modulus given a distance d and
an optional absorption.
"""
return 5.0*Num.log10(d*1000.0)-5.0+absorption
def true_anomaly(E, ecc):
"""
true_anomaly(E, ecc):
Return the True Anomaly (in radians) given the Eccentric anomaly
(E in radians) and the eccentricity (ecc)
"""
return 2.0*Num.arctan(Num.sqrt((1.0+ecc)/(1.0-ecc))*Num.tan(E/2.0))
def mass_funct(pb, x):
"""
mass_funct(pb, x):
Return the mass function of an orbit given the following:
'pb' is the binary period in days.
'x' is the projected semi-major axis in lt-sec.
"""
pbs = pb * 86400.0
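    # The constant below is, to within rounding, 4*pi^2*c^3/(G*Msun), so the
    # result comes out in solar masses for x in lt-sec and pb in days.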
return 8015123.37129 * x**3.0 / (pbs * pbs)
def mass_funct2(mp, mc, i):
"""
mass_funct2(mp, mc, i):
Return the mass function of an orbit given the following:
'mp' is the mass of the primary in solar masses.
'mc' is the mass of the companion in solar masses.
'i' is the orbital inclination (rad).
Note: An 'average' orbit has cos(i) = 0.5, or i = 60 deg
"""
return (mc * Num.sin(i))**3.0 / (mc + mp)**2.0
def asini_c(pb, mf):
"""
asini_c(pb, mf):
Return the orbital projected semi-major axis (lt-sec) given:
'pb' is the binary period in sec.
'mf' is the mass function of the orbit.
"""
return (mf * pb * pb / 8015123.37129)**(1.0 / 3.0)
def ELL1_check(par_file, output=False):
"""
ELL1_check(par_file):
Check the parfile to see if ELL1 can be safely used as the
binary model. To work properly, we should have:
asini/c * ecc**2 << timing precision / sqrt(# TOAs)
"""
psr = parfile.psr_par(par_file)
try:
lhs = psr.A1 * psr.E**2.0 * 1e6
except:
if output:
print "Can't compute asini/c * ecc**2, maybe parfile doesn't have a binary?"
return
try:
rhs = psr.TRES / Num.sqrt(psr.NTOA)
except:
if output:
print "Can't compute TRES / sqrt(# TOAs), maybe this isn't a TEMPO output parfile?"
return
if output:
print "Condition is asini/c * ecc**2 << timing precision / sqrt(# TOAs) to use ELL1:"
print " asini/c * ecc**2 = %8.3g us"%lhs
print " TRES / sqrt(# TOAs) = %8.3g us"%rhs
if lhs * 50.0 < rhs:
if output:
print "Should be fine."
return True
elif lhs * 5.0 < rhs:
if output:
print "Should be OK, but not optimal."
return True
else:
if output:
print "Should probably use BT or DD instead."
return False
def accel_to_z(accel, T, reffreq, harm=1):
"""
accel_to_z(accel, T, reffreq, harm=1):
Return the accelsearch 'z' (i.e. number of bins drifted)
at a reference frequency 'reffreq', for an observation
of duration 'T' seconds and with acceleration (in m/s/s)
'accel'. You can specify the harmonic number in 'harm'.
"""
return accel * harm * reffreq * T * T / SOL
def z_to_accel(z, T, reffreq, harm=1):
"""
z_to_accel(z, T, reffreq, harm=1):
Return the acceleration (in m/s/s) corresponding to the
accelsearch 'z' (i.e. number of bins drifted) at a
reference frequency 'reffreq', for an observation
of duration 'T'. You can specify the harmonic number
in 'harm'.
"""
return z * SOL / (harm * reffreq * T * T)
def bins_to_accel(z, T, f=[1.0, 1000.0], device="/XWIN"):
"""
bins_to_accel(z, T, f=[1.0, 1000.0], device="/XWIN"):
Make a plot showing the acceleration which corresponds
to a certain number of Fourier bins drifted 'z' during
an observation of length 'T'.
"""
fs = span(Num.log10(f[0]), Num.log10(f[1]), 1000)
accels = z_to_accel(z, T, 10.0**fs)
if (device):
Pgplot.plotxy(Num.log10(accels), fs, logx=1, logy=1,
labx="Frequency (Hz)",
laby="Acceleration (m/s\u2\d)", device=device)
ppgplot.pgmtxt("T", -2.0, 0.75, 0.0, "T = %.0f sec"%T)
ppgplot.pgmtxt("T", -3.5, 0.75, 0.0, "r\B\u\.\d = %.1f bins"%z)
if (device != '/XWIN'):
Pgplot.closeplot()
else:
return accels
def pulsar_mass(pb, x, mc, inc):
"""
pulsar_mass(pb, x, mc, inc):
Return the pulsar mass (in solar mass units) for a binary
system with the following characteristics:
'pb' is the binary period in days.
'x' is the projected semi-major axis in lt-sec.
'inc' is the orbital inclination in degrees.
'mc' is the mass of the companion in solar mass units.
"""
massfunct = mass_funct(pb, x)
def localmf(mp, mc=mc, mf=massfunct, i=inc*DEGTORAD):
return mass_funct2(mp, mc, i) - mf
return zeros.bisect(localmf, 0.0, 1000.0)
def companion_mass(pb, x, inc=60.0, mpsr=1.4):
"""
companion_mass(pb, x, inc=60.0, mpsr=1.4):
Return the companion mass (in solar mass units) for a binary
system with the following characteristics:
'pb' is the binary period in days.
'x' is the projected semi-major axis in lt-sec.
'inc' is the orbital inclination in degrees.
'mpsr' is the mass of the pulsar in solar mass units.
"""
massfunct = mass_funct(pb, x)
def localmf(mc, mp=mpsr, mf=massfunct, i=inc*DEGTORAD):
return mass_funct2(mp, mc, i) - mf
return zeros.bisect(localmf, 0.0, 1000.0)
def companion_mass_limit(pb, x, mpsr=1.4):
"""
companion_mass_limit(pb, x, mpsr=1.4):
Return the lower limit (corresponding to i = 90 degrees) of the
companion mass (in solar mass units) in a binary system with
the following characteristics:
'pb' is the binary period in days.
'x' is the projected semi-major axis in lt-sec.
'mpsr' is the mass of the pulsar in solar mass units.
"""
return companion_mass(pb, x, inc=90.0, mpsr=mpsr)
def OMDOT(porb, e, Mp, Mc):
"""
OMDOT(porb, e, Mp, Mc):
        Return the predicted advance of periastron (deg/yr) given the
orbital period (days), eccentricity, and pulsar and companion masses.
"""
return 3.0 * (porb*86400.0/TWOPI)**(-5.0/3.0) * \
(Tsun*(Mp+Mc))**(2.0/3.0) / (1.0-e**2.0) * \
RADTODEG * SECPERJULYR
def GAMMA(porb, e, Mp, Mc):
"""
GAMMA(porb, e, Mp, Mc):
Return the predicted value of relativistic gamma (sec) given the
orbital period (days), eccentricity, and pulsar and companion masses.
"""
return e * (porb*86400.0/TWOPI)**(1.0/3.0) * Tsun**(2.0/3.0) * \
(Mp+Mc)**(-4.0/3.0) * Mc * (Mp+2.0*Mc)
def PBDOT(porb, e, Mp, Mc):
"""
PBDOT(porb, e, Mp, Mc):
Return the predicted orbital period derivative (s/s) given the
orbital period (s), eccentricity, and pulsar and companion masses.
"""
return -192.0*PI/5.0 * (porb*86400.0/TWOPI)**(-5.0/3.0) * \
(1.0 + 73.0/24.0*e**2.0 + 37.0/96.0*e**4.0) * \
(1.0-e**2.0)**(-7.0/2.0) * Tsun**(5.0/3.0) * \
Mp * Mc * (Mp+Mc)**(-1.0/3.0)
def OMDOT_to_Mtot(OMDOT, porb, e):
"""
OMDOT_to_Mtot(OMDOT, porb, e):
Return the total mass (in solar units) of a system given an advance
of periastron (OMDOT) in deg/yr. The orbital period should be in days.
"""
wd = OMDOT/SECPERJULYR*DEGTORAD # rad/s
return (wd/3.0*(1.0-e*e)*(porb*SECPERDAY/TWOPI)**(5.0/3.0))**(3.0/2.0)/Tsun
def GAMMA_to_Mc(gamma, porb, e, Mp):
"""
GAMMA_to_Mc(gamma, porb, e, Mp):
Given the relativistic gamma in sec, the orbital period in days,
the eccentricity and the pulsar mass in solar units, return the
predicted companion mass.
"""
def funct(mc, mp=Mp, porb=porb, e=e, gamma=gamma):
return GAMMA(porb, e, mp, mc) - gamma
return zeros.bisect(funct, 0.01, 20.0)
def shklovskii_effect(pm, D):
"""
shklovskii_effect(pm, D):
Return the 'acceleration' due to the transverse Doppler effect
(i.e. the Shklovskii Effect) given the proper motion (pm) in mas/yr
and the distance (D) in kpc. Note: What is returned is a_pm/C,
or equivalently, Pdot_pm/P.
"""
return (pm/1000.0*ARCSECTORAD/SECPERJULYR)**2.0 * KMPERKPC*D / (C/1000.0)
def galactic_accel_simple(l, b, D, v_o=240.0, R_o = 8.34):
"""
galactic_accel_simple(l, b, D, v_o=240.0, R_o = 8.34):
Return the approximate projected acceleration/c (in s^-1)
(a_p - a_ssb) dot n / c, where a_p and a_ssb are acceleration
        vectors, and n is the los vector. This assumes a simple spherically
        symmetric isothermal sphere with circular velocity v_o (in km/s) and
        distance R_o (in kpc) from the SSB to the center of the sphere. l and
b are the galactic longitude and latitude (in deg) respectively,
and D is the distance in kpc. This is eqn 2.4 of Phinney 1992.
The default v_o and R_o values are from Reid et al 2014.
"""
A_sun = v_o*v_o / (C/1000.0 * R_o*KMPERKPC)
d = D/R_o
cbcl = Num.cos(b*DEGTORAD) * Num.cos(l*DEGTORAD)
return -A_sun * (cbcl + (d - cbcl) / (1.0 + d*d - 2.0*d*cbcl))
def galactic_accel(l, b, D, v_o=240.0, R_o = 8.34):
"""
galactic_accel(l, b, D, v_o=240.0, R_o = 8.34):
Return the approximate projected acceleration/c (in s^-1)
(a_p - a_ssb) dot n / c, where a_p and a_ssb are acceleration
        vectors, and n is the los vector. This assumes a circular velocity
        v_o (in km/s) and a distance R_o (in kpc) to the center of the Galaxy. l and
b are the galactic longitude and latitude (in deg) respectively,
and D is the distance in kpc. This is eqn 5 of Nice & Taylor 1995.
The default v_o and R_o values are from Reid et al 2014.
"""
A_sun = v_o*v_o / (C/1000.0 * R_o*KMPERKPC)
cb = Num.cos(b*DEGTORAD)
cl = Num.cos(l*DEGTORAD)
sl = Num.sin(l*DEGTORAD)
beta = D/R_o * cb - cl
return -A_sun * cb * (cl + beta / (sl**2 + beta**2))
def gal_z_accel(l, b, D):
"""
gal_z_accel(l, b, D):
Return the approximate projected acceleration/c (in s^-1)
(a_p - a_ssb) dot n / c, where a_p and a_ssb are acceleration
vectors, and n is the los vector, caused by the acceleration
of the pulsar towards the plane of the galaxy. l and b are
the galactic longitude and latitude (in deg) respectively, and D
is the distance in kpc. This is eqn 3+4 of Nice & Taylor 1995.
"""
sb = Num.sin(b*DEGTORAD)
z = D * sb
az = 1.08e-19 * (1.25 * z / Num.sqrt(z**2 + 0.0324) + 0.58 * z)
return az * sb
def beam_halfwidth(obs_freq, dish_diam):
"""
beam_halfwidth(obs_freq, dish_diam):
Return the telescope beam halfwidth in arcmin
        'obs_freq' = the observing frequency in MHz
'dish_diam' = the telescope diameter in m
"""
return 1.2*SOL/(obs_freq*10.0**6)/dish_diam*RADTODEG*60/2
def limiting_flux_dens(Ttot, G, BW, T, P=0.01, W=0.05, polar=2, factor=15.0):
"""
limiting_flux_dens(Ttot, G, BW, T, P=0.01, W=0.05, polar=2, factor=15.0):
Return the approximate limiting flux density for a pulsar
        survey in mJy based on the following characteristics:
'Ttot' = sky + system temperature (K)
'G' = forward gain of the antenna (K/Jy)
'BW' = observing bandwidth (MHz)
'T' = integration time (s)
'P' = pulsar period (s) (default = 0.01)
'W' = duty cycle of pulsar (0-1) (default = 0.05)
'polar' = number of polarizations (default = 2)
        'factor' = normalization factor that takes into account
limiting SNR, hardware limitations etc. (default = 15.0)
Note: This is a _very_ approximate calculation. For a better
calculation, see Cordes and Chernoff, ApJ, 482, p971, App. A.
Observatories:
Parkes Multibeam: Tsys = 21 K, G = 0.735 K/Jy
"""
w = W * P
return Num.sqrt(w/((P-w)*polar*BW*T))*factor*Ttot/G
def dm_info(dm=None, dmstep=1.0, freq=1390.0, numchan=512, chanwidth=0.5):
"""
dm_info(dm=None, dmstep=1.0, freq=1390.0, numchan=512, chanwidth=0.5):
Return info about potential DM smearing during an observation.
"""
BW = chanwidth * numchan
print " Center freq (MHz) = %.3f" % (freq)
print " Number of channels = %d" % (numchan)
print " Channel width (MHz) = %.3g" % (chanwidth)
print " Total bandwidth (MHz) = %.3g" % (BW)
print " DM offset (0.5*step) = %.3g" % (0.5 * dmstep)
print " Smearing over BW (ms) = %.3g" % \
(1000.0 * dm_smear(0.5 * dmstep, BW, freq))
if (dm):
print " Smearing per chan (ms) = %.3g" % \
(1000.0 * dm_smear(dm, chanwidth, freq))
def best_dm_step(maxsmear=0.1, dt=0.00080, dm=0.0, freq=1390.0, numchan=512, chanwidth=0.5):
"""
best_dm_step(maxsmear=0.1, dt=0.00080, dm=0.0, freq=1390.0, numchan=512, chanwidth=0.5):
Return the required DM step to keep the total smearing below 'maxsmear' (in ms).
"""
BW = chanwidth * numchan
tau_tot = maxsmear/1000.0
tau_chan = dm_smear(dm, chanwidth, freq)
tau_samp = dt
if (tau_tot**2.0 < (tau_chan**2.0+tau_samp**2.0)):
print "The requested total smearing is smaller than one or more of the components."
return 0.0
else:
return 0.0001205*freq**3.0*2.0/BW*Num.sqrt(tau_tot**2.0-tau_chan**2.0-tau_samp**2.0)
def dm_smear(dm, BW, center_freq):
"""
dm_smear(dm, BW, center_freq):
Return the smearing in sec caused by a 'dm' over a bandwidth
of 'BW' MHz centered at 'center_freq' MHz.
"""
return dm * BW / (0.0001205 * center_freq * center_freq * center_freq)
def diagonal_DM(dt, chanBW, center_freq):
"""
diagonal_DM(dt, chanBW, center_freq):
Return the so-called "diagonal DM" where the smearing across
one channel is equal to the sample time.
"""
return (0.0001205 * center_freq * center_freq * center_freq) * dt / chanBW
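# Illustrative example (hypothetical setup): diagonal_DM(0.001, 1.0, 1000.0) gives
# 120.5 pc cm^-3, i.e. a 1 ms sample time matches the smearing of DM = 120.5
# across a 1 MHz channel at 1 GHz.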
def pulse_broadening(DM, f_ctr):
"""
pulse_broadening(DM, f_ctr):
Return the approximate pulse broadening (tau) in ms due to scattering
based on the rough relation in Cordes' 'Pulsar Observations I' paper.
'f_ctr' should be in MHz. The approximate error is 0.65 in log(tau).
"""
logDM = Num.log10(DM)
return 10.0**(-3.59 + 0.129*logDM + 1.02*logDM**2.0 -
4.4*Num.log10(f_ctr/1000.0))/1000.0
def rrat_period(times, numperiods=20, output=True):
"""
rrat_period(times, numperiods=20, output=True):
Try to determine a RRAT pulse period using a brute force
search when the input times are (real!) single-pulse
arrival times. numperiods is the number of integer pulses
to try between the first two pulses. If output is True,
print some diagnostic information
"""
ts = Num.asarray(sorted(times))
ps = (ts[1]-ts[0])/Num.arange(1, numperiods+1)
dts = Num.diff(ts)
xs = dts / ps[:,Num.newaxis]
metric = Num.sum(Num.fabs((xs - xs.round())), axis=1)
pnum = metric.argmin()
numrots = xs.round()[pnum].sum()
p = (ts[-1] - ts[0]) / numrots
if output:
print "Min, avg, std metric values are %.4f, %.4f, %.4f" % \
(metric.min(), metric.mean(), metric.std())
print " Approx period is likely:", ps[pnum]
print "Refined period is likely:", p
print "Rotations between pulses are:"
print dts / p
return p
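# Illustrative example (made-up arrival times): rrat_period([100.0, 103.5, 110.5, 121.0],
# output=False) returns 3.5, since the gaps of 3.5, 7.0 and 10.5 s correspond to
# 1, 2 and 3 rotations of a 3.5 s period.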
def guess_DMstep(DM, dt, BW, f_ctr):
"""
guess_DMstep(DM, dt, BW, f_ctr):
Choose a reasonable DMstep by setting the maximum smearing across the
'BW' to equal the sampling time 'dt'.
"""
return dt*0.0001205*f_ctr**3.0/(0.5*BW)
def delay_from_DM(DM, freq_emitted):
"""
Return the delay in seconds caused by dispersion, given
a Dispersion Measure (DM) in cm-3 pc, and the emitted
frequency (freq_emitted) of the pulsar in MHz.
"""
if (type(freq_emitted)==type(0.0)):
if (freq_emitted > 0.0):
return DM/(0.000241*freq_emitted*freq_emitted)
else:
return 0.0
else:
return Num.where(freq_emitted > 0.0,
DM/(0.000241*freq_emitted*freq_emitted), 0.0)
def delay_from_foffsets(df, dfd, dfdd, times):
"""
Return the delays in phase caused by offsets in
frequency (df), and two frequency derivatives (dfd, dfdd)
at the given times in seconds.
"""
f_delays = df * times
fd_delays = dfd * times**2 / 2.0
fdd_delays = dfdd * times**3 / 6.0
return (f_delays + fd_delays + fdd_delays)
def smear_plot(dm=[1.0,1000.0], dmstep=1.0, subdmstep=10.0, freq=1390.0,
numchan=512, numsub=32, chanwidth=0.5, dt=0.000125,
device='/xwin'):
"""
    smear_plot(dm=[1.0,1000.0], dmstep=1.0, subdmstep=10.0, freq=1390.0,
numchan=512, numsub=32, chanwidth=0.5, dt=0.000125,
device='/xwin'):
Show a plot that displays the expected smearing in ms
from various effects during a radio pulsar search.
"""
numpts = 500
BW = numchan * chanwidth
subBW = numchan / numsub * chanwidth
maxDMerror = 0.5 * dmstep
maxsubDMerror = 0.5 * subdmstep
ldms = span(Num.log10(dm[0]), Num.log10(dm[1]), numpts)
dms = 10.0**ldms
# Smearing from sample rate
dts = Num.zeros(numpts) + 1000.0 * dt
# Smearing due to the intrinsic channel width
chan_smear = 1000.0 * dm_smear(dms, chanwidth, freq)
# Smearing across the full BW due to max DM mismatch
BW_smear = Num.zeros(numpts) + \
1000.0 * dm_smear(maxDMerror, BW, freq)
# Smearing in each subband due to max DM mismatch
subband_smear = Num.zeros(numpts) + \
1000.0 * dm_smear(maxsubDMerror, subBW, freq)
total_smear = Num.sqrt(dts**2.0 + chan_smear**2.0 +
subband_smear**2.0 + BW_smear**2.0)
maxval = Num.log10(2.0 * max(total_smear))
minval = Num.log10(0.5 * min([min(dts), min(chan_smear),
min(BW_smear), min(subband_smear)]))
Pgplot.plotxy(Num.log10(total_smear), ldms, rangey=[minval, maxval],
logx=1, logy=1, labx="Dispersion Measure",
laby="Smearing (ms)", device=device)
ppgplot.pgsch(0.8)
ppgplot.pgmtxt("t", 1.5, 1.0/12.0, 0.5, "\(2156)\dcenter\u = %gMHz" % freq)
ppgplot.pgmtxt("t", 1.5, 3.0/12.0, 0.5, "N\dchan\u = %d" % numchan)
ppgplot.pgmtxt("t", 1.5, 5.0/12.0, 0.5, "N\dsub\u = %d" % numsub)
ppgplot.pgmtxt("t", 1.5, 7.0/12.0, 0.5, "BW\dchan\u = %gMHz" % chanwidth)
ppgplot.pgmtxt("t", 1.5, 9.0/12.0, 0.5, "\gDDM = %g" % dmstep)
ppgplot.pgmtxt("t", 1.5, 11.0/12.0, 0.5, "\gDDM\dsub\u = %g" % subdmstep)
ppgplot.pgsch(1.0)
ppgplot.pgmtxt("b", -7.5, 0.95, 1.0, "Total")
Pgplot.plotxy(Num.log10(dts), ldms, color="green",
logx=1, logy=1)
ppgplot.pgmtxt("b", -6.0, 0.95, 1.0, "Sample Rate")
Pgplot.plotxy(Num.log10(chan_smear), ldms, color="purple",
logx=1, logy=1)
ppgplot.pgmtxt("b", -4.5, 0.95, 1.0, "Channel")
Pgplot.plotxy(Num.log10(BW_smear), ldms, color="red",
logx=1, logy=1)
ppgplot.pgmtxt("b", -3.0, 0.95, 1.0, "Full BW")
Pgplot.plotxy(Num.log10(subband_smear), ldms, color="blue",
logx=1, logy=1)
ppgplot.pgmtxt("b", -1.5, 0.95, 1.0, "Subband")
ppgplot.pgsci(1)
def search_sensitivity(Ttot, G, BW, chan, freq, T, dm, ddm, dt, Pmin=0.001,
Pmax=1.0, W=0.1, polar=2, factor=15.0, pts=1000):
"""
(periods, S_min) = search_sensitivity(Ttot, G, BW, chan, freq, T, dm,
ddm, dt, Pmin=0.001, Pmax=1.0, W=0.1, polar=2, factor=15.0, pts=1000):
Return the approximate limiting flux density for a pulsar
        survey in mJy based on the following characteristics:
'Ttot' = sky + system temperature (K)
'G' = forward gain of the antenna (K/Jy)
'BW' = observing bandwidth (MHz)
'chan' = number of channels in the filterbank
'freq' = central observing frequency (MHz)
'T' = integration time (s)
'dm' = Dispersion Measure in pc cm^-3
'ddm' = Dispersion Measure stepsize in pc cm^-3
'dt' = Sample time for each data point in sec
'Pmin' = minimum pulsar period (s) (default = 0.001)
'Pmax' = maximum pulsar period (s) (default = 1.0)
'W' = duty cycle of pulsar (0-1) (default = 0.1)
'polar' = number of polarizations (default = 2)
        'factor' = normalization factor that takes into account
limiting SNR, hardware limitations etc. (default = 15.0)
'pts' = the number of points to calculate
Note: This is a _very_ approximate calculation. For a better
calculation, see Cordes and Chernoff, ApJ, 482, p971, App. A.
Observatories:
Parkes Multibeam: Tsys = 21 K, G = 0.735 K/Jy
"""
periods = span(Pmin, Pmax, pts)
widths = Num.sqrt((W * periods)**2.0 +
dm_smear(dm, BW/chan, freq)**2.0 + \
dm_smear(ddm/2.0, BW, freq)**2.0 + \
dt**2.0) / periods
return (periods, limiting_flux_dens(Ttot, G, BW, T, periods, widths,
polar=polar, factor=factor))
def smin_noise(Ttot, G, BW, dt):
"""
smin_noise(Ttot, G, BW, dt):
Return the 1 sigma Gaussian noise level (mJy) for each time
series bin in a pulsar data simulation. Default is for a
sinusoidal pulse (i.e. W = P / 2) with freq << Nyquist freq.
'Ttot' = sky + system temperature (K)
'G' = forward gain of the antenna (K/Jy)
'BW' = observing bandwidth (MHz)
'dt' = time per time series bin (s)
Observatories:
Parkes Multibeam: Tsys = 21 K, G = 0.735 K/Jy
"""
return Ttot / (G * Num.sqrt(2 * BW * dt))
def read_profile(filenm, normalize=0):
"""
read_profile(filenm, normalize=0):
Read a simple ASCII profile with one bin per line
from the file 'filenm'. Comments are allowed
if they begin with '#'. The profile is pseudo-
normalized if 'normalize' is true.
"""
prof = []
for line in file(filenm):
if line.startswith("#"): continue
else: prof.append(float(line.split()[-1]))
prof = Num.asarray(prof)
if normalize:
prof -= min(prof)
prof /= max(prof)
return prof
def calc_phs(MJD, refMJD, *args):
"""
calc_phs(MJD, refMJD, *args):
Return the rotational phase (0-1) at MJD (can be an array)
given a reference MJD and the rotational freq (f0) and
optional freq derivs (f1...) as ordered in the *args
list (e.g. [f0, f1, f2, ...]).
"""
t = (MJD-refMJD)*SECPERDAY
n = len(args) # polynomial order
nargs = Num.concatenate(([0.0], args))
taylor_coeffs = Num.concatenate(([0.0],
Num.cumprod(1.0/(Num.arange(float(n))+1.0))))
p = Num.poly1d((taylor_coeffs * nargs)[::-1])
return Num.fmod(p(t), 1.0)
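# Illustrative example (hypothetical ephemeris, assuming SECPERDAY = 86400):
# calc_phs(55000.5, 55000.0, 2.3456) evaluates a 2.3456 Hz spin over half a day
# (43200 s) and returns the fractional part, approximately 0.92.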
def calc_freq(MJD, refMJD, *args):
"""
calc_freq(MJD, refMJD, *args):
Return the instantaneous frequency at an MJD (can be an array)
given a reference MJD and the rotational freq (f0) and
optional freq derivs (f1...) as ordered in the *args
list (e.g. [f0, f1, f2, ...]).
"""
t = (MJD-refMJD)*SECPERDAY
n = len(args) # polynomial order
taylor_coeffs = Num.concatenate(([1.0],
Num.cumprod(1.0/(Num.arange(float(n-1))+1.0))))
p = Num.poly1d((taylor_coeffs * args)[::-1])
return p(t)
def calc_t0(MJD, refMJD, *args):
"""
calc_t0(MJD, refMJD, *args):
Return the closest previous MJD corresponding to phase=0 of the pulse.
*args are the spin freq (f0) and optional freq derivs (f1...)
"""
phs = calc_phs(MJD, refMJD, *args)
p = 1.0 / calc_freq(MJD, refMJD, *args)
return MJD - phs*p/SECPERDAY
def write_princeton_toa(toa_MJDi, toa_MJDf, toaerr, freq, dm, obs='@', name=' '*13):
"""
Princeton Format
columns item
1-1 Observatory (one-character code) '@' is barycenter
2-2 must be blank
16-24 Observing frequency (MHz)
25-44 TOA (decimal point must be in column 30 or column 31)
45-53 TOA uncertainty (microseconds)
69-78 DM correction (pc cm^-3)
"""
# Splice together the fractional and integer MJDs
toa = "%5d"%int(toa_MJDi) + ("%.13f"%toa_MJDf)[1:]
if dm!=0.0:
print obs+" %13s %8.3f %s %8.2f %9.4f" % \
(name, freq, toa, toaerr, dm)
else:
print obs+" %13s %8.3f %s %8.2f" % \
(name, freq, toa, toaerr)
def write_tempo2_toa(toa_MJDi, toa_MJDf, toaerr, freq, dm, obs='@', name='unk', flags=""):
"""
Write Tempo2 format TOAs.
Note that first line of file should be "FORMAT 1"
TOA format is "file freq sat satErr siteID <flags>"
"""
toa = "%5d"%int(toa_MJDi) + ("%.13f"%toa_MJDf)[1:]
if dm != 0.0:
flags += "-dm %.4f" % (dm,)
print "%s %f %s %.2f %s %s" % (name,freq,toa,toaerr,obs,flags)
def rotate(arr, bins):
"""
rotate(arr, bins):
Return an array rotated by 'bins' places to the left
"""
bins = bins % len(arr)
if bins==0:
return arr
else:
return Num.concatenate((arr[bins:], arr[:bins]))
def interp_rotate(arr, bins, zoomfact=10):
"""
interp_rotate(arr, bins, zoomfact=10):
Return a sinc-interpolated array rotated by 'bins' places to the left.
'bins' can be fractional and will be rounded to the closest
whole-number of interpolated bins. The resulting vector will
        have the same length as the original.
"""
newlen = len(arr)*zoomfact
rotbins = int(Num.floor(bins*zoomfact+0.5)) % newlen
newarr = sinc_interp.periodic_interp(arr, zoomfact)
return rotate(newarr, rotbins)[::zoomfact]
def fft_rotate(arr, bins):
"""
fft_rotate(arr, bins):
Return array 'arr' rotated by 'bins' places to the left. The
rotation is done in the Fourier domain using the Shift Theorem.
'bins' can be fractional. The resulting vector will have
the same length as the original.
"""
arr = Num.asarray(arr)
freqs = Num.arange(arr.size/2+1, dtype=Num.float)
phasor = Num.exp(complex(0.0, TWOPI) * freqs * bins / float(arr.size))
return Num.fft.irfft(phasor * Num.fft.rfft(arr), arr.size)
def corr(profile, template):
"""
corr(profile, template):
Cross-correlate (using FFTs) a 'profile' and a 'template'.
"""
return FFT.irfft(FFT.rfft(template) * Num.conjugate(FFT.rfft(profile)),
profile.size)
def autocorr(x):
"""
autocorr(x):
Circular normalized auto-correlation of the (real) function x
using FFTs. Returns only N/2+1 points as the remaining N/2-1
points are symmetric (corresponding to negative lags).
"""
fftx = FFT.rfft(x)
acf = FFT.irfft(fftx * Num.conjugate(fftx), x.size)[:len(x)/2+1]
return acf / acf[0]
def maxphase(profile, template):
"""
maxphase(profile, template):
Return the phase offset required to get the 'profile' to best
match the 'template'.
"""
return float(Num.argmax(corr(profile, template))) / len(profile)
def linear_interpolate(vector, zoom=10):
"""
linear_interpolate(vector, zoom=10):
Linearly interpolate 'vector' by a factor of 'zoom'.
"""
n = len(vector)
ivect = Num.zeros(zoom*n, dtype='d')
nvect = Num.concatenate((vector, vector[:1]))
ivals = Num.arange(zoom, dtype='d')/zoom
loy = nvect[0]
for ii in range(n):
hiy = nvect[ii+1]
ivect[ii*zoom:(ii+1)*zoom] = ivals*(hiy-loy) + loy
loy = hiy
return ivect
def downsample(vector, factor):
"""
downsample(vector, factor):
Downsample (i.e. co-add consecutive numbers) a short section
of a vector by an integer factor.
"""
if (len(vector) % factor):
print "Lenght of 'vector' is not divisible by 'factor'=%d!" % factor
return 0
newvector = Num.reshape(vector, (len(vector)/factor, factor))
return Num.add.reduce(newvector, 1)
def measure_phase_corr(profile, template, zoom=10):
"""
measure_phase_corr(profile, template, zoom=10):
Return the phase offset required to get the 'profile' to best
match the 'template', each of which has been interpolated
by a factor of 'zoom'.
"""
zoomprof = zoomtemp = zoom
if (len(template) != len(profile)):
if (len(template)%len(profile) == 0):
zoomprof = zoom*len(template)/len(profile)
else:
print "Warning!: The lengths of the template (%d) and profile (%d)" % \
(len(template), len(profile))
print " are not the same!"
#itemp = linear_interpolate(rotate(template, Num.argmax(template)), zoomtemp)
itemp = linear_interpolate(template, zoomtemp)
iprof = linear_interpolate(profile, zoomprof)
return maxphase(iprof, itemp)
def spike_profile(N, phase, fwhm):
"""
spike_profile(N, phase, fwhm):
Return a triangular pulse profile with 'N' bins and
an integrated 'flux' of 1 unit.
'N' = the number of points in the profile
'phase' = the pulse phase (0-1)
'fwhm' = the triangular pulses full width at half-max
"""
phsval = Num.arange(N, dtype='d') / float(N)
peakst = 0.5 - fwhm
peakend = 0.5 + fwhm
    normalize = 1.0 / fwhm
    mean = phase % 1.0  # fold the requested pulse phase into 0-1 for the wrap below
    if (mean < 0.5):
phsval = Num.where(Num.greater(phsval, mean+0.5),
phsval-1.0, phsval)
else:
phsval = Num.where(Num.less(phsval, mean-0.5),
phsval+1.0, phsval)
return Num.where(Num.less_equal(phsval, 0.5),
Num.where(Num.less_equal(phsval, peakst),
0.0, (phsval - peakst) *
normalize * normalize),
Num.where(Num.greater(phsval, peakend),
0.0, (1.0 - (phsval - 0.5) *
normalize) * normalize))
def harm_to_sum(fwhm):
"""
harm_to_sum(fwhm):
For an MVMD profile returns the optimal number
of harmonics to sum incoherently
"""
fwhms = [0.0108, 0.0110, 0.0113, 0.0117, 0.0119, 0.0124, 0.0127, 0.0132,
0.0134, 0.0140, 0.0145, 0.0151, 0.0154, 0.0160, 0.0167, 0.0173,
0.0180, 0.0191, 0.0199, 0.0207, 0.0220, 0.0228, 0.0242, 0.0257,
0.0273, 0.0295, 0.0313, 0.0338, 0.0366, 0.0396, 0.0437, 0.0482,
0.0542, 0.0622, 0.0714, 0.0836, 0.1037, 0.1313, 0.1799, 0.2883]
return len(fwhms)-bisect.bisect(fwhms, fwhm)+1
def expcos_profile(N, phase, fwhm):
"""
expcos_profile(N, phase, fwhm):
Return a pulse profile with 'N' bins and an integrated 'flux'
of 1 unit based on the 'Exponentiated Sinusoid'.
'N' = the number of points in the profile
'phase' = the pulse phase (0-1)
'fwhm' = pulse full width at half-max (0.0 < fwhm <= 0.5)
"""
from simple_roots import secant
def fwhm_func(k, fwhm=fwhm):
if (fwhm < 0.02):
return Num.arccos(1.0-Num.log(2.0)/k)/PI-fwhm
else:
return Num.arccos(Num.log(0.5*(Num.exp(k)+
Num.exp(-k)))/k)/PI-fwhm
phsval = TWOPI * Num.arange(N, dtype='d') / float(N)
phi = -phase * TWOPI
if (fwhm >= 0.5):
return Num.cos(phsval + phi) + 1.0
elif (fwhm < 0.02):
        # The following is from the asymptotic expansion of I0(x) as x->Infinity.
k = Num.log(2.0) / (1.0 - Num.cos(PI * fwhm))
# print "Expansion: k = %f FWHM = %f" % (k, fwhm_func(k, 0.0))
phsval = Num.fmod(phsval + phi, TWOPI)
phsval = Num.where(Num.greater(phsval, PI),
phsval - TWOPI, phsval)
denom = ((1 + 1/(8*k) + 9/(128*k*k) + 75/(1024*k**3) +
3675/(32768*k**4) + 59535/(262144*k**5)) / Num.sqrt(TWOPI*k))
return Num.where(Num.greater(Num.fabs(phsval/TWOPI), 3.0*fwhm), 0.0,
Num.exp(k*(Num.cos(phsval)-1.0))/denom)
else:
k = secant(fwhm_func, 1e-8, 0.5)
norm = 1.0 / (i0(k) - Num.exp(-k))
# print "Full Calc: k = %f FWHM = %f" % (k, fwhm_func(k, 0.0))
if (k < 0.05):
tmp = Num.cos(phsval + phi)
tmp2 = tmp * tmp
return norm * (k * (tmp + 1) +
k * k * (tmp2 - 1.0) / 2.0 +
k * k * k * (tmp2 * tmp + 1.0) / 6.0)
else:
return norm * (Num.exp(k * Num.cos(phsval + phi)) -
Num.exp(-k))
def read_gaussfitfile(gaussfitfile, proflen):
"""
read_gaussfitfile(gaussfitfile, proflen):
Read a Gaussian-fit file as created by the output of pygaussfit.py.
The input parameters are the name of the file and the number of
bins to include in the resulting template file. A numpy array
of that length is returned.
"""
phass = []
ampls = []
fwhms = []
for line in open(gaussfitfile):
if line.lstrip().startswith("phas"):
phass.append(float(line.split()[2]))
if line.lstrip().startswith("ampl"):
ampls.append(float(line.split()[2]))
if line.lstrip().startswith("fwhm"):
fwhms.append(float(line.split()[2]))
if not (len(phass) == len(ampls) == len(fwhms)):
print "Number of phases, amplitudes, and FWHMs are not the same in '%s'!"%gaussfitfile
return 0.0
phass = Num.asarray(phass)
ampls = Num.asarray(ampls)
fwhms = Num.asarray(fwhms)
# Now sort them all according to decreasing amplitude
new_order = Num.argsort(ampls)
new_order = new_order[::-1]
ampls = Num.take(ampls, new_order)
phass = Num.take(phass, new_order)
fwhms = Num.take(fwhms, new_order)
# Now put the biggest gaussian at phase = 0.0
phass = phass - phass[0]
phass = Num.where(phass<0.0, phass+1.0, phass)
template = Num.zeros(proflen, dtype='d')
for ii in range(len(ampls)):
template += ampls[ii]*gaussian_profile(proflen, phass[ii], fwhms[ii])
return template
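# The parser above only uses lines whose first token starts with "phas", "ampl"
# or "fwhm" and takes the third whitespace-separated token as the value, e.g.
# (illustrative):
#   phas1 = 0.1000
#   ampl1 = 0.8500
#   fwhm1 = 0.0200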
def gaussian_profile(N, phase, fwhm):
"""
gaussian_profile(N, phase, fwhm):
Return a gaussian pulse profile with 'N' bins and
an integrated 'flux' of 1 unit.
'N' = the number of points in the profile
'phase' = the pulse phase (0-1)
'fwhm' = the gaussian pulses full width at half-max
Note: The FWHM of a gaussian is approx 2.35482 sigma
"""
sigma = fwhm / 2.35482
mean = phase % 1.0
phsval = Num.arange(N, dtype='d') / float(N)
if (mean < 0.5):
phsval = Num.where(Num.greater(phsval, mean+0.5),
phsval-1.0, phsval)
else:
phsval = Num.where(Num.less(phsval, mean-0.5),
phsval+1.0, phsval)
try:
zs = (phsval-mean)/sigma
okzinds = Num.compress(Num.fabs(zs)<20.0, Num.arange(N))
okzs = Num.take(zs, okzinds)
retval = Num.zeros(N, 'd')
Num.put(retval, okzinds, Num.exp(-0.5*(okzs)**2.0)/(sigma*Num.sqrt(2*PI)))
return retval
except OverflowError:
print "Problem in gaussian prof: mean = %f sigma = %f" % \
(mean, sigma)
return Num.zeros(N, 'd')
def gauss_profile_params(profile, output=0):
"""
gauss_profile_params(profile, output=0):
Return parameters of a best-fit gaussian to a profile.
        The function returns a tuple containing the following values:
ret[0] = Best-fit gaussian integrated 'flux'.
ret[1] = Best-fit gaussian FWHM.
ret[2] = Best-fit gaussian phase (0.0-1.0).
ret[3] = Baseline (i.e. noise) average value.
ret[4] = Residuals average value.
ret[5] = Residuals standard deviation.
If 'output' is true, the fit will be plotted and
the return values will be printed.
"""
profile = Num.asarray(profile)
def funct(afpo, profile):
return afpo[0] * gaussian_profile(len(profile), afpo[2], afpo[1]) \
+ afpo[3] - profile
ret = leastsq(funct, [profile.max()-profile.min(),
0.25, profile.argmax()/float(len(profile)),
profile.min()], args=(profile))
if (output):
phases = Num.arange(0.0, 1.0,
1.0 / len(profile)) + 0.5 / len(profile)
Pgplot.plotxy(profile, phases, rangex=[0.0, 1.0],
labx='Pulse Phase', laby='Pulse Intensity')
bestfit = ret[0][0] * gaussian_profile(len(profile),
ret[0][2], ret[0][1]) \
+ ret[0][3]
if (output):
Pgplot.plotxy(bestfit, phases, color='red')
Pgplot.closeplot()
residuals = bestfit - profile
resid_avg = residuals.mean()
resid_std = residuals.std()
if (output):
Pgplot.plotxy(residuals, phases, rangex=[0.0, 1.0],
rangey=[min(residuals) - 2 * resid_std,
max(residuals) + 2 * resid_std],
labx='Pulse Phase', laby='Residuals',
line=None, symbol=3)
ppgplot.pgerrb(6, phases, residuals,
Num.zeros(len(residuals), 'd') + \
resid_std, 2)
Pgplot.plotxy([resid_avg, resid_avg], [0.0, 1.0], line=2)
Pgplot.closeplot()
print ""
print " Best-fit gaussian integrated 'flux' = ", ret[0][0]
print " Best-fit gaussian FWHM = ", ret[0][1]
print " Best-fit gaussian phase (0.0-1.0) = ", ret[0][2]
print " Baseline (i.e. noise) average = ", ret[0][3]
print " Residuals average = ", resid_avg
print " Residuals standard deviation = ", resid_std
print ""
return (ret[0][0], ret[0][1], ret[0][2], ret[0][3], resid_avg, resid_std)
def twogauss_profile_params(profile, output=0):
"""
twogauss_profile_params(profile, output=0):
Return parameters of a two best-fit gaussians to a profile.
        The function returns a tuple containing the following values:
ret[0] = Best-fit gaussian integrated 'flux'.
ret[1] = Best-fit gaussian FWHM.
ret[2] = Best-fit gaussian phase (0.0-1.0).
ret[3] = Best-fit gaussian integrated 'flux'.
ret[4] = Best-fit gaussian FWHM.
ret[5] = Best-fit gaussian phase (0.0-1.0).
ret[6] = Baseline (i.e. noise) average value.
ret[7] = Residuals average value.
ret[8] = Residuals standard deviation.
If 'output' is true, the fit will be plotted and
the return values will be printed.
"""
def yfunct(afpo, n):
return afpo[0] * gaussian_profile(n, afpo[2], afpo[1]) + \
afpo[3] * gaussian_profile(n, afpo[5], afpo[4]) + afpo[6]
def min_funct(afpo, profile):
return yfunct(afpo, len(profile)) - profile
ret = leastsq(min_funct, [max(profile)-min(profile),
0.05,
Num.argmax(profile)/float(len(profile)),
0.2 * max(profile)-min(profile),
0.1,
Num.fmod(Num.argmax(profile)/float(len(profile))+0.5, 1.0),
min(profile)], args=(profile))
if (output):
phases = Num.arange(0.0, 1.0,
1.0 / len(profile)) + 0.5 / len(profile)
Pgplot.plotxy(profile, phases, rangex=[0.0, 1.0],
labx='Pulse Phase', laby='Pulse Intensity')
bestfit = yfunct(ret[0], len(profile))
if (output):
Pgplot.plotxy(bestfit, phases, color='red')
Pgplot.closeplot()
residuals = bestfit - profile
resid_avg = residuals.mean()
resid_std = residuals.std()
if (output):
Pgplot.plotxy(residuals, phases, rangex=[0.0, 1.0],
rangey=[min(residuals) - 2 * resid_std,
max(residuals) + 2 * resid_std],
labx='Pulse Phase', laby='Residuals',
line=None, symbol=3)
ppgplot.pgerrb(6, phases, residuals,
Num.zeros(len(residuals), 'd') + \
resid_std, 2)
Pgplot.plotxy([resid_avg, resid_avg], [0.0, 1.0], line=2)
Pgplot.closeplot()
print ""
print " Best-fit gaussian integrated 'flux' = ", ret[0][0]
print " Best-fit gaussian FWHM = ", ret[0][1]
print " Best-fit gaussian phase (0.0-1.0) = ", ret[0][2]
print " Best-fit gaussian integrated 'flux' = ", ret[0][3]
print " Best-fit gaussian FWHM = ", ret[0][4]
print " Best-fit gaussian phase (0.0-1.0) = ", ret[0][5]
print " Baseline (i.e. noise) average = ", ret[0][6]
print " Residuals average = ", resid_avg
print " Residuals standard deviation = ", resid_std
print ""
return (ret[0][0], ret[0][1], ret[0][2], ret[0][3], ret[0][4],
ret[0][5], ret[0][6], resid_avg, resid_std)
def estimate_flux_density(profile, N, dt, Ttot, G, BW, prof_stdev, display=0):
"""
estimate_flux_density(profile, N, dt, Ttot, G, BW, prof_stdev, display=0):
Return an estimate of the flux density (mJy) of a pulsar.
'profile' = the pulse profile you are using
'N' = number of time series bins folded
'dt' = time per time series bin (s)
'Ttot' = sky + system temperature (K)
'G' = forward gain of the antenna (K/Jy)
'BW' = observing bandwidth (MHz)
'prof_stdev' = profile standard deviation
'display' = if set, the gaussian fit plots are shown
Observatories:
Parkes Multibeam: Tsys = 21 K, G = 0.735 K/Jy
"""
(amp, fwhm, phase, offset, resid_avg, resid_std) = \
gauss_profile_params(profile, display)
T = N * dt
norm_fact = (prof_stdev * len(profile)) / \
smin_noise(Ttot, G, BW, T / len(profile))
return Num.add.reduce(profile - offset) / norm_fact
def max_spike_power(FWHM):
"""
max_spike_power(FWHM):
Return the (approx.) ratio of the highest power from a
triangular spike pulse profile to the power from a
perfect sinusoidal pulse profile. In other words, if a
sine gives you a power of 1, what power does a spike profile
give you? Both the spike and the sine are assumed to have
an area under one full pulse of 1 unit. Note: A gaussian
profile gives almost identical powers as a spike profile
of the same width. This expression was determined using
a least-squares fit (Max abs error ~ 0.016).
'FWHM' is the full width at half-max of the spike.
(0.0 < FWHM <= 0.5)
"""
return ((36.4165309504 * FWHM - 32.0107844537) * FWHM \
+ 0.239948319674) * FWHM + 4.00277916584
def num_spike_powers(FWHM):
"""
num_spike_powers(FWHM):
Return the (approx.) number of powers from a triangular spike
        pulse profile which are greater than one half the power of a
        perfect sinusoidal pulse profile. Both the spike and the
sine are assumed to have an area under one full pulse of 1 unit.
Note: A gaussian profile gives almost identical numbers of
high powers as a spike profile of the same width. This
expression was determined using a least-squares fit.
(Errors get large as FWHM -> 0).
'FWHM' is the full width at half-max of the spike.
(0.0 < FWHM <= 0.5)
"""
return -3.95499721563e-05 / FWHM**2 + 0.562069634689 / FWHM - \
0.683604041138
def incoherent_sum(amps):
"""
incoherent_sum(amps):
Given a series of complex Fourier amplitudes, return a vector
showing the accumulated incoherently-summed powers.
"""
return Num.add.accumulate(Num.absolute(amps)**2.0)
def coherent_sum(amps):
"""
coherent_sum(amps):
Given a series of complex Fourier amplitudes, return a vector
showing the accumulated coherently-summed powers.
"""
phss = Num.arctan2(amps.imag, amps.real)
phs0 = phss[0]
phscorr = phs0 - Num.fmod((Num.arange(len(amps), dtype='d')+1.0)*phs0, TWOPI)
sumamps = Num.add.accumulate(amps*Num.exp(complex(0.0, 1.0)*phscorr))
return Num.absolute(sumamps)**2.0
def dft_vector_response(roff, z=0.0, w=0.0, phs=0.0, N=1000):
"""
dft_vector_response(roff, z=0.0, w=0.0, phs=0.0, N=1000):
Return a complex vector addition of N vectors showing the DFT
response for a noise-less signal with Fourier frequency
offset roff, (roff=0 would mean that we are exactly at the
signal freq), average Fourier f-dot, z, and Fourier 2nd
deriv, w. An optional phase in radians can be added.
"""
r0 = roff - 0.5 * z + w / 12.0 # Make symmetric for all z and w
z0 = z - 0.5 * w
us = Num.linspace(0.0, 1.0, N)
phss = 2.0 * Num.pi * (us * (us * (us * w/6.0 + z0/2.0) + r0) + phs)
return Num.cumsum(Num.exp(Num.complex(0.0, 1.0) * phss)) / N
def prob_power(power):
"""
prob_power(power):
Return the probability for noise to exceed a normalized power
level of 'power' in a power spectrum.
"""
return Num.exp(-power)
def Ftest(chi2_1, dof_1, chi2_2, dof_2):
"""
Ftest(chi2_1, dof_1, chi2_2, dof_2):
Compute an F-test to see if a model with extra parameters is
significant compared to a simpler model. The input values are the
(non-reduced) chi^2 values and the numbers of DOF for '1' the
original model and '2' for the new model (with more fit params).
The probability is computed exactly like Sherpa's F-test routine
(in Ciao) and is also described in the Wikipedia article on the
F-test: http://en.wikipedia.org/wiki/F-test
The returned value is the probability that the improvement in
chi2 is due to chance (i.e. a low probability means that the
new fit is quantitatively better, while a value near 1 means
that the new model should likely be rejected).
"""
delta_chi2 = chi2_1 - chi2_2
delta_dof = dof_1 - dof_2
new_redchi2 = chi2_2 / dof_2
F = (delta_chi2 / delta_dof) / new_redchi2
return 1.0 - fdtr(delta_dof, dof_2, F)
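# Illustrative example (made-up fit statistics): Ftest(120.0, 100, 90.0, 98) compares
# a fit with two extra parameters; the returned probability is tiny (roughly 1e-6),
# so the chi^2 improvement is very unlikely to be due to chance.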
def equivalent_gaussian_sigma(p):
"""
equivalent_gaussian_sigma(p):
Return the equivalent gaussian sigma corresponding
to the cumulative gaussian probability p. In other
words, return x, such that Q(x) = p, where Q(x) is the
        cumulative normal distribution. For very small p (log(p) <= -30),
        the asymptotic approximation in extended_equiv_gaussian_sigma()
        is used instead of ndtri().
    """
logp = Num.log(p)
if type(1.0) == type(logp):
if logp > -30.0:
return ndtri(1.0 - p)
else:
return extended_equiv_gaussian_sigma(logp)
else: # Array input
return Num.where(logp>-30.0,
ndtri(1.0-p),
extended_equiv_gaussian_sigma(logp))
def extended_equiv_gaussian_sigma(logp):
"""
extended_equiv_gaussian_sigma(logp):
Return the equivalent gaussian sigma corresponding
to the log of the cumulative gaussian probability logp.
In other words, return x, such that Q(x) = p, where Q(x)
is the cumulative normal distribution. This version uses
the rational approximation from Abramowitz and Stegun,
        eqn 26.2.23. Using log(P) as the input gives a much more
        extended range.
"""
t = Num.sqrt(-2.0 * logp)
num = 2.515517 + t * (0.802853 + t * 0.010328)
denom = 1.0 + t * (1.432788 + t * (0.189269 + t * 0.001308))
return t - num / denom
def log_asymtotic_incomplete_gamma(a, z):
"""
log_asymtotic_incomplete_gamma(a, z):
Return the log of the incomplete gamma function in its
        asymptotic limit as z->infty. This is from Abramowitz
and Stegun eqn 6.5.32.
"""
x = 1.0
newxpart = 1.0
term = 1.0
ii = 1
while (Num.fabs(newxpart) > 1e-15):
term *= (a - ii)
newxpart = term / z**ii
x += newxpart
ii += 1
return (a-1.0)*Num.log(z) - z + Num.log(x)
def log_asymtotic_gamma(z):
"""
log_asymtotic_gamma(z):
        Return the log of the gamma function in its asymptotic limit
as z->infty. This is from Abramowitz and Stegun eqn 6.1.41.
"""
x = (z-0.5) * Num.log(z) - z + 0.91893853320467267
y = 1.0/(z*z)
x += (((- 5.9523809523809529e-4 * y
+ 7.9365079365079365079365e-4) * y
- 2.7777777777777777777778e-3) * y
           + 8.3333333333333333333333e-2) / z
return x
def prob_sum_powers(power, nsum):
"""
prob_sum_powers(power, nsum):
Return the probability for noise to exceed 'power' in
the sum of 'nsum' normalized powers from a power spectrum.
"""
# Notes:
# prob_sum_powers(power, nsum)
# = scipy.special.gammaincc(nsum, power)
# = statdists.chi_prob(power*2, nsum*2)
# = scipy.special.chdtrc(nsum*2, power*2)
# = Q(power*2|nsum*2) (from A&S 26.4.19)
# = Gamma(nsum,power)/Gamma(nsum)
# = [Gamma(nsum) - gamma(nsum,power)]/Gamma(nsum)
return chdtrc(2*nsum, 2.0*power)
def log_prob_sum_powers(power, nsum):
"""
log_prob_sum_powers(power, nsum):
Return the log of the probability for noise to exceed
'power' in the sum of 'nsum' normalized powers from a
        power spectrum. This version allows the use of
        very large powers by using asymptotic expansions from
Abramowitz and Stegun Chap 6.
"""
# Notes:
# prob_sum_powers(power, nsum)
# = scipy.special.gammaincc(nsum, power)
# = statdists.chi_prob(power*2, nsum*2)
# = scipy.special.chdtrc(nsum*2, power*2)
# = Q(power*2|nsum*2) (from A&S 26.4.19)
# = Gamma(nsum,power)/Gamma(nsum)
# = [Gamma(nsum) - gamma(nsum,power)]/Gamma(nsum)
if type(1.0) == type(power):
if power < 100.0:
return Num.log(prob_sum_powers(power, nsum))
else:
return log_asymtotic_incomplete_gamma(nsum, power) - \
log_asymtotic_gamma(nsum)
else:
return Num.where(power < 100.0,
Num.log(prob_sum_powers(power, nsum)),
log_asymtotic_incomplete_gamma(nsum, power) - \
log_asymtotic_gamma(nsum))
def sigma_power(power):
"""
sigma_power(power):
Return the approximate equivalent Gaussian sigma for noise
to exceed a normalized power level given as 'power'
in a power spectrum.
"""
if type(1.0) == type(power):
if power > 36.0:
return Num.sqrt(2.0 * power - Num.log(PI * power))
else:
return equivalent_gaussian_sigma(prob_power(power))
else:
return Num.where(power > 36.0,
Num.sqrt(2.0 * power - Num.log(PI * power)),
extended_equiv_gaussian_sigma(log_prob_sum_powers(power, 1)))
def sigma_sum_powers(power, nsum):
"""
sigma_sum_powers(power, nsum):
Return the approximate equivalent Gaussian sigma for noise
to exceed a sum of 'nsum' normalized powers given by 'power'
in a power spectrum.
"""
if type(1.0) == type(power):
if power < 100.0:
return equivalent_gaussian_sigma(prob_sum_powers(power, nsum))
else:
return extended_equiv_gaussian_sigma(log_prob_sum_powers(power, nsum))
else: # Array input
return Num.where(power < 100.0,
equivalent_gaussian_sigma(prob_sum_powers(power, nsum)),
extended_equiv_gaussian_sigma(log_prob_sum_powers(power, nsum)))
def power_at_sigma(sigma):
"""
power_at_sigma(sigma):
Return the approximate normalized power level that is
equivalent to a detection of significance 'sigma'.
"""
return sigma**2 / 2.0 + Num.log(Num.sqrt(PIBYTWO)
* sigma)
def powersum_at_sigma(sigma, nsum):
"""
powersum_at_sigma(sigma, nsum):
Return the approximate sum of 'nsum' normalized powers that is
equivalent to a detection of significance 'sigma'.
"""
return 0.5 * chdtri(2.0 * nsum, 1.0 - ndtr(sigma))
def cand_sigma(N, power):
"""
cand_sigma(N, power):
Return the sigma of a candidate found in a power
spectrum of 'N' bins after taking into account the
number of bins searched.
"""
return ndtri(1.0 - N * prob_power(power))
def fft_max_pulsed_frac(N, numphot, sigma=3.0):
"""
fft_max_pulsed_frac(N, numphot, sigma=3.0):
Return the approximate maximum pulsed fraction for a
sinusoidal signal that _wasn't_ found in a FFT-based
search. 'N' is the number of bins searched in the FFT.
'numphot' is the number of photons present in the data.
And 'sigma' is your confidence (in sigma) that you
have in expressing this limit.
"""
# The following is the power level required to get a
# noise spike that would appear somewhere in N bins
# at the 'sigma' level
power_required = -Num.log((1.0-ndtr(sigma))/N)
return Num.sqrt(4.0 * numphot * power_required)/N
def p_to_f(p, pd, pdd=None):
"""
p_to_f(p, pd, pdd=None):
Convert period, period derivative and period second
derivative to the equivalent frequency counterparts.
Will also convert from f to p.
"""
f = 1.0 / p
fd = -pd / (p * p)
if (pdd==None):
return [f, fd]
else:
if (pdd==0.0):
fdd = 0.0
else:
fdd = 2.0 * pd * pd / (p**3.0) - pdd / (p * p)
return [f, fd, fdd]
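# Illustrative example: p_to_f(0.033, 4.2e-13) returns approximately
# [30.30, -3.86e-10], i.e. a 33 ms pulsar spinning at ~30.3 Hz and slowing down.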
def pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
"""
pferrs(porf, porferr, pdorfd=None, pdorfderr=None):
Calculate the period or frequency errors and
the pdot or fdot errors from the opposite one.
"""
if (pdorfd==None):
return [1.0 / porf, porferr / porf**2.0]
else:
forperr = porferr / porf**2.0
fdorpderr = Num.sqrt((4.0 * pdorfd**2.0 * porferr**2.0) / porf**6.0 +
pdorfderr**2.0 / porf**4.0)
[forp, fdorpd] = p_to_f(porf, pdorfd)
return [forp, forperr, fdorpd, fdorpderr]
def pdot_from_B(p, B):
"""
pdot_from_B(p, B):
Return a pdot (or p, actually) that a pulsar with spin
period (or pdot) 'p' (in sec) would experience given a
magnetic field strength 'B' in gauss.
"""
return (B / 3.2e19)**2.0 / p
def pdot_from_age(p, age):
"""
pdot_from_age(p, age):
Return the pdot that a pulsar with spin period 'p' (in sec)
would experience given a characteristic age 'age' (in yrs).
"""
return p / (2.0 * age * SECPERJULYR)
def pdot_from_edot(p, edot, I=1.0e45):
"""
pdot_from_edot(p, edot, I=1.0e45):
Return the pdot that a pulsar with spin period 'p (in sec)
would experience given an Edot 'edot' (in ergs/s) and a
moment of inertia I.
"""
return (p**3.0 * edot) / (4.0 * PI * PI * I)
def pulsar_age(f, fdot, n=3, fo=1e99):
"""
pulsar_age(f, fdot, n=3, fo=1e99):
Return the age of a pulsar (in years) given the spin frequency
and frequency derivative. By default, the characteristic age
is returned (assuming a braking index 'n'=3 and an initial
        spin frequency fo >> f). But 'n' and 'fo' can be set.
"""
return -f / ((n-1.0) * fdot) * (1.0 - (f / fo)**(n-1.0)) / SECPERJULYR
def pulsar_edot(f, fdot, I=1.0e45):
"""
pulsar_edot(f, fdot, I=1.0e45):
Return the pulsar Edot (in erg/s) given the spin frequency and
frequency derivative. The NS moment of inertia is assumed to be
I = 1.0e45 g cm^2
"""
return -4.0 * PI * PI * I * f * fdot
def pulsar_B(f, fdot):
"""
pulsar_B(f, fdot):
Return the estimated pulsar surface magnetic field strength
(in Gauss) given the spin frequency and frequency derivative.
"""
return 3.2e19 * Num.sqrt(-fdot/f**3.0)
def pulsar_B_lightcyl(f, fdot):
"""
pulsar_B_lightcyl(f, fdot):
Return the estimated pulsar magnetic field strength at the
light cylinder (in Gauss) given the spin frequency and
frequency derivative.
"""
p, pd = p_to_f(f, fdot)
return 2.9e8 * p**(-5.0/2.0) * Num.sqrt(pd)
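# Illustrative sanity check with Crab-like spin parameters (f ~ 29.95 Hz,
# fdot ~ -3.77e-10 Hz/s): pulsar_age gives ~1.3e3 yr, pulsar_edot ~4.5e38 erg/s,
# and pulsar_B ~3.8e12 G (all approximate).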
def psr_info(porf, pdorfd, time=None, input=None, I=1e45):
"""
psr_info(porf, pdorfd, time=None, input=None, I=1e45):
Print a list of standard derived pulsar parameters based
on the period (or frequency) and its first derivative. The
routine will automatically assume you are using periods if
'porf' <= 1.0 and frequencies otherwise. You can override this
by setting input='p' or 'f' appropriately. If time is specified
(duration of an observation) it will also return the Fourier
frequency 'r' and Fourier fdot 'z'. I is the NS moment of inertia.
"""
if ((input==None and porf > 1.0) or
(input=='f' or input=='F')):
pdorfd = - pdorfd / (porf * porf)
porf = 1.0 / porf
[f, fd] = p_to_f(porf, pdorfd)
print ""
print " Period = %f s" % porf
print " P-dot = %g s/s" % pdorfd
print " Frequency = %f Hz" % f
print " F-dot = %g Hz/s" % fd
if (time):
print " Fourier Freq = %g bins" % (f * time)
print " Fourier F-dot = %g bins" % (fd * time * time)
print " E-dot = %g ergs/s" % pulsar_edot(f, fd, I)
print " Surface B Field = %g gauss" % pulsar_B(f, fd)
print " Characteristic Age = %g years" % pulsar_age(f, fd)
print " Assumed I = %g g cm^2" % I
print ""
def doppler(freq_observed, voverc):
"""doppler(freq_observed, voverc):
This routine returns the frequency emitted by a pulsar
(in MHz) given that we observe the pulsar at frequency
freq_observed (MHz) while moving with radial velocity
(in units of v/c) of voverc wrt the pulsar.
"""
return freq_observed * (1.0 + voverc)
| gpl-2.0 | -6,768,480,292,747,545,000 | 38.249433 | 95 | 0.571162 | false | 3.028564 | false | false | false |
goldeneye-source/ges-python | ges/GamePlay/GunTrade.py | 1 | 10612 | ################ Copyright 2005-2016 Team GoldenEye: Source #################
#
# This file is part of GoldenEye: Source's Python Library.
#
# GoldenEye: Source's Python Library is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or(at your option) any later version.
#
# GoldenEye: Source's Python Library is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GoldenEye: Source's Python Library.
# If not, see <http://www.gnu.org/licenses/>.
#############################################################################
import math
import random
from GamePlay import GEScenario
from .Utils import GetPlayers, _
from .Utils.GEPlayerTracker import GEPlayerTracker
from random import shuffle
import GEPlayer, GEUtil, GEMPGameRules as GERules, GEGlobal as Glb, GEWeapon
USING_API = Glb.API_VERSION_1_2_0
# Organized by strength, in groups of 4. Stronger weapons are higher.
weaponList = [
"weapon_golden_pp7", "weapon_golden_gun", "weapon_rocket_launcher", "weapon_grenade_launcher",
"weapon_moonraker", "weapon_silver_pp7", "weapon_rcp90", "weapon_auto_shotgun",
"weapon_cmag", "weapon_ar33", "weapon_phantom", "weapon_shotgun",
"weapon_kf7", "weapon_knife_throwing", "weapon_sniper_rifle", "weapon_zmg",
"weapon_d5k_silenced", "weapon_d5k", "weapon_pp7", "weapon_pp7_silenced",
"weapon_klobb", "weapon_knife", "weapon_dd44", "weapon_grenade" ]
TR_WEPINDEX = "wepindex" # Index of the weapon the player has on the list above.
class GunTrade( GEScenario ):
def __init__( self ):
GEScenario.__init__( self )
self.indexQueue = [0] * 24
self.pltracker = GEPlayerTracker( self )
def GetPrintName( self ):
return "#GES_GP_GT_NAME"
def GetScenarioHelp( self, help_obj ):
help_obj.SetDescription( "#GES_GP_GT_HELP" )
def GetGameDescription( self ):
if GERules.IsTeamplay():
return "Team Gun Trade"
else:
return "Gun Trade"
def GetTeamPlay( self ):
return Glb.TEAMPLAY_TOGGLE
def OnLoadGamePlay( self ):
GEUtil.PrecacheSound( "GEGamePlay.Woosh" ) # Plays when level is lost
GERules.EnableInfiniteAmmo()
def OnUnloadGamePlay(self):
super( GunTrade, self ).OnUnloadGamePlay()
self.pltracker = None
def OnRoundBegin( self ):
GEScenario.OnRoundBegin( self )
GERules.DisableWeaponSpawns()
GERules.DisableAmmoSpawns()
#Reorder the weapon index queue and then issue a unique weapon to each player.
self.indexQueue = [0] * 24
#Take all the player's weapons away so we don't get duplicates
for player in GetPlayers():
self.pltracker[player][TR_WEPINDEX] = -1
for i in range(24):
self.indexQueue[i] = i
self.gt_QueueShuffle()
def OnPlayerConnect( self, player ):
self.pltracker[player][TR_WEPINDEX] = -1 # Give them an index of -1 so we know to give them a new weapons when they spawn.
def OnPlayerDisconnect( self, player ):
        if self.pltracker[player][TR_WEPINDEX] != -1: # Only recycle a real weapon index; -1 means they were never issued one.
            self.indexQueue.append( self.pltracker[player][TR_WEPINDEX] ) # Put their weapon back in the queue so we don't lose it.
def OnPlayerSpawn( self, player ):
if (self.pltracker[player][TR_WEPINDEX] == -1): # If we haven't been issued a weapon, pull one from the stack.
self.gt_IssueWeapon( player )
self.gt_SpawnWeapon( player )
if player.IsInitialSpawn():
GEUtil.PopupMessage( player, "#GES_GP_GT_NAME", "#GES_GPH_GT_GOAL" )
def OnPlayerKilled( self, victim, killer, weapon ):
# Let the base scenario behavior handle scoring so we can just worry about the gun swap mechanics.
GEScenario.OnPlayerKilled( self, victim, killer, weapon )
if not victim:
return
if killer and victim != killer:
# Normal kill
# Equip new weapons
wepname = weapon.GetClassname().lower()
if (wepname == "weapon_slappers" or wepname == "trigger_trap"): #Slapper kills replace the victim's weapon with a random new one.
self.gt_SubSwapWeapon( killer, victim )
            elif wepname == "player" and self.gt_GetWeaponTierOfPlayer(victim) >= self.gt_GetWeaponTierOfPlayer(killer): #Killbind griefing protection, lower tiers are better.
self.gt_SubSwapWeapon( killer, victim )
else:
self.gt_SwapWeapons( killer, victim ) #Normal swap.
#Killer ID, Victim ID, Weapon Killer Traded Away, Weapon Victim Traded Away
GEUtil.EmitGameplayEvent( "gt_weaponswap" , str( killer.GetUserID()), str( victim.GetUserID() ), weaponList[ self.pltracker[victim][TR_WEPINDEX] ], weaponList[ self.pltracker[killer][TR_WEPINDEX] ], True )
self.gt_SpawnWeapon( killer ) # Only killer gets their weapon right now.
GEUtil.PlaySoundTo( victim, "GEGamePlay.Woosh" )
GEUtil.PlaySoundTo( killer, "GEGamePlay.Woosh" )
victim.StripAllWeapons() # Victim always loses their weapons so they never drop anything, as there are no weapon pickups in this mode.
# This is used to make sure we can't pick up any weapons we're not supposed to. Players shouldn't drop weapons in this
# mode but it doesn't hurt to cut out other ways of getting weapons too.
def CanPlayerHaveItem( self, player, item ):
weapon = GEWeapon.ToGEWeapon( item )
if weapon:
name = weapon.GetClassname().lower()
wI = self.pltracker[player][TR_WEPINDEX]
if name == weaponList[wI] or name == "weapon_slappers":
return True
return False
return True
# ---------------------------
# GAMEPLAY SPECIFIC FUNCTIONS
# ---------------------------
	# We shuffle the weapon indexes this way to make sure that there's roughly an even distribution of the different
	# weapon strengths in play at any given time. Since this queue controls the weapons coming into play, having it be a controlled
	# mix means there will typically be a nice distribution of weapon strengths getting substituted in.
# Won't be perfect with higher playercounts if a bunch of a given strength of weapon gets knocked out, but that's the name of the game.
# If someone decides to get rid of all the weak/strong weapons then they'll have to deal with an oversaturated queue.
# Shuffle kind of sucks since stuff like 123443211234 can happen, but it should do the job well enough.
def gt_QueueShuffle( self ):
holdingList = [ [],[],[],[],[],[] ]
entries = len(self.indexQueue)
		# Sort the indexes into separate lists based on their strength
for i in range(entries):
holdingList[math.floor(self.indexQueue[i] / 4)].append(self.indexQueue[i])
self.indexQueue = [] # Wipe the index queue now that all of our data is in the holding list
viablelists = [] # Lists ordered by weapon strength that still have one weapon in them
unchosenlists = [] # Lists that haven't been chosen this shuffle
# Get the lists that actually have anything in them.
for i in range(6):
if holdingList[i]:
viablelists.append(i)
for i in range(24):
if not unchosenlists: # If unchosenlists is empty, refill it with all the lists that still have entries
unchosenlists = list(viablelists)
pickedlist = random.choice(unchosenlists) # Pick a random list we haven't used yet
			unchosenlists.remove(pickedlist) # This is just to make sure we get a decent mix of categories
pickedindex = random.choice(holdingList[pickedlist]) # Pick a random weapon from that list
holdingList[pickedlist].remove(pickedindex) # Then remove that weapon from the list so we don't pick it again
if not holdingList[pickedlist]: # If this list no longer has anything in it, it's not viable anymore
viablelists.remove(pickedlist)
			self.indexQueue.append(pickedindex) # Finally add it back to our index queue
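	# Illustrative sketch (assumption, not from the original code): with a full 24-index queue the
	# loop above yields an order such as [3, 12, 21, 7, 16, 9, ...], i.e. one index drawn from each
	# still-populated strength tier before any tier repeats, which keeps the mix of weapon strengths
	# roughly even as indexes are handed back out by gt_IssueWeapon().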
# Get the strength rating of the player's weapon.
def gt_GetWeaponTierOfPlayer( self, player ):
if not player:
return -1
return math.floor(self.pltracker[player][TR_WEPINDEX] / 4)
# Give the player a weapon from the queue and add their existing one to the queue if they have one, then return it.
def gt_IssueWeapon( self, player ):
if not player:
return
if (self.pltracker[player][TR_WEPINDEX] != -1):
self.indexQueue.append( self.pltracker[player][TR_WEPINDEX] )
self.pltracker[player][TR_WEPINDEX] = self.indexQueue.pop(0) #Pull the index at the bottom of the queue and give it to the player.
return self.pltracker[player][TR_WEPINDEX]
# Actually give the player their weapon.
def gt_SpawnWeapon( self, player ):
if not player:
return
player.StripAllWeapons()
player.GiveNamedWeapon( "weapon_slappers", 0 )
player.GiveNamedWeapon( weaponList[ self.pltracker[player][TR_WEPINDEX] ], 800 ) # We don't care about ammo because it is infinite.
player.WeaponSwitch( weaponList[ self.pltracker[player][TR_WEPINDEX] ] )
# Swap weapons
def gt_SwapWeapons( self, player1, player2 ):
if not player1 or not player2:
return
index1 = self.pltracker[player1][TR_WEPINDEX]
self.pltracker[player1][TR_WEPINDEX] = self.pltracker[player2][TR_WEPINDEX]
self.pltracker[player2][TR_WEPINDEX] = index1
# Swap weapons and substitute in a new one for player1, telling the players what got swapped.
def gt_SubSwapWeapon( self, player1, player2 ):
if not player1 or not player2:
return
self.gt_SwapWeapons( player1, player2 )
oldwep = self.pltracker[player1][TR_WEPINDEX]
newwep = self.gt_IssueWeapon( player1 )
msg = _( "#GES_GP_GT_SWAP", GEWeapon.WeaponPrintName(weaponList[oldwep]), GEWeapon.WeaponPrintName(weaponList[newwep]) )
GEUtil.PostDeathMessage( msg )
| gpl-3.0 | -6,168,713,502,720,763,000 | 42.314286 | 217 | 0.651715 | false | 3.582714 | false | false | false |
markgw/pimlico | src/python/pimlico/core/dependencies/licenses.py | 1 | 5685 | # This file is part of Pimlico
# Copyright (C) 2020 Mark Granroth-Wilding
# Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html
"""
Software licenses, for referring to in software dependency documentation.
Literals here are used to refer to the licenses that software uses.
See https://choosealicense.com/licenses/ for more details and comparison.
"""
class SoftwareLicense(object):
def __init__(self, name, description=None, url=None):
self.url = url
self.description = description
self.name = name
GNU_AGPL_V3 = SoftwareLicense(
"GNU AGPLv3",
description="""\
Permissions of this strongest copyleft license are conditioned on making available complete source code of
licensed works and modifications, which include larger works using a licensed work, under the same license. Copyright
and license notices must be preserved. Contributors provide an express grant of patent rights. When a modified
version is used to provide a service over a network, the complete source code of the modified version must be made
available.
""",
url="https://www.gnu.org/licenses/agpl-3.0.en.html"
)
GNU_GPL_V3 = SoftwareLicense(
"GNU GPLv3",
description="""\
Permissions of this strong copyleft license are conditioned on making available complete source code of licensed
works and modifications, which include larger works using a licensed work, under the same license. Copyright and
license notices must be preserved. Contributors provide an express grant of patent rights.
""",
url="https://www.gnu.org/licenses/gpl-3.0.html"
)
GNU_LGPL_V3 = SoftwareLicense(
"GNU LGPLv3",
description="""\
Permissions of this copyleft license are conditioned on making available complete source code of licensed works
and modifications under the same license or the GNU GPLv3. Copyright and license notices must be preserved.
Contributors provide an express grant of patent rights. However, a larger work using the licensed work through
interfaces provided by the licensed work may be distributed under different terms and without source code for the
larger work.
""",
url="https://www.gnu.org/licenses/lgpl-3.0.html"
)
GNU_LGPL_V2 = SoftwareLicense(
"GNU LGPLv2",
description="""\
Permissions of this copyleft license are conditioned on making available complete source code of licensed works
and modifications under the same license or the GNU GPLv2. Copyright and license notices must be preserved.
Contributors provide an express grant of patent rights. However, a larger work using the licensed work through
interfaces provided by the licensed work may be distributed under different terms and without source code for the
larger work.
""",
url="https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html"
)
MOZILLA_V2 = SoftwareLicense(
"Mozilla Public License 2.0",
description="""\
Permissions of this weak copyleft license are conditioned on making available source code of licensed files and
modifications of those files under the same license (or in certain cases, one of the GNU licenses). Copyright and
license notices must be preserved. Contributors provide an express grant of patent rights. However, a larger work
using the licensed work may be distributed under different terms and without source code for files added in the
larger work.
""",
url="https://www.mozilla.org/en-US/MPL/"
)
APACHE_V2 = SoftwareLicense(
"Apache License 2.0",
description="""\
A permissive license whose main conditions require preservation of copyright and license notices. Contributors
provide an express grant of patent rights. Licensed works, modifications, and larger works may be distributed under
different terms and without source code.
""",
url="https://www.apache.org/licenses/LICENSE-2.0"
)
MIT = SoftwareLicense(
"MIT License",
description="""\
A short and simple permissive license with conditions only requiring preservation of copyright and license notices.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
""",
url="https://opensource.org/licenses/MIT"
)
BOOST = SoftwareLicense(
"Boost Software License 1.0",
description="""\
A simple permissive license only requiring preservation of copyright and license notices for source (and not binary)
distribution. Licensed works, modifications, and larger works may be distributed under different terms and without
source code.
""",
url="https://www.boost.org/users/license.html"
)
UNLICENSE = SoftwareLicense(
"The Unlicense",
description="""\
A license with no conditions whatsoever which dedicates works to the public domain. Unlicensed works, modifications,
and larger works may be distributed under different terms and without source code.
""",
url="https://unlicense.org/"
)
BSD = SoftwareLicense(
"BSD License, 3-clause",
url="https://opensource.org/licenses/BSD-3-Clause",
)
BSD_2CLAUSE = SoftwareLicense(
"BSD License, 2-clause",
url="https://opensource.org/licenses/BSD-2-Clause"
)
PSF = SoftwareLicense(
"Python Software Foundation License",
url="https://docs.python.org/3/license.html"
)
NOT_RELEVANT = SoftwareLicense(
"Not relevant for licensing",
description="This is simply a placeholder to denote that it is not relevant to say what the license of "
"the software in question is. For example, it might be part of some other licensed software, "
"whose license is already covered elsewhere."
)
# Mark this license as the one we use for Pimlico itself, for easy reference
pimlico_license = GNU_LGPL_V2
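# Minimal usage sketch (illustrative only, not part of the original module): callers can simply
# reference one of the constants above and read its attributes, e.g.
#
#     from pimlico.core.dependencies.licenses import BSD
#     print("Licensed under %s: %s" % (BSD.name, BSD.url))
#
# which relies only on the ``name``/``url`` attributes defined on ``SoftwareLicense``.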
| gpl-3.0 | 9,130,525,526,853,532,000 | 39.319149 | 118 | 0.753386 | false | 4.026204 | false | false | false |
MobleyLab/Lomap | lomap/graphgen.py | 2 | 39722 | #******************
# MODULE DOCSTRING
#******************
"""
LOMAP: Graph generation
=====
Alchemical free energy calculations hold increasing promise as an aid to drug
discovery efforts. However, applications of these techniques in discovery
projects have been relatively few, partly because of the difficulty of planning
and setting up calculations. The Lead Optimization Mapper (LOMAP) is an
automated algorithm to plan efficient relative free energy calculations between
potential ligands within a substantial set of compounds.
"""
#*****************************************************************************
# Lomap2: A toolkit to plan alchemical relative binding affinity calculations
# Copyright 2015 - 2016 UC Irvine and the Authors
#
# Authors: Dr Gaetano Calabro' and Dr David Mobley
#
# This part of the code has been originally made by Jonathan Redmann,
# and Christopher Summa at Summa Lab, Dept. of Computer Science,
# University of New Orleans and it has just been adapded to the new Lomap code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see http://www.gnu.org/licenses/
#*****************************************************************************
#****************
# MODULE IMPORTS
#****************
import networkx as nx
import numpy as np
import sys
import matplotlib.pyplot as plt
import copy
from operator import itemgetter
from rdkit.Chem import Draw
from rdkit.Chem import AllChem
import os.path
import logging
from PyQt4 import QtGui
import tempfile
import shutil
__all__ = ['GraphGen']
#*************************
# Graph Class
#*************************
class GraphGen(object):
"""
This class is used to set and generate the graph used to plan
    binding free energy calculations
"""
def __init__(self, dbase):
"""
        Initialization function
Parameters
----------
dbase : dbase object
the molecule container
"""
self.dbase = dbase
self.maxPathLength = dbase.options.max
self.similarityScoresLimit = dbase.options.cutoff
if dbase.options.radial:
self.lead_index = self.pick_lead()
else:
self.lead_index = None
# A set of nodes that will be used to save nodes that are not a cycle cover for a given subgraph
self.nonCycleNodesSet = set()
# Draw Parameters
# THIS PART MUST BE CHANGED
# Max number of displayed chemical compound images as graph nodes
self.max_images = 2000
# Max number of displayed nodes in the graph
self.max_nodes = 100
# The maximum threshold distance in angstroms unit used to select if a molecule is depicted
self.max_mol_size = 50.0
self.edge_labels = False
# The following Section has been strongly copied/adapted from the original implementation
# Generate a list related to the disconnected graphs present in the initial graph
if dbase.options.fast and dbase.options.radial:
#only enable the fast map option if use the radial option
self.initialSubgraphList = self.generateInitialSubgraphList(fast_map = True)
else:
self.initialSubgraphList = self.generateInitialSubgraphList()
        # A list of elements made of [edge, weights] for each subgraph
self.subgraphScoresLists = self.generateSubgraphScoresLists(self.initialSubgraphList)
        # Eliminates from each subgraph those edges whose weights are less than the hard limit
self.removeEdgesBelowHardLimit()
# Make a new master list of subgraphs now that there may be more disconnected components
self.workingSubgraphsList = self.generateWorkingSubgraphsList()
# Make a new sorted list of [edge, weights] for each subgraph now that there may be new subgraphs
self.workingSubgraphScoresLists = self.generateSubgraphScoresLists(self.workingSubgraphsList)
# Remove edges, whose removal does not violate constraints, from the subgraphs,
# starting with lowest similarity score first
if dbase.options.fast and dbase.options.radial:
#if we use the fast and radial option, just need to add the surrounding edges from the initial graph
self.resultGraph = self.addsurroundEdges()
#after adding the surround edges, some subgraphs may merge into a larger graph and so need to update the current subgraphs
#self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList)
#merge all Subgraphs together for layout
#self.resultGraph = self.mergeAllSubgraphs()
else:
#>>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
self.minimizeEdges()
#>>>>>>>>>>>>>>>>>>>>>>>>>>>ISSUE ORDER PROBLEM<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# Collect together disjoint subgraphs of like charge into subgraphs
self.resultingSubgraphsList = copy.deepcopy(self.workingSubgraphsList)
            # Combine separate subgraphs into a single resulting graph
self.resultGraph = self.mergeAllSubgraphs()
# Make a copy of the resulting graph for later processing in connectResultingComponents()
self.copyResultGraph = self.resultGraph.copy()
# Holds list of edges that were added in the connect components phase
self.edgesAddedInFirstTreePass = []
# Add edges to the resultingGraph to connect its components
self.connectSubgraphs()
return
def pick_lead(self):
if (self.dbase.nums() * (self.dbase.nums() - 1)/2) != self.dbase.strict_mtx.size:
raise ValueError("There are errors in the similarity score matrices")
if not self.dbase.options.hub == "None":
#hub radial option. Use the provided reference compound as a hub
hub_index = None
for i in range(0, self.dbase.nums()):
if os.path.basename(self.dbase[i].getName()) == self.dbase.options.hub:
hub_index = i
if hub_index is None:
logging.info("Warning: the specified center ligand %s is not in the ligand database, will not use the radial option."%self.dbase.options.hub)
return hub_index
else:
#complete radial option. Pick the compound with the highest total similarity to all other compounds to use as a hub
all_sum_i = []
for i in range(0, self.dbase.nums()):
sum_i = 0
for j in range(0, self.dbase.nums()):
sum_i += self.dbase.strict_mtx[i,j]
all_sum_i.append(sum_i)
max_value = max(all_sum_i)
max_index = [i for i, x in enumerate(all_sum_i) if x == max_value]
max_index_final = max_index[0]
return max_index_final
def generateInitialSubgraphList(self, fast_map = False):
"""
This function generates a starting graph connecting with edges all the
compounds with a positive strict similarity score
Returns
-------
initialSubgraphList : list of NetworkX graph
the list of connected component graphs
"""
compound_graph = nx.Graph()
if (self.dbase.nums() * (self.dbase.nums() - 1)/2) != self.dbase.strict_mtx.size:
raise ValueError("There are errors in the similarity score matrices")
if not fast_map:
#if not fast map option, connect all possible nodes to generate the initial graph
for i in range(0, self.dbase.nums()):
if i==0:
compound_graph.add_node(i,ID=self.dbase[i].getID(), fname_comp = os.path.basename(self.dbase[i].getName()))
for j in range(i+1, self.dbase.nums()):
if i == 0:
compound_graph.add_node(j,ID=self.dbase[j].getID(), fname_comp = os.path.basename(self.dbase[j].getName()))
wgt = self.dbase.strict_mtx[i,j]
if wgt > 0.0:
compound_graph.add_edge(i,j,similarity = wgt, strict_flag = True)
else:
#if fast map option, then add all possible radial edges as the initial graph
for i in range(0, self.dbase.nums()):
#add the node for i
compound_graph.add_node(i,ID=self.dbase[i].getID(), fname_comp = os.path.basename(self.dbase[i].getName()))
if i != self.lead_index:
wgt = self.dbase.strict_mtx[i, self.lead_index]
if wgt > 0:
compound_graph.add_edge(i,self.lead_index,similarity = wgt, strict_flag = True)
initialSubgraphGen = nx.connected_component_subgraphs(compound_graph)
initialSubgraphList = [x for x in initialSubgraphGen]
return initialSubgraphList
def generateSubgraphScoresLists(self, subgraphList):
"""
        This function generates a list of lists where each inner list is the
weights of each edge in a given subgraph in the subgraphList,
sorted from lowest to highest
Returns
-------
subgraphScoresLists : list of lists
            each list contains tuples with the graph node indexes and their
            similarity as weight
"""
subgraphScoresLists = []
for subgraph in subgraphList:
weightsDictionary = nx.get_edge_attributes(subgraph, 'similarity')
subgraphWeightsList = [(edge[0], edge[1], weightsDictionary[edge]) for edge in weightsDictionary.keys()]
subgraphWeightsList.sort(key = lambda entry: entry[2])
subgraphScoresLists.append(subgraphWeightsList)
return subgraphScoresLists
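    # Illustrative example (assumption): for two subgraphs the structure returned above looks like
    #   [[(0, 3, 0.12), (0, 1, 0.45), (1, 3, 0.77)],
    #    [(4, 5, 0.30)]]
    # i.e. one inner list per subgraph, each sorted by ascending similarity score.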
def removeEdgesBelowHardLimit(self):
"""
This function removes edges below the set hard limit from each subGraph
and from each weightsList
"""
totalEdges = 0
for subgraph in self.initialSubgraphList:
weightsList = self.subgraphScoresLists[self.initialSubgraphList.index(subgraph)]
index = 0
for edge in weightsList:
if edge[2] < self.similarityScoresLimit:
subgraph.remove_edge(edge[0],edge[1])
index = weightsList.index(edge)
del weightsList[:index + 1]
totalEdges = totalEdges + subgraph.number_of_edges()
#print "Removed = ", totalEdges
def generateWorkingSubgraphsList(self):
"""
        After the deletion of the edges that have a weight less than the
        selected threshold, the subgraphs may be disconnected and a new master
        list of connected subgraphs is generated
Returns
-------
        workingSubgraphsList : list of NetworkX graph
            the list of connected component subgraphs rebuilt after the
            edge removal
"""
workingSubgraphsList = []
for subgraph in self.initialSubgraphList:
newSubgraphList = nx.connected_component_subgraphs(subgraph)
for newSubgraph in newSubgraphList:
workingSubgraphsList.append(newSubgraph)
return workingSubgraphsList
def minimizeEdges(self):
"""
Minimize edges in each subgraph while ensuring constraints are met
"""
for subgraph in self.workingSubgraphsList:
weightsList = self.workingSubgraphScoresLists[self.workingSubgraphsList.index(subgraph)]
# ISSUE ORDER IS ORIGINATED HERE
#weightsList = sorted(weightsList, key = itemgetter(1))
# This part has been copied from the original code
self.nonCycleNodesSet = self.findNonCyclicNodes(subgraph)
numberOfComponents = nx.number_connected_components(subgraph)
            if len(subgraph.edges()) > 2: # Graphs must have at least 3 edges to be minimized
for edge in weightsList:
if self.lead_index is not None:
                        # Here the radial option is applied: check whether the edge to be removed is connected to the hub (lead) compound; if it is, add it back into the graph.
if self.lead_index not in [edge[0], edge[1]]:
subgraph.remove_edge(edge[0], edge[1])
if self.checkConstraints(subgraph, numberOfComponents) == False:
subgraph.add_edge(edge[0], edge[1], similarity = edge[2], strict_flag = True)
else:
subgraph.remove_edge(edge[0], edge[1])
if self.checkConstraints(subgraph, numberOfComponents) == False:
subgraph.add_edge(edge[0], edge[1], similarity = edge[2], strict_flag = True)
def addsurroundEdges(self):
"""
        Add surrounding edges in each subgraph to make sure all nodes are in a cycle
"""
for subgraph in self.workingSubgraphsList:
subgraph_nodes = subgraph.nodes()
if self.lead_index in subgraph_nodes:
#here we only consider the subgraph with lead compound
self.nonCycleNodesSet = self.findNonCyclicNodes(subgraph)
for node in self.nonCycleNodesSet:
                    # For each node in the non-cycle node set, compute the fingerprint similarity to all other surrounding nodes, pick the one with the max score, and connect them
node_score_list = []
for i in range(0, self.dbase.nums()):
if i != node and i != self.lead_index:
node_score_list.append(self.dbase.strict_mtx[node, i])
else:
node_score_list.append(0.0)
max_value = max(node_score_list)
if max_value > self.similarityScoresLimit:
max_index = [i for i, x in enumerate(node_score_list) if x == max_value]
max_index_final = max_index[0]
subgraph.add_edge(node, max_index_final, similarity = self.dbase.strict_mtx[node, max_index_final], strict_flag = True )
return subgraph
def findNonCyclicNodes(self, subgraph):
"""
Generates a list of nodes of the subgraph that are not in a cycle
Parameters
---------
subgraph : NetworkX subgraph obj
            the subgraph to check for non-cycle nodes
Returns
-------
missingNodesSet : set of graph nodes
the set of graph nodes that are not in a cycle
"""
missingNodesSet = set()
cycleNodes = []
cycleList = nx.cycle_basis(subgraph)
cycleNodes = [node for cycle in cycleList for node in cycle]
missingNodesSet = set([node for node in subgraph.nodes() if node not in cycleNodes])
return missingNodesSet
def checkConstraints(self, subgraph, numComp):
"""
Determine if the given subgraph still meets the constraints
Parameters
----------
subgraph : NetworkX subgraph obj
the subgraph to check for the constraints
numComp : int
            the number of connected components
Returns
-------
constraintsMet : bool
True if all the constraints are met, False otherwise
"""
constraintsMet = True
if not self.remainsConnected(subgraph, numComp):
constraintsMet = False
if constraintsMet:
if not self.checkCycleCovering(subgraph):
constraintsMet = False
if constraintsMet:
if not self.checkMaxDistance(subgraph):
                constraintsMet = False
return constraintsMet
def remainsConnected(self, subgraph, numComponents):
"""
Determine if the subgraph remains connected after an edge has been
removed
Parameters
---------
subgraph : NetworkX subgraph obj
            the subgraph to check for connection after the edge deletion
numComp : int
            the number of connected components
Returns
-------
isConnected : bool
True if the subgraph is connected, False otherwise
"""
isConnected = False
if numComponents == nx.number_connected_components(subgraph): isConnected = True
return isConnected
def checkCycleCovering(self, subgraph):
"""
Checks if the subgraph has a cycle covering
Parameters
---------
subgraph : NetworkX subgraph obj
            the subgraph to check for a cycle covering after the edge deletion
Returns
-------
hasCovering : bool
True if the subgraph has a cycle covering, False otherwise
"""
hasCovering = False
# if it is not the same set as before
if(not self.findNonCyclicNodes(subgraph).difference(self.nonCycleNodesSet)): hasCovering = True
return hasCovering
def checkMaxDistance(self, subgraph):
"""
Check to see if the graph has paths from all compounds to all other
compounds within the specified limit
Parameters
---------
subgraph : NetworkX subgraph obj
the subgraph to check for the max distance between nodes
Returns
-------
withinMaxDistance : bool
True if the subgraph has all the nodes within the specified
max distance
"""
withinMaxDistance = True
for node in subgraph:
eccentricity = nx.eccentricity(subgraph, node)
if eccentricity > self.maxPathLength: withinMaxDistance = False
return withinMaxDistance
def mergeAllSubgraphs(self):
"""Generates a single networkx graph object from the subgraphs that have
been processed
Returns
-------
finalGraph : NetworkX graph obj
            the final graph produced by merging all the subgraphs. The produced
            graph may have disconnected parts
"""
finalGraph = nx.Graph()
for subgraph in self.workingSubgraphsList:
finalGraph = nx.union(finalGraph, subgraph)
return finalGraph
def connectSubgraphs(self):
"""
        Adds edges to the resultGraph to connect as many components of the final
        graph as possible
"""
connectSuccess = self.connectGraphComponents_brute_force()
while (connectSuccess) :
connectSuccess = self.connectGraphComponents_brute_force()
# WARNING: The self.workingSubgraphsList at this point is different from
# the copy self.resultingSubgraphsList made before
connectSuccess = self.connectGraphComponents_brute_force_2()
while (connectSuccess) :
connectSuccess = self.connectGraphComponents_brute_force_2()
def connectGraphComponents_brute_force(self):
"""
Adds edges to the resultGraph to connect all components that can be
connected, only one edge is added per component, to form a tree like
structure between the different components of the resultGraph
Returns
-------
bool
True if the addition of edges was possible in strict mode, False otherwise
"""
generator_graph = nx.connected_component_subgraphs(self.resultGraph)
self.workingSubgraphsList = [x for x in generator_graph]
if len(self.workingSubgraphsList) == 1:
return False
edgesToCheck = []
edgesToCheckAdditionalInfo = []
numzeros = 0
for i in range(0,len(self.workingSubgraphsList)):
nodesOfI = self.workingSubgraphsList[i].nodes()
for j in range(i+1,len(self.workingSubgraphsList)):
nodesOfJ = self.workingSubgraphsList[j].nodes()
for k in range(0,len(nodesOfI)):
                    for l in range(0,len(nodesOfJ)):
                        # Produce an edge between nodesOfI[k] and nodesOfJ[l]; if its weight is nonzero, push the edge into the candidate edge list
                        #print 'Molecules (%d,%d)' % (nodesOfI[k],nodesOfJ[l])
                        # I assumed that the score matrix is symmetric. In the Graph part this does not seem to be true: <<<<<<<<<<<<<DEBUG>>>>>>>>>>>>>>>
similarity = self.dbase.loose_mtx[nodesOfI[k],nodesOfJ[l]]
if similarity > 0.0 :
edgesToCheck.append((nodesOfI[k], nodesOfJ[l], similarity))
edgesToCheckAdditionalInfo.append((nodesOfI[k], nodesOfJ[l], similarity, i, j))
else :
numzeros = numzeros + 1
if len(edgesToCheck) > 0:
sortedList = sorted(edgesToCheck, key = itemgetter(2), reverse=True)
sortedListAdditionalInfo = sorted(edgesToCheckAdditionalInfo, key = itemgetter(2), reverse=True)
edgeToAdd = sortedList[0]
#self.edgeFile.write("\n" + str(edgeToAdd))
edgeToAddAdditionalInfo = sortedListAdditionalInfo[0]
self.edgesAddedInFirstTreePass.append(edgeToAdd)
self.resultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag = False)
generator_graph = nx.connected_component_subgraphs(self.resultGraph)
self.workingSubgraphsList = [x for x in generator_graph]
return True
else:
return False
def connectGraphComponents_brute_force_2(self):
"""
Adds a second edge between each of the (former) components of the
resultGraph to try to provide cycles between (former) components
Returns
-------
bool
True if the addition of edges was possible in loose mode, False otherwise
"""
if len(self.resultingSubgraphsList) == 1:
return False
edgesToCheck = []
for i in range(0,len(self.resultingSubgraphsList)):
nodesOfI = self.resultingSubgraphsList[i].nodes()
for j in range(i+1,len(self.resultingSubgraphsList)):
nodesOfJ = self.resultingSubgraphsList[j].nodes()
#print '(%d,%d)' % (i,j)
for k in range(0,len(nodesOfI)):
                    for l in range(0,len(nodesOfJ)):
                        # Produce an edge between nodesOfI[k] and nodesOfJ[l]; if its weight is nonzero, push the edge into the candidate edge list
                        #print 'Molecules (%d,%d)' % (nodesOfI[k],nodesOfJ[l])
                        # I assumed that the score matrix is symmetric. In the Graph part this does not seem to be true: <<<<<<<<<<<<<DEBUG>>>>>>>>>>>>>>>
similarity = self.dbase.loose_mtx[nodesOfI[k],nodesOfJ[l]]
if (similarity > 0.0):
edgesToCheck.append((nodesOfI[k], nodesOfJ[l], similarity))
finalEdgesToCheck = [edge for edge in edgesToCheck if edge not in self.edgesAddedInFirstTreePass]
if len(finalEdgesToCheck) > 0:
sortedList = sorted(finalEdgesToCheck, key = itemgetter(2), reverse=True)
edgeToAdd = sortedList[0]
self.resultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag = False)
self.copyResultGraph.add_edge(edgeToAdd[0], edgeToAdd[1], similarity=edgeToAdd[2], strict_flag = False)
generator_graph = nx.connected_component_subgraphs(self.copyResultGraph)
self.resultingSubgraphsList = [x for x in generator_graph]
return True
else:
return False
def getGraph(self):
"""
Returns the final generated NetworkX graph
"""
return self.resultGraph
def generate_depictions(self):
def max_dist_mol(mol):
max_dist = 0.0
conf = mol.GetConformer()
for i in range(0,conf.GetNumAtoms()):
crdi = np.array([conf.GetAtomPosition(i).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(i).z])
for j in range(i+1,conf.GetNumAtoms()):
crdj = np.array([conf.GetAtomPosition(j).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(j).z])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist
directory_name = tempfile.mkdtemp()
temp_graph = self.resultGraph.copy()
if nx.number_of_nodes(temp_graph) <= self.max_images:
#Draw.DrawingOptions.atomLabelFontSize=30
#Draw.DrawingOptions.dotsPerAngstrom=100
for n in temp_graph:
id_mol = temp_graph.node[n]['ID']
mol = self.dbase[id_mol].getMolecule()
max_dist = max_dist_mol(mol)
if max_dist < self.max_mol_size:
fname = os.path.join(directory_name, self.dbase[id_mol].getName() + ".png")
                    # 1. Modify here to calculate the 2D structure for ligands whose hydrogens cannot be removed by RDKit
#2, change the graph size to get better resolution
try:
mol = AllChem.RemoveHs(mol)
AllChem.Compute2DCoords(mol)
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
DrawingOptions.bondLineWidth = 2.5
Draw.MolToFile(mol, fname, size=(200,200), kekulize=False, fitimage=True, imageType='png', options=DrawingOptions)
except:
######need to ask RDKit to fix this if possible, see the code issue tracker for more details######
logging.info("Error attempting to remove hydrogens for molecule %s using RDKit. RDKit cannot kekulize the molecule"%self.dbase[id_mol].getName())
AllChem.Compute2DCoords(mol)
from rdkit.Chem.Draw.MolDrawing import DrawingOptions
DrawingOptions.bondLineWidth = 2.5
Draw.MolToFile(mol, fname, size=(200,200), kekulize=False, fitimage=True, imageType='png', options=DrawingOptions)
temp_graph.node[n]['image'] = fname
#self.resultGraph.node[n]['label'] = ''
temp_graph.node[n]['labelloc'] = 't'
temp_graph.node[n]['penwidth'] =2.5
#self.resultGraph.node[n]['xlabel'] = self.resultGraph.node[n]['ID']
for u,v,d in temp_graph.edges(data=True):
if d['strict_flag']==True:
temp_graph[u][v]['color'] = 'cyan'
temp_graph[u][v]['penwidth'] = 2.5
else:
temp_graph[u][v]['color'] = 'red'
temp_graph[u][v]['penwidth'] = 2.5
nx.nx_agraph.write_dot(temp_graph, self.dbase.options.name+'_tmp.dot')
cmd = 'dot -Tpng ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.png'
os.system(cmd)
cmd = 'dot -Teps ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.eps'
os.system(cmd)
cmd = 'dot -Tpdf ' + self.dbase.options.name + '_tmp.dot -o ' + self.dbase.options.name + '.pdf'
os.system(cmd)
os.remove(self.dbase.options.name+'_tmp.dot')
shutil.rmtree(directory_name, ignore_errors=True)
#The function to output the score and connectivity txt file
def layout_info(self):
#pass the lead compound index if the radial option is on and generate the morph type of output required by FESetup
if self.lead_index is not None:
morph_txt = open(self.dbase.options.name+"_morph.txt", "w")
morph_data = "morph_pairs = "
info_txt = open(self.dbase.options.name+"_score_with_connection.txt", "w")
all_key_id = self.dbase.dic_mapping.keys()
data = ["%-10s,%-10s,%-25s,%-25s,%-15s,%-15s,%-15s,%-10s\n"%("Index_1", "Index_2","Filename_1","Filename_2", "Erc_sim","Str_sim", "Loose_sim", "Connect")]
for i in range (len(all_key_id)-1):
for j in range(i+1, len(all_key_id)):
morph_string = None
connected = False
try:
similarity = self.resultGraph.edge[i][j]['similarity']
#print "Check the similarity", similarity
connected = True
except:
pass
Filename_i = self.dbase.dic_mapping[i]
Filename_j = self.dbase.dic_mapping[j]
#print "Check the filename", Filename_i, Filename_j
strict_similarity = self.dbase.strict_mtx[i,j]
loose_similarity = self.dbase.loose_mtx[i,j]
ecr_similarity = self.dbase.ecr_mtx[i,j]
if connected:
new_line = "%-10s,%-10s,%-25s,%-25s,%-15.2f,%-15.5f,%-15.5f,%-10s\n"%(i, j, Filename_i, Filename_j, ecr_similarity, strict_similarity, loose_similarity, "Yes")
#generate the morph type, and pick the start ligand based on the similarity
if self.lead_index is not None:
morph_i = Filename_i.split(".")[0]
morph_j = Filename_j.split(".")[0]
if i == self.lead_index:
morph_string = "%s > %s, "%(morph_i, morph_j)
elif j == self.lead_index:
morph_string = "%s > %s, "%(morph_j, morph_i)
else:
#compare i and j with the lead compound, and pick the one with the higher similarity as the start ligand
similarity_i = self.dbase.strict_mtx[self.lead_index, i]
similarity_j = self.dbase.strict_mtx[self.lead_index, j]
if similarity_i> similarity_j:
morph_string = "%s > %s, "%(morph_i, morph_j)
else:
morph_string = "%s > %s, "%(morph_j, morph_i)
morph_data += morph_string
else:
new_line = "%-10s,%-10s,%-25s,%-25s,%-15.2f,%-15.5f,%-15.5f,%-10s\n"%(i, j, Filename_i, Filename_j, ecr_similarity, strict_similarity, loose_similarity, "No")
data.append(new_line)
info_txt.writelines(data)
if self.lead_index is not None:
morph_txt.write(morph_data)
def writeGraph(self):
"""
This function writes to a file the final generated NetworkX graph as
.dot and the .ps files. The mapping between molecule IDs and compounds
name is saved as text file
"""
try:
self.dbase.write_dic()
self.layout_info()
except Exception as e:
raise IOError("%s: %s.txt" % (str(e), self.dbase.options.name))
try:
self.generate_depictions()
nx.nx_agraph.write_dot(self.resultGraph, self.dbase.options.name+'.dot')
except Exception as e:
raise IOError('Problems during the file generation: %s' % str(e))
logging.info(30*'-')
logging.info('The following files have been generated:\n%s.dot\tGraph file\n%s.png\tPng file\n%s.txt\tMapping Text file' % (self.dbase.options.name, self.dbase.options.name, self.dbase.options.name ))
logging.info(30*'-')
return
###### Still in developing stage ######
def draw(self):
"""
This function plots the NetworkX graph by using Matplotlib
"""
logging.info('\nDrawing....')
if nx.number_of_nodes(self.resultGraph) > self.max_nodes:
logging.info('The number of generated graph nodes %d exceed the max number of drawable nodes %s' % (nx.number_of_nodes(self.resultGraph), self.max_nodes))
return
def max_dist_mol(mol):
max_dist = 0.0
conf = mol.GetConformer()
for i in range(0,conf.GetNumAtoms()):
crdi = np.array([conf.GetAtomPosition(i).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(i).z])
for j in range(i+1,conf.GetNumAtoms()):
crdj = np.array([conf.GetAtomPosition(j).x,conf.GetAtomPosition(i).y,conf.GetAtomPosition(j).z])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist
# Determine the screen resolution by using PyQt4
app = QtGui.QApplication([])
screen_resolution = app.desktop().screenGeometry()
# Canvas scale factor
scale_canvas = 0.75
# Canvas resolution
max_canvas_size = (int(screen_resolution.width() * scale_canvas) , int(screen_resolution.height() * scale_canvas))
fig = plt.figure(1,facecolor='white')
fig.set_dpi(100)
fig.set_size_inches(max_canvas_size[0]/fig.get_dpi(), max_canvas_size[1]/fig.get_dpi(), forward=True)
ax = plt.subplot(111)
plt.axis('off')
pos=nx.nx_agraph.graphviz_layout( self.resultGraph, prog="neato")
strict_edges = [(u,v) for (u,v,d) in self.resultGraph.edges(data=True) if d['strict_flag'] == True]
loose_edges = [(u,v) for (u,v,d) in self.resultGraph.edges(data=True) if d['strict_flag'] == False]
node_labels = dict([(u, d['ID']) for u,d in self.resultGraph.nodes(data=True)])
#Draw nodes
nx.draw_networkx_nodes(self.resultGraph, pos , node_size=500, node_color='r')
#Draw node labels
nx.draw_networkx_labels(self.resultGraph, pos,labels=node_labels,font_size=10)
if self.edge_labels:
edge_weight_strict = dict([((u,v,), d['similarity']) for u,v,d in self.resultGraph.edges(data=True) if d['strict_flag'] == True])
edge_weight_loose = dict([((u,v,), d['similarity']) for u,v,d in self.resultGraph.edges(data=True) if d['strict_flag'] == False])
for key in edge_weight_strict:
edge_weight_strict[key] = round(edge_weight_strict[key],2)
for key in edge_weight_loose:
edge_weight_loose[key] = round(edge_weight_loose[key],2)
#edge strict
nx.draw_networkx_edge_labels(self.resultGraph, pos, edge_labels=edge_weight_strict, font_color='g')
#edge loose
nx.draw_networkx_edge_labels(self.resultGraph, pos, edge_labels=edge_weight_loose, font_color='r')
#edges strict
nx.draw_networkx_edges(self.resultGraph, pos, edgelist=strict_edges, edge_color='g')
#edges loose
nx.draw_networkx_edges(self.resultGraph, pos, edgelist=loose_edges, edge_color='r')
if nx.number_of_nodes(self.resultGraph) <= self.max_images:
trans = ax.transData.transform
trans2 = fig.transFigure.inverted().transform
cut = 1.0
frame = 10
xmax = cut * max(xx for xx, yy in pos.values()) + frame
ymax = cut * max(yy for xx, yy in pos.values()) + frame
xmin = cut * min(xx for xx, yy in pos.values()) - frame
ymin = cut * min(yy for xx, yy in pos.values()) - frame
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
h = 20
w = 20
mol_size = (200,200)
for each_node in self.resultGraph:
id_mol = self.resultGraph.node[each_node]['ID']
#skip remove Hs by rdkit if Hs cannot be removed
try:
mol = AllChem.RemoveHs(self.dbase[id_mol].getMolecule())
except:
######need to ask RDKit to fix this if possible, see the code issue tracker for more details######
mol = self.dbase[id_mol].getMolecule()
logging.info("Error attempting to remove hydrogens for molecule %s using RDKit. RDKit cannot kekulize the molecule"%self.dbase[id_mol].getName())
# max_dist = max_dist_mol(mol)
# if max_dist > 7.0:
# continue
AllChem.Compute2DCoords(mol)
#add try exception for cases cannot be draw
try:
img_mol = Draw.MolToImage(mol,mol_size, kekulize = False)
except Exception as ex:
img_mol = None
logging.exception("This mol cannot be draw using the RDKit Draw function, need to check for more details...")
xx, yy = trans(pos[each_node])
xa, ya = trans2((xx,yy))
nodesize_1 = (300.0/(h*100))
nodesize_2 = (300.0/(w*100))
p2_2 = nodesize_2/2
p2_1 = nodesize_1/2
a = plt.axes([xa - p2_2, ya - p2_1, nodesize_2, nodesize_1])
#self.resultGraph.node[id_mol]['image'] = img_mol
#a.imshow(self.resultGraph.node[each_node]['image'])
a.imshow(img_mol)
a.axis('off')
# plt.savefig('graph.png', facecolor=fig.get_facecolor())
# print 'Graph .png file has been generated...'
plt.show()
return
| mit | 5,039,775,959,600,353 | 35.677747 | 209 | 0.562509 | false | 4.271642 | false | false | false |
zyga/plainbox | setup.py | 2 | 1216 | #!/usr/bin/env python3
# This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup, find_packages
setup(
name="plainbox",
version="0.2",
packages=find_packages(),
author="Zygmunt Krynicki",
test_suite='plainbox.tests.test_suite',
author_email="[email protected]",
license="GPLv3+",
description="Simple replacement for checkbox",
entry_points={
'console_scripts': [
'plainbox=plainbox.public:main',
]
})
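# Typical developer workflow (assumption, not part of the original file):
#   python3 setup.py test      # runs the plainbox.tests.test_suite declared above
#   python3 setup.py install   # installs the 'plainbox' console script entry point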
| gpl-3.0 | 210,228,250,651,786,050 | 31 | 70 | 0.713816 | false | 3.684848 | false | false | false |
Cog-Creators/Red-DiscordBot | redbot/core/bank.py | 3 | 28666 | from __future__ import annotations
import asyncio
import logging
from datetime import datetime, timezone
from typing import Union, List, Optional, TYPE_CHECKING, Literal
from functools import wraps
import discord
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import humanize_number
from . import Config, errors, commands
from .i18n import Translator
from .errors import BankPruneError
if TYPE_CHECKING:
from .bot import Red
_ = Translator("Bank API", __file__)
__all__ = [
"Account",
"get_balance",
"set_balance",
"withdraw_credits",
"deposit_credits",
"can_spend",
"transfer_credits",
"wipe_bank",
"get_account",
"is_global",
"set_global",
"get_bank_name",
"set_bank_name",
"get_currency_name",
"set_currency_name",
"get_default_balance",
"set_default_balance",
"get_max_balance",
"set_max_balance",
"cost",
"AbortPurchase",
"bank_prune",
]
_MAX_BALANCE = 2 ** 63 - 1
_SCHEMA_VERSION = 1
_DEFAULT_GLOBAL = {
"schema_version": 0,
"is_global": False,
"bank_name": "Twentysix bank",
"currency": "credits",
"default_balance": 100,
"max_balance": _MAX_BALANCE,
}
_DEFAULT_GUILD = {
"bank_name": "Twentysix bank",
"currency": "credits",
"default_balance": 100,
"max_balance": _MAX_BALANCE,
}
_DEFAULT_MEMBER = {"name": "", "balance": 0, "created_at": 0}
_DEFAULT_USER = _DEFAULT_MEMBER
_config: Config = None
log = logging.getLogger("red.core.bank")
_data_deletion_lock = asyncio.Lock()
_cache_is_global = None
_cache = {"bank_name": None, "currency": None, "default_balance": None, "max_balance": None}
async def _init():
global _config
_config = Config.get_conf(None, 384734293238749, cog_name="Bank", force_registration=True)
_config.register_global(**_DEFAULT_GLOBAL)
_config.register_guild(**_DEFAULT_GUILD)
_config.register_member(**_DEFAULT_MEMBER)
_config.register_user(**_DEFAULT_USER)
await _migrate_config()
async def _migrate_config():
schema_version = await _config.schema_version()
if schema_version == _SCHEMA_VERSION:
return
if schema_version == 0:
await _schema_0_to_1()
schema_version += 1
await _config.schema_version.set(schema_version)
async def _schema_0_to_1():
# convert floats in bank balances to ints
# don't use anything seen below in extensions, it's optimized and controlled for here,
# but can't be safe in 3rd party use
# this CANNOT use ctx manager, because ctx managers compare before and after,
# and floats can be equal to ints: (1.0 == 1) is True
group = _config._get_base_group(_config.USER)
bank_user_data = await group.all()
for user_config in bank_user_data.values():
if "balance" in user_config:
user_config["balance"] = int(user_config["balance"])
await group.set(bank_user_data)
group = _config._get_base_group(_config.MEMBER)
bank_member_data = await group.all()
for guild_data in bank_member_data.values():
for member_config in guild_data.values():
if "balance" in member_config:
member_config["balance"] = int(member_config["balance"])
await group.set(bank_member_data)
async def _process_data_deletion(
*, requester: Literal["discord_deleted_user", "owner", "user", "user_strict"], user_id: int
):
"""
    Bank has no reason to keep any of this data
    if the user doesn't want it kept,
    so we won't special-case any request type
"""
if requester not in ("discord_deleted_user", "owner", "user", "user_strict"):
        log.warning(
            "Got unknown data request type `%s` for user, deleting anyway", requester
        )
async with _data_deletion_lock:
await _config.user_from_id(user_id).clear()
all_members = await _config.all_members()
async for guild_id, member_dict in AsyncIter(all_members.items(), steps=100):
if user_id in member_dict:
await _config.member_from_ids(guild_id, user_id).clear()
class Account:
"""A single account.
This class should ONLY be instantiated by the bank itself."""
def __init__(self, name: str, balance: int, created_at: datetime):
self.name = name
self.balance = balance
self.created_at = created_at
def _encoded_current_time() -> int:
"""Get the current UTC time as a timestamp.
Returns
-------
int
The current UTC timestamp.
"""
now = datetime.now(timezone.utc)
return _encode_time(now)
def _encode_time(time: datetime) -> int:
"""Convert a datetime object to a serializable int.
Parameters
----------
time : datetime.datetime
The datetime to convert.
Returns
-------
int
The timestamp of the datetime object.
"""
ret = int(time.timestamp())
return ret
def _decode_time(time: int) -> datetime:
"""Convert a timestamp to a datetime object.
Parameters
----------
time : int
The timestamp to decode.
Returns
-------
datetime.datetime
The datetime object from the timestamp.
"""
return datetime.utcfromtimestamp(time)
async def get_balance(member: discord.Member) -> int:
"""Get the current balance of a member.
Parameters
----------
member : discord.Member
The member whose balance to check.
Returns
-------
int
The member's balance
"""
acc = await get_account(member)
return acc.balance
async def can_spend(member: discord.Member, amount: int) -> bool:
"""Determine if a member can spend the given amount.
Parameters
----------
member : discord.Member
The member wanting to spend.
amount : int
The amount the member wants to spend.
Raises
------
TypeError
If the amount is not an `int`.
Returns
-------
bool
:code:`True` if the member has a sufficient balance to spend the
amount, else :code:`False`.
"""
if not isinstance(amount, int):
raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
if _invalid_amount(amount):
return False
return await get_balance(member) >= amount
async def set_balance(member: Union[discord.Member, discord.User], amount: int) -> int:
"""Set an account balance.
Parameters
----------
member : Union[discord.Member, discord.User]
The member whose balance to set.
amount : int
The amount to set the balance to.
Returns
-------
int
New account balance.
Raises
------
ValueError
If attempting to set the balance to a negative number.
RuntimeError
If the bank is guild-specific and a discord.User object is provided.
BalanceTooHigh
If attempting to set the balance to a value greater than
``bank._MAX_BALANCE``.
TypeError
If the amount is not an `int`.
"""
if not isinstance(amount, int):
raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
if amount < 0:
raise ValueError("Not allowed to have negative balance.")
guild = getattr(member, "guild", None)
max_bal = await get_max_balance(guild)
if amount > max_bal:
currency = await get_currency_name(guild)
raise errors.BalanceTooHigh(
user=member.display_name, max_balance=max_bal, currency_name=currency
)
if await is_global():
group = _config.user(member)
else:
group = _config.member(member)
await group.balance.set(amount)
if await group.created_at() == 0:
time = _encoded_current_time()
await group.created_at.set(time)
if await group.name() == "":
await group.name.set(member.display_name)
return amount
def _invalid_amount(amount: int) -> bool:
return amount < 0
async def withdraw_credits(member: discord.Member, amount: int) -> int:
"""Remove a certain amount of credits from an account.
Parameters
----------
member : discord.Member
The member to withdraw credits from.
amount : int
The amount to withdraw.
Returns
-------
int
New account balance.
Raises
------
ValueError
If the withdrawal amount is invalid or if the account has insufficient
funds.
TypeError
If the withdrawal amount is not an `int`.
"""
if not isinstance(amount, int):
raise TypeError("Withdrawal amount must be of type int, not {}.".format(type(amount)))
if _invalid_amount(amount):
raise ValueError(
"Invalid withdrawal amount {} < 0".format(
humanize_number(amount, override_locale="en_US")
)
)
bal = await get_balance(member)
if amount > bal:
raise ValueError(
"Insufficient funds {} > {}".format(
humanize_number(amount, override_locale="en_US"),
humanize_number(bal, override_locale="en_US"),
)
)
return await set_balance(member, bal - amount)
async def deposit_credits(member: discord.Member, amount: int) -> int:
"""Add a given amount of credits to an account.
Parameters
----------
member : discord.Member
The member to deposit credits to.
amount : int
The amount to deposit.
Returns
-------
int
The new balance.
Raises
------
ValueError
If the deposit amount is invalid.
TypeError
If the deposit amount is not an `int`.
"""
if not isinstance(amount, int):
raise TypeError("Deposit amount must be of type int, not {}.".format(type(amount)))
if _invalid_amount(amount):
raise ValueError(
"Invalid deposit amount {} <= 0".format(
humanize_number(amount, override_locale="en_US")
)
)
bal = await get_balance(member)
return await set_balance(member, amount + bal)
async def transfer_credits(
from_: Union[discord.Member, discord.User],
to: Union[discord.Member, discord.User],
amount: int,
):
"""Transfer a given amount of credits from one account to another.
Parameters
----------
from_: Union[discord.Member, discord.User]
The member to transfer from.
to : Union[discord.Member, discord.User]
The member to transfer to.
amount : int
The amount to transfer.
Returns
-------
int
The new balance of the member gaining credits.
Raises
------
ValueError
If the amount is invalid or if ``from_`` has insufficient funds.
TypeError
If the amount is not an `int`.
RuntimeError
If the bank is guild-specific and a discord.User object is provided.
BalanceTooHigh
If the balance after the transfer would be greater than
``bank._MAX_BALANCE``.
"""
if not isinstance(amount, int):
raise TypeError("Transfer amount must be of type int, not {}.".format(type(amount)))
if _invalid_amount(amount):
raise ValueError(
"Invalid transfer amount {} <= 0".format(
humanize_number(amount, override_locale="en_US")
)
)
guild = getattr(to, "guild", None)
max_bal = await get_max_balance(guild)
if await get_balance(to) + amount > max_bal:
currency = await get_currency_name(guild)
raise errors.BalanceTooHigh(
user=to.display_name, max_balance=max_bal, currency_name=currency
)
await withdraw_credits(from_, amount)
return await deposit_credits(to, amount)
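# Minimal usage sketch (illustrative only, not part of this module's public API): a cog command
# could combine the primitives above roughly like this, with the member objects and ``price``
# taken from the command context. Note that ``transfer_credits`` itself raises ``ValueError`` on
# insufficient funds and ``BalanceTooHigh`` if the recipient would exceed the configured maximum.
#
#     async def buy_item(buyer: discord.Member, seller: discord.Member, price: int) -> int:
#         if not await can_spend(buyer, price):
#             raise ValueError("Insufficient funds")
#         await transfer_credits(buyer, seller, price)
#         return await get_balance(buyer)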
async def wipe_bank(guild: Optional[discord.Guild] = None) -> None:
"""Delete all accounts from the bank.
Parameters
----------
guild : discord.Guild
The guild to clear accounts for. If unsupplied and the bank is
per-server, all accounts in every guild will be wiped.
"""
if await is_global():
await _config.clear_all_users()
else:
await _config.clear_all_members(guild)
async def bank_prune(bot: Red, guild: discord.Guild = None, user_id: int = None) -> None:
"""Prune bank accounts from the bank.
Parameters
----------
bot : Red
The bot.
guild : discord.Guild
The guild to prune. This is required if the bank is set to local.
user_id : int
The id of the user whose account will be pruned.
If supplied this will prune only this user's bank account
otherwise it will prune all invalid users from the bank.
Raises
------
BankPruneError
If guild is :code:`None` and the bank is Local.
"""
global_bank = await is_global()
if global_bank:
_guilds = set()
_uguilds = set()
if user_id is None:
async for g in AsyncIter(bot.guilds, steps=100):
if not g.unavailable and g.large and not g.chunked:
_guilds.add(g)
elif g.unavailable:
_uguilds.add(g)
group = _config._get_base_group(_config.USER)
else:
if guild is None:
raise BankPruneError("'guild' can't be None when pruning a local bank")
if user_id is None:
_guilds = {guild} if not guild.unavailable and guild.large else set()
_uguilds = {guild} if guild.unavailable else set()
group = _config._get_base_group(_config.MEMBER, str(guild.id))
if user_id is None:
for _guild in _guilds:
await _guild.chunk()
accounts = await group.all()
tmp = accounts.copy()
members = bot.get_all_members() if global_bank else guild.members
user_list = {str(m.id) for m in members if m.guild not in _uguilds}
async with group.all() as bank_data: # FIXME: use-config-bulk-update
if user_id is None:
for acc in tmp:
if acc not in user_list:
del bank_data[acc]
else:
user_id = str(user_id)
if user_id in bank_data:
del bank_data[user_id]
async def get_leaderboard(positions: int = None, guild: discord.Guild = None) -> List[tuple]:
"""
Gets the bank's leaderboard
Parameters
----------
positions : `int`
The number of positions to get
guild : discord.Guild
The guild to get the leaderboard of. If the bank is global and this
is provided, get only guild members on the leaderboard
Returns
-------
`list` of `tuple`
The sorted leaderboard in the form of :code:`(user_id, raw_account)`
Raises
------
TypeError
If the bank is guild-specific and no guild was specified
"""
if await is_global():
raw_accounts = await _config.all_users()
if guild is not None:
tmp = raw_accounts.copy()
for acc in tmp:
if not guild.get_member(acc):
del raw_accounts[acc]
else:
if guild is None:
raise TypeError("Expected a guild, got NoneType object instead!")
raw_accounts = await _config.all_members(guild)
sorted_acc = sorted(raw_accounts.items(), key=lambda x: x[1]["balance"], reverse=True)
if positions is None:
return sorted_acc
else:
return sorted_acc[:positions]
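# Illustrative example (assumption): each leaderboard entry pairs an account holder's ID with the
# raw stored account data, e.g.
#   (123456789012345678, {"name": "ExampleUser", "balance": 1000, "created_at": 0})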
async def get_leaderboard_position(
member: Union[discord.User, discord.Member]
) -> Union[int, None]:
"""
Get the leaderboard position for the specified user
Parameters
----------
member : `discord.User` or `discord.Member`
The user to get the leaderboard position of
Returns
-------
`int`
The position of the user on the leaderboard
Raises
------
TypeError
If the bank is currently guild-specific and a `discord.User` object was passed in
"""
if await is_global():
guild = None
else:
guild = member.guild if hasattr(member, "guild") else None
try:
leaderboard = await get_leaderboard(None, guild)
except TypeError:
raise
else:
pos = discord.utils.find(lambda x: x[1][0] == member.id, enumerate(leaderboard, 1))
if pos is None:
return None
else:
return pos[0]
async def get_account(member: Union[discord.Member, discord.User]) -> Account:
"""Get the appropriate account for the given user or member.
A member is required if the bank is currently guild specific.
Parameters
----------
member : `discord.User` or `discord.Member`
The user whose account to get.
Returns
-------
Account
The user's account.
"""
if await is_global():
all_accounts = await _config.all_users()
else:
all_accounts = await _config.all_members(member.guild)
if member.id not in all_accounts:
acc_data = {"name": member.display_name, "created_at": _DEFAULT_MEMBER["created_at"]}
try:
acc_data["balance"] = await get_default_balance(member.guild)
except AttributeError:
acc_data["balance"] = await get_default_balance()
else:
acc_data = all_accounts[member.id]
acc_data["created_at"] = _decode_time(acc_data["created_at"])
return Account(**acc_data)
async def is_global() -> bool:
"""Determine if the bank is currently global.
Returns
-------
bool
:code:`True` if the bank is global, otherwise :code:`False`.
"""
global _cache_is_global
if _cache_is_global is None:
_cache_is_global = await _config.is_global()
return _cache_is_global
async def set_global(global_: bool) -> bool:
"""Set global status of the bank.
.. important::
All accounts are reset when you switch!
Parameters
----------
global_ : bool
:code:`True` will set bank to global mode.
Returns
-------
bool
New bank mode, :code:`True` is global.
Raises
------
RuntimeError
If bank is becoming global and a `discord.Member` was not provided.
"""
if (await is_global()) is global_:
return global_
global _cache_is_global
if await is_global():
await _config.clear_all_users()
else:
await _config.clear_all_members()
await _config.is_global.set(global_)
_cache_is_global = global_
return global_
async def get_bank_name(guild: discord.Guild = None) -> str:
"""Get the current bank name.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the bank name for (required if bank is
guild-specific).
Returns
-------
str
The bank's name.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
if await is_global():
global _cache
if _cache["bank_name"] is None:
_cache["bank_name"] = await _config.bank_name()
return _cache["bank_name"]
elif guild is not None:
return await _config.guild(guild).bank_name()
else:
raise RuntimeError("Guild parameter is required and missing.")
async def set_bank_name(name: str, guild: discord.Guild = None) -> str:
"""Set the bank name.
Parameters
----------
name : str
The new name for the bank.
guild : `discord.Guild`, optional
The guild to set the bank name for (required if bank is
guild-specific).
Returns
-------
str
The new name for the bank.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
if await is_global():
await _config.bank_name.set(name)
global _cache
_cache["bank_name"] = name
elif guild is not None:
await _config.guild(guild).bank_name.set(name)
else:
raise RuntimeError("Guild must be provided if setting the name of a guild-specific bank.")
return name
async def get_currency_name(guild: discord.Guild = None) -> str:
"""Get the currency name of the bank.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the currency name for (required if bank is
guild-specific).
Returns
-------
str
The currency name.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
if await is_global():
global _cache
if _cache["currency"] is None:
_cache["currency"] = await _config.currency()
return _cache["currency"]
elif guild is not None:
return await _config.guild(guild).currency()
else:
raise RuntimeError("Guild must be provided.")
async def set_currency_name(name: str, guild: discord.Guild = None) -> str:
"""Set the currency name for the bank.
Parameters
----------
name : str
The new name for the currency.
guild : `discord.Guild`, optional
The guild to set the currency name for (required if bank is
guild-specific).
Returns
-------
str
The new name for the currency.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
if await is_global():
await _config.currency.set(name)
global _cache
_cache["currency"] = name
elif guild is not None:
await _config.guild(guild).currency.set(name)
else:
raise RuntimeError(
"Guild must be provided if setting the currency name of a guild-specific bank."
)
return name
async def get_max_balance(guild: discord.Guild = None) -> int:
"""Get the max balance for the bank.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the max balance for (required if bank is
guild-specific).
Returns
-------
int
The maximum allowed balance.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
if await is_global():
if _cache["max_balance"] is None:
_cache["max_balance"] = await _config.max_balance()
return _cache["max_balance"]
elif guild is not None:
return await _config.guild(guild).max_balance()
else:
raise RuntimeError("Guild must be provided.")
async def set_max_balance(amount: int, guild: discord.Guild = None) -> int:
"""Set the maximum balance for the bank.
Parameters
----------
amount : int
The new maximum balance.
guild : `discord.Guild`, optional
The guild to set the max balance for (required if bank is
guild-specific).
Returns
-------
int
The new maximum balance.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
ValueError
        If the amount is not greater than zero or is higher than 2 ** 63 - 1.
TypeError
If the amount is not an `int`.
"""
if not isinstance(amount, int):
raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
if not (0 < amount <= _MAX_BALANCE):
raise ValueError(
"Amount must be greater than zero and less than {max}.".format(
max=humanize_number(_MAX_BALANCE, override_locale="en_US")
)
)
if await is_global():
await _config.max_balance.set(amount)
global _cache
_cache["max_balance"] = amount
elif guild is not None:
await _config.guild(guild).max_balance.set(amount)
else:
raise RuntimeError(
"Guild must be provided if setting the maximum balance of a guild-specific bank."
)
return amount
async def get_default_balance(guild: discord.Guild = None) -> int:
"""Get the current default balance amount.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the default balance for (required if bank is
guild-specific).
Returns
-------
int
The bank's default balance.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
if await is_global():
if _cache["default_balance"] is None:
_cache["default_balance"] = await _config.default_balance()
return _cache["default_balance"]
elif guild is not None:
return await _config.guild(guild).default_balance()
else:
raise RuntimeError("Guild is missing and required!")
async def set_default_balance(amount: int, guild: discord.Guild = None) -> int:
"""Set the default balance amount.
Parameters
----------
amount : int
The new default balance.
guild : `discord.Guild`, optional
The guild to set the default balance for (required if bank is
guild-specific).
Returns
-------
int
The new default balance.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
ValueError
If the amount is less than 0 or higher than the max allowed balance.
TypeError
If the amount is not an `int`.
"""
if not isinstance(amount, int):
raise TypeError("Amount must be of type int, not {}.".format(type(amount)))
max_bal = await get_max_balance(guild)
if not (0 <= amount <= max_bal):
raise ValueError(
"Amount must be greater than or equal zero and less than or equal {max}.".format(
max=humanize_number(max_bal, override_locale="en_US")
)
)
if await is_global():
await _config.default_balance.set(amount)
global _cache
_cache["default_balance"] = amount
elif guild is not None:
await _config.guild(guild).default_balance.set(amount)
else:
raise RuntimeError("Guild is missing and required.")
return amount
class AbortPurchase(Exception):
pass
def cost(amount: int):
"""
Decorates a coroutine-function or command to have a cost.
If the command raises an exception, the cost will be refunded.
You can intentionally refund by raising `AbortPurchase`
(this error will be consumed and not show to users)
Other exceptions will propagate and will be handled by Red's (and/or
any other configured) error handling.
"""
# TODO: Add documentation for input/output/exceptions
if not isinstance(amount, int) or amount < 0:
raise ValueError("This decorator requires an integer cost greater than or equal to zero")
def deco(coro_or_command):
is_command = isinstance(coro_or_command, commands.Command)
if not is_command and not asyncio.iscoroutinefunction(coro_or_command):
raise TypeError("@bank.cost() can only be used on commands or `async def` functions")
coro = coro_or_command.callback if is_command else coro_or_command
@wraps(coro)
async def wrapped(*args, **kwargs):
context: commands.Context = None
for arg in args:
if isinstance(arg, commands.Context):
context = arg
break
if not context.guild and not await is_global():
raise commands.UserFeedbackCheckFailure(
_("Can't pay for this command in DM without a global bank.")
)
try:
await withdraw_credits(context.author, amount)
except Exception:
credits_name = await get_currency_name(context.guild)
raise commands.UserFeedbackCheckFailure(
_("You need at least {cost} {currency} to use this command.").format(
cost=humanize_number(amount), currency=credits_name
)
)
else:
try:
return await coro(*args, **kwargs)
except AbortPurchase:
await deposit_credits(context.author, amount)
except Exception:
await deposit_credits(context.author, amount)
raise
if not is_command:
return wrapped
else:
wrapped.__module__ = coro_or_command.callback.__module__
coro_or_command.callback = wrapped
return coro_or_command
return deco
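# Illustrative usage of the decorator above (hedged sketch; the command name
# and amount are made up):
#     @cost(10)
#     @commands.command()
#     async def fortune(ctx):
#         ...  # raising AbortPurchase anywhere in here refunds the 10 credits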
| gpl-3.0 | -1,633,177,258,842,648,800 | 26.405354 | 98 | 0.599246 | false | 4.042589 | true | false | false |
rspavel/spack | var/spack/repos/builtin/packages/r-ncdf4/package.py | 5 | 1824 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RNcdf4(RPackage):
"""Provides a high-level R interface to data files written using Unidata's
netCDF library (version 4 or earlier), which are binary data files that are
portable across platforms and include metadata information in addition to
the data sets. Using this package, netCDF files (either version 4 or
"classic" version 3) can be opened and data sets read in easily. It is also
easy to create new netCDF dimensions, variables, and files, in either
version 3 or 4 format, and manipulate existing netCDF files. This package
replaces the former ncdf package, which only worked with netcdf version 3
files. For various reasons the names of the functions have had to be
changed from the names in the ncdf package. The old ncdf package is still
available at the URL given below, if you need to have backward
compatibility. It should be possible to have both the ncdf and ncdf4
packages installed simultaneously without a problem. However, the ncdf
package does not provide an interface for netcdf version 4 files."""
homepage = "http://cirrus.ucsd.edu/~pierce/ncdf"
url = "https://cloud.r-project.org/src/contrib/ncdf4_1.15.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/ncdf4"
version('1.16.1', sha256='0dde2d6d1e8474f4abd15a61af8a2f7de564f13da00f1a01d7a479ab88587a20')
version('1.16', sha256='edd5731a805bbece3a8f6132c87c356deafc272351e1dd07256ca00574949253')
version('1.15', sha256='d58298f4317c6c80a041a70216126492fd09ba8ecde9da09d5145ae26f324d4d')
depends_on('[email protected]:')
| lgpl-2.1 | -6,404,480,735,709,509,000 | 54.272727 | 96 | 0.758772 | false | 3.428571 | false | false | false |
woutdenolf/spectrocrunch | scraps/ffnoisesimul.py | 1 | 9053 | # -*- coding: utf-8 -*-
import os, sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from spectrocrunch.materials.compoundfromformula import compoundfromformula
from spectrocrunch.materials.compoundfromname import compoundfromname
from spectrocrunch.materials.mixture import mixture
from spectrocrunch.materials.types import fraction
from spectrocrunch.simulation import calcnoise
from spectrocrunch.simulation import materials
from spectrocrunch.math import noisepropagation
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
class sample(object):
@staticmethod
def getnframes(totaltime, frametime, fracflat):
n = int(round(totaltime / frametime))
nflat = max(int(round(fracflat * n / 2.0)), 1)
nflat *= 2 # before and after
ndata = max(n - nflat, 1)
return ndata, nflat
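    # Worked example (illustrative): totaltime=70, frametime=0.07, fracflat=1/3
    # gives n = 1000 frames, nflat = 2*167 = 334 flat frames and ndata = 666
    # data frames.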
@staticmethod
def getrealtime(totaltime, frametime, fracflat):
        ndata, nflat = sample.getnframes(totaltime, frametime, fracflat)  # staticmethod: reference the class, not self
n = ndata + nflat
overhead = 6.50305 + 0.0131498 * n
return frametime * n + overhead
def xanes(
self, I0, energy, totaltime=None, frametime=None, fracflat=None, ndark=None
):
ndata, nflat = self.getnframes(totaltime, frametime, fracflat)
energy = np.asarray(energy)
N, N0, D, D0 = calcnoise.id21_ffnoise(
I0,
energy,
self.composition,
tframe_data=frametime,
nframe_data=ndata,
tframe_flat=frametime,
nframe_flat=nflat,
nframe_dark=ndark,
)
T = calcnoise.transmission(
N,
N0,
D=D,
D0=D0,
tframe_data=frametime,
nframe_data=ndata,
tframe_flat=frametime,
nframe_flat=nflat,
nframe_dark=ndark,
)
XAS = calcnoise.absorbance(T)
signal = noisepropagation.E(XAS)
noise = noisepropagation.S(XAS)
return signal, noise
def costfunc(self, I0, energy, **kwargs):
signal, noise = self.xanes(I0, energy, **kwargs)
# return np.max(noise/signal*100)
return np.mean(noise) / (signal[-1] - signal[0])
def __str__(self):
return str(self.composition)
def plotxanesnoise(self, I0, energy, **kwargs):
signal, noise = self.xanes(I0, energy, **kwargs)
plt.plot(energy, noise / signal * 100)
plt.xlabel("Energy (keV)")
plt.ylabel("N/S (%)")
def plotxanes(self, I0, energy, **kwargs):
signal, _ = self.xanes(I0, energy, **kwargs)
plt.plot(energy, signal)
plt.xlabel("Energy (keV)")
plt.ylabel("Absorbance")
class sample_hg115(sample):
def __init__(self, wpigment=10, paintthickness=10):
binder = compoundfromname("linseed oil")
pigment = compoundfromname("verdigris")
paint = mixture(
[binder, pigment], [1 - wpigment / 100.0, wpigment / 100.0], fraction.mass
)
ultralene = compoundfromname("ultralene")
sfreetape = compoundfromname("sulfur-free tape")
# ultralene = compoundfromname("vacuum")
# sfreetape = compoundfromname("vacuum")
m = [ultralene, paint, sfreetape]
thickness = [4, paintthickness, 10]
# m = [compoundfromname("vacuum"),compoundfromname("vacuum"),compoundfromname("vacuum")]
self.composition = materials.factory(
"Multilayer",
material=m,
thickness=thickness,
anglein=0,
angleout=0,
azimuth=0,
)
self.paintindex = 1
def set_wpigment(self, wpigment):
w = self.composition.material[self.paintindex].massfractions()
w["verdigris"] = wpigment / 100.0
w["linseed oil"] = 1 - wpigment / 100.0
self.composition.material[self.paintindex].change_fractions(w, fraction.mass)
def get_wpigment(self):
return (
self.composition.material[self.paintindex].massfractions()["verdigris"]
* 100
)
def set_paintthickness(self, paintthickness):
self.composition.thickness[self.paintindex] = paintthickness
def get_paintthickness(self):
return self.composition.thickness[self.paintindex]
def optimize_thickness(self, I0, energy, **kwargs):
def costfunc(paintthickness):
self.set_paintthickness(paintthickness[0])
c = self.costfunc(I0, energy, **kwargs)
return c
guess = self.get_paintthickness()
result = scipy.optimize.least_squares(costfunc, guess, gtol=1e-015, ftol=1e-015)
print result.message
return result.x[0], result.success
def optimize_wpigment(self, I0, energy, **kwargs):
def costfunc(wpigment):
self.set_wpigment(wpigment[0])
c = self.costfunc(I0, energy, **kwargs)
return c
guess = self.get_wpigment()
result = scipy.optimize.least_squares(
costfunc, guess, bounds=([0, 100]), gtol=1e-015, ftol=1e-015
)
print result.message
return result.x[0], result.success
def optimize_thickness_plot(self, I0, energy, **kwargs):
thickness = self.get_paintthickness()
t = np.linspace(max(thickness - 100, 0), thickness + 100, 50)
r = np.zeros(len(t))
for i, paintthickness in enumerate(t):
self.set_paintthickness(paintthickness)
r[i] = self.costfunc(I0, energy, **kwargs)
self.set_paintthickness(thickness)
plt.plot(t, 1 / r, "-o", label="{} %".format(self.get_wpigment()))
plt.xlabel("thickness ($\mu$m)")
plt.ylabel("Jump-to-noise")
def optimize_wpigment_plot(self, I0, energy, **kwargs):
w = self.get_wpigment()
t = np.linspace(0, 20, 50)
r = np.zeros(len(t))
for i, wpigment in enumerate(t):
self.set_wpigment(wpigment)
r[i] = self.costfunc(I0, energy, **kwargs)
self.set_wpigment(w)
plt.plot(t, 1 / r, "-o", label="{} $\mu$m".format(self.get_paintthickness()))
plt.xlabel("Verdigris (%)")
plt.ylabel("Jump-to-noise")
def optimize(self, I0, energy, **kwargs):
def costfunc(p):
self.set_wpigment(p[0])
self.set_paintthickness(p[1])
return self.costfunc(I0, energy, **kwargs)
guess = (self.get_wpigment(), self.get_paintthickness())
result = scipy.optimize.least_squares(
costfunc, guess, bounds=([0, 0], [100, 1e6]), gtol=1e-015
)
print result.message
return result.x, result.success
def hg115_ff():
sample = sample_hg115()
I0 = 1e6
energy = np.linspace(8.9, 9.3, 100)
totaltime = 70
frametime = 0.07
fracflat = 1 / 3.0
ndark = 30
kwargs = {
"totaltime": totaltime,
"frametime": frametime,
"fracflat": fracflat,
"ndark": ndark,
}
opt = 1
energyopt = [8.97, 9]
if opt == 0:
sample.set_wpigment(10)
t, s = sample.optimize_thickness(I0, energyopt, **kwargs)
sample.set_paintthickness(t)
elif opt == 1:
sample.set_paintthickness(20)
w, s = sample.optimize_wpigment(I0, energyopt, **kwargs)
sample.set_wpigment(w)
else:
wt, s = sample.optimize(I0, energy, **kwargs)
sample.set_wpigment(wt[0])
sample.set_paintthickness(wt[1])
print "Thickness = {} μm".format(sample.get_paintthickness())
print "Verdigris = {} wt%".format(sample.get_wpigment())
print "Jump to noise = {}".format(1 / sample.costfunc(I0, energyopt, **kwargs))
print ""
plt.figure()
for thickness in [10, 15, 20]:
sample.set_paintthickness(thickness)
sample.optimize_wpigment_plot(I0, energy, **kwargs)
plt.legend(loc="best")
plt.show()
exit()
sample.optimize_thickness_plot(I0, energy, **kwargs)
sample.optimize_wpigment_plot(I0, energy, **kwargs)
plt.figure()
sample.plotxanes(I0, energy, **kwargs)
plt.figure()
sample.plotxanesnoise(I0, energy, **kwargs)
plt.show()
def hg115_xrd():
sample = sample_hg115()
energy = 8.5
sample.set_wpigment(100)
r = np.linspace(10, 20, 50)
n = [None] * len(r)
for i, t in enumerate(r):
sample.set_paintthickness(t)
n[i] = noisepropagation.E(
sample.composition.propagate(
noisepropagation.poisson(1e7),
energy,
interaction=materials.interactionType.elastic,
)
)
print n[-1] / n[0]
plt.plot(r, n)
plt.show()
if __name__ == "__main__":
hg115_ff()
# I0 = 1e5
# energy = np.linspace(3,5,100)
# tframe = 0.07
# nframe = 100
# ndark = 30
# tframe_data=tframe,nframe_data=nframe,\
# tframe_flat=tframe,nframe_flat=nframe,\
# nframe_dark=ndark
| mit | 5,490,147,310,723,721,000 | 28.106109 | 96 | 0.589593 | false | 3.395349 | false | false | false |
martinwicke/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/distribution_util_test.py | 5 | 13735 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from scipy import special
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import tensor_util
class AssertCloseTest(tf.test.TestCase):
def testAssertCloseIntegerDtype(self):
x = [1, 5, 10, 15, 20]
y = x
z = [2, 5, 10, 15, 20]
with self.test_session():
with tf.control_dependencies([distribution_util.assert_close(x, y)]):
tf.identity(x).eval()
with tf.control_dependencies([distribution_util.assert_close(y, x)]):
tf.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with tf.control_dependencies([distribution_util.assert_close(x, z)]):
tf.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with tf.control_dependencies([distribution_util.assert_close(y, z)]):
tf.identity(y).eval()
def testAssertCloseNonIntegerDtype(self):
x = np.array([1., 5, 10, 15, 20], dtype=np.float32)
y = x + 1e-8
z = [2., 5, 10, 15, 20]
with self.test_session():
with tf.control_dependencies([distribution_util.assert_close(x, y)]):
tf.identity(x).eval()
with tf.control_dependencies([distribution_util.assert_close(y, x)]):
tf.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with tf.control_dependencies([distribution_util.assert_close(x, z)]):
tf.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with tf.control_dependencies([distribution_util.assert_close(y, z)]):
tf.identity(y).eval()
def testAssertCloseEpsilon(self):
x = [0., 5, 10, 15, 20]
# x != y
y = [0.1, 5, 10, 15, 20]
# x = z
z = [1e-8, 5, 10, 15, 20]
with self.test_session():
with tf.control_dependencies([distribution_util.assert_close(x, z)]):
tf.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with tf.control_dependencies([distribution_util.assert_close(x, y)]):
tf.identity(x).eval()
with self.assertRaisesOpError("Condition x ~= y"):
with tf.control_dependencies([distribution_util.assert_close(y, z)]):
tf.identity(y).eval()
def testAssertIntegerForm(self):
# This should only be detected as an integer.
x = [1., 5, 10, 15, 20]
y = [1.1, 5, 10, 15, 20]
# First component isn't less than float32.eps = 1e-7
z = [1.0001, 5, 10, 15, 20]
# This shouldn"t be detected as an integer.
w = [1e-8, 5, 10, 15, 20]
with self.test_session():
with tf.control_dependencies([distribution_util.assert_integer_form(x)]):
tf.identity(x).eval()
with self.assertRaisesOpError("x has non-integer components"):
with tf.control_dependencies([
distribution_util.assert_integer_form(y)]):
tf.identity(y).eval()
with self.assertRaisesOpError("x has non-integer components"):
with tf.control_dependencies([
distribution_util.assert_integer_form(z)]):
tf.identity(z).eval()
with self.assertRaisesOpError("x has non-integer components"):
with tf.control_dependencies([
distribution_util.assert_integer_form(w)]):
tf.identity(w).eval()
class GetLogitsAndProbTest(tf.test.TestCase):
def testGetLogitsAndProbImproperArguments(self):
with self.test_session():
with self.assertRaises(ValueError):
distribution_util.get_logits_and_prob(logits=None, p=None)
with self.assertRaises(ValueError):
distribution_util.get_logits_and_prob(logits=[0.1], p=[0.1])
def testGetLogitsAndProbLogits(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
logits = special.logit(p)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_prob(
logits=logits, validate_args=True)
self.assertAllClose(p, new_p.eval())
self.assertAllClose(logits, new_logits.eval())
def testGetLogitsAndProbLogitsMultidimensional(self):
p = np.array([0.2, 0.3, 0.5], dtype=np.float32)
logits = np.log(p)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_prob(
logits=logits, multidimensional=True, validate_args=True)
self.assertAllClose(new_p.eval(), p)
self.assertAllClose(new_logits.eval(), logits)
def testGetLogitsAndProbProbability(self):
p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_prob(
p=p, validate_args=True)
self.assertAllClose(special.logit(p), new_logits.eval())
self.assertAllClose(p, new_p.eval())
def testGetLogitsAndProbProbabilityMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
with self.test_session():
new_logits, new_p = distribution_util.get_logits_and_prob(
p=p, multidimensional=True, validate_args=True)
self.assertAllClose(np.log(p), new_logits.eval())
self.assertAllClose(p, new_p.eval())
def testGetLogitsAndProbProbabilityValidateArgs(self):
p = [0.01, 0.2, 0.5, 0.7, .99]
# Component less than 0.
p2 = [-1, 0.2, 0.5, 0.3, .2]
# Component greater than 1.
p3 = [2, 0.2, 0.5, 0.3, .2]
with self.test_session():
_, prob = distribution_util.get_logits_and_prob(p=p, validate_args=True)
prob.eval()
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = distribution_util.get_logits_and_prob(
p=p2, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_prob(p=p2, validate_args=False)
prob.eval()
with self.assertRaisesOpError("p has components greater than 1"):
_, prob = distribution_util.get_logits_and_prob(
p=p3, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_prob(p=p3, validate_args=False)
prob.eval()
def testGetLogitsAndProbProbabilityValidateArgsMultidimensional(self):
p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component less than 0. Still sums to 1.
p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
# Component greater than 1. Does not sum to 1.
p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
# Does not sum to 1.
p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)
with self.test_session():
_, prob = distribution_util.get_logits_and_prob(
p=p, multidimensional=True)
prob.eval()
with self.assertRaisesOpError("Condition x >= 0"):
_, prob = distribution_util.get_logits_and_prob(
p=p2, multidimensional=True, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_prob(
p=p2, multidimensional=True, validate_args=False)
prob.eval()
with self.assertRaisesOpError(
"(p has components greater than 1|p does not sum to 1)"):
_, prob = distribution_util.get_logits_and_prob(
p=p3, multidimensional=True, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_prob(
p=p3, multidimensional=True, validate_args=False)
prob.eval()
with self.assertRaisesOpError("p does not sum to 1"):
_, prob = distribution_util.get_logits_and_prob(
p=p4, multidimensional=True, validate_args=True)
prob.eval()
_, prob = distribution_util.get_logits_and_prob(
p=p4, multidimensional=True, validate_args=False)
prob.eval()
class LogCombinationsTest(tf.test.TestCase):
def testLogCombinationsBinomial(self):
n = [2, 5, 12, 15]
k = [1, 2, 4, 11]
log_combs = np.log(special.binom(n, k))
with self.test_session():
n = np.array(n, dtype=np.float32)
counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
log_binom = distribution_util.log_combinations(n, counts)
self.assertEqual([4], log_binom.get_shape())
self.assertAllClose(log_combs, log_binom.eval())
def testLogCombinationsShape(self):
# Shape [2, 2]
n = [[2, 5], [12, 15]]
with self.test_session():
n = np.array(n, dtype=np.float32)
# Shape [2, 2, 4]
counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
log_binom = distribution_util.log_combinations(n, counts)
self.assertEqual([2, 2], log_binom.get_shape())
class RotateTransposeTest(tf.test.TestCase):
def _np_rotate_transpose(self, x, shift):
if not isinstance(x, np.ndarray):
x = np.array(x)
return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))
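  # Worked example (illustrative): with shift=1 and a rank-3 array,
  # np.roll(np.arange(3), 1) == [2, 0, 1], so an input of shape (3, 2, 1)
  # comes back with shape (1, 3, 2).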
def testRollStatic(self):
with self.test_session():
with self.assertRaisesRegexp(
ValueError, "None values not supported."):
distribution_util.rotate_transpose(None, 1)
for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
for shift in np.arange(-5, 5):
y = distribution_util.rotate_transpose(x, shift)
self.assertAllEqual(self._np_rotate_transpose(x, shift),
y.eval())
self.assertAllEqual(np.roll(x.shape, shift),
y.get_shape().as_list())
def testRollDynamic(self):
with self.test_session() as sess:
x = tf.placeholder(tf.float32)
shift = tf.placeholder(tf.int32)
for x_value in (np.ones(1, dtype=x.dtype.as_numpy_dtype()),
np.ones((2, 1), dtype=x.dtype.as_numpy_dtype()),
np.ones((3, 2, 1), dtype=x.dtype.as_numpy_dtype())):
for shift_value in np.arange(-5, 5):
self.assertAllEqual(
self._np_rotate_transpose(x_value, shift_value),
sess.run(distribution_util.rotate_transpose(x, shift),
feed_dict={x: x_value, shift: shift_value}))
class PickVectorTest(tf.test.TestCase):
def testCorrectlyPicksVector(self):
with self.test_session():
x = np.arange(10, 12)
y = np.arange(15, 18)
self.assertAllEqual(
x, distribution_util.pick_vector(
tf.less(0, 5), x, y).eval())
self.assertAllEqual(
y, distribution_util.pick_vector(
tf.less(5, 0), x, y).eval())
self.assertAllEqual(
x, distribution_util.pick_vector(
tf.constant(True), x, y)) # No eval.
self.assertAllEqual(
y, distribution_util.pick_vector(
tf.constant(False), x, y)) # No eval.
class FillLowerTriangularTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _fill_lower_triangular(self, x):
"""Numpy implementation of `fill_lower_triangular`."""
x = np.asarray(x)
d = x.shape[-1]
# d = n(n+1)/2 implies n is:
n = int(0.5 * (math.sqrt(1. + 8. * d) - 1.))
ids = np.tril_indices(n)
y = np.zeros(list(x.shape[:-1]) + [n, n], dtype=x.dtype)
y[..., ids[0], ids[1]] = x
return y
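  # Worked example (illustrative): d = 6 entries satisfy d = n(n+1)/2 with
  # n = int(0.5 * (sqrt(1 + 8*6) - 1)) = 3, i.e. a 3x3 lower-triangular matrix.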
def testCorrectlyMakes1x1LowerTril(self):
with self.test_session():
x = tf.convert_to_tensor(self._rng.randn(3, 1))
expected = self._fill_lower_triangular(tensor_util.constant_value(x))
actual = distribution_util.fill_lower_triangular(x, validate_args=True)
self.assertAllEqual(expected.shape, actual.get_shape())
self.assertAllEqual(expected, actual.eval())
def testCorrectlyMakesNoBatchLowerTril(self):
with self.test_session():
x = tf.convert_to_tensor(self._rng.randn(10))
expected = self._fill_lower_triangular(tensor_util.constant_value(x))
actual = distribution_util.fill_lower_triangular(x, validate_args=True)
self.assertAllEqual(expected.shape, actual.get_shape())
self.assertAllEqual(expected, actual.eval())
g = tf.gradients(distribution_util.fill_lower_triangular(x), x)
self.assertAllEqual(np.tri(4).reshape(-1), g[0].values.eval())
def testCorrectlyMakesBatchLowerTril(self):
with self.test_session():
x = tf.convert_to_tensor(self._rng.randn(2, 2, 6))
expected = self._fill_lower_triangular(tensor_util.constant_value(x))
actual = distribution_util.fill_lower_triangular(x, validate_args=True)
self.assertAllEqual(expected.shape, actual.get_shape())
self.assertAllEqual(expected, actual.eval())
self.assertAllEqual(
np.ones((2, 2, 6)),
tf.gradients(distribution_util.fill_lower_triangular(
x), x)[0].eval())
class GenNewSeedTest(tf.test.TestCase):
def testOnlyNoneReturnsNone(self):
self.assertFalse(distribution_util.gen_new_seed(0, "salt") is None)
self.assertTrue(distribution_util.gen_new_seed(None, "salt") is None)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -8,454,266,680,957,920,000 | 35.823056 | 80 | 0.627375 | false | 3.384672 | true | false | false |
s-silva/roadhouse | Server/tools/database-init.py | 1 | 3975 | #!/usr/bin/python
import sys
import pymongo
def main(args):
# If your database server is running in auth mode, you will need user and
# database info. Ex:
# mongodb_uri = 'mongodb://username:password@localhost:27017/dbname'
#
mongodb_uri = 'mongodb://localhost:27017'
db_name = 'roadhouse'
try:
connection = pymongo.Connection(mongodb_uri)
database = connection[db_name]
except:
print('Error: Unable to connect to database.')
connection = None
# What follows is insert, update, and selection code that can vary widely
# depending on coding style.
#
if connection is not None:
# users
#database.users.insert({'email':
# To begin with, we'll add a few adventurers to the database. Note that
# nothing is required to create the adventurers collection--it is
# created automatically when we insert into it. These are simple JSON
# objects.
#
database.adventurers.insert({'name': 'Cooper',
'class': 'fighter',
'level': 5,
'equipment': {'main-hand': 'sword',
'off-hand': 'shield',
'armor': 'plate'}})
database.adventurers.insert({'name': 'Nishira',
'class': 'warlock',
'level': 10,
'equipment': {'main-hand': 'wand',
'off-hand': 'dagger',
'armor': 'cloth'}})
database.adventurers.insert({'name': 'Mordo',
'class': 'wizard',
'level': 11,
'equipment': {'off-hand': 'dagger',
'armor': 'leather'}})
# Because it seems we forgot to equip Mordo, we'll need to get him
# ready. Note the dot notation used to address the 'main-hand' key.
# Don't send a JSON object describing the 'main-hand' key in the
# context of the 'equipment' key, or MongoDB will overwrite the other
# keys stored under 'equipment'. Mordo would be embarassed without
# armor.
#
# Note that in python, MongoDB $ operators should be quoted.
#
database.adventurers.update({'name': 'Mordo' },
{'$set': {'equipment.main-hand': 'staff'}})
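        # For contrast (illustrative only): using a nested document instead,
        #   {'$set': {'equipment': {'main-hand': 'staff'}}}
        # would replace the whole 'equipment' sub-document and drop the
        # 'off-hand' and 'armor' keys.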
# Now that everyone's ready, we'll send them off through standard
# output. Unfortunately this adventure is is for adventurers level 10
# or higher. We pass a JSON object describing our query as the value
# of the key we'd like to evaluate.
#
party = database.adventurers.find({'level': {'$gte': 10}})
# Our query returns a Cursor, which can be counted and iterated
# normally.
#
if party.count() > 0:
print('The quest begins!')
for adventurer in party:
print('%s, level %s %s, departs wearing %s and wielding a %s and %s.'
% ( adventurer['name'], adventurer['level'],
adventurer['class'],
adventurer['equipment']['armor'],
adventurer['equipment']['main-hand'],
adventurer['equipment']['off-hand'] ))
print('Good luck, you %s brave souls!' % party.count())
else:
print('No one is high enough level!')
# Since this is an example, we'll clean up after ourselves.
database.drop_collection('adventurers')
if __name__ == '__main__':
main(sys.argv[1:]) | lgpl-2.1 | -5,710,898,928,369,122,000 | 41.297872 | 85 | 0.490566 | false | 4.574223 | false | false | false |
cemarchi/biosphere | Src/BioAnalyzer/Managers/GenePrioritization/DataIntegratorManager.py | 1 | 2195 | from typing import Dict
from yaak import inject
import paginate
from Src.BioAnalyzer.CrossCutting.DTOs.GenePrioritization.DataIntegrationDto import DataIntegrationDto
from Src.BioAnalyzer.CrossCutting.Filters.GenePrioritization.FeSingleDataIntegration import FeSingleDataIntegration
from Src.BioAnalyzer.DataAccess.Entities.GenePrioritization.DataIntegration import DataIntegration
from Src.Core.Manager.ManagerBase import ManagerBase
class DataIntegratorManager(ManagerBase):
"""description of class"""
@inject.Param(repository='DataIntegrationRepositoryBase')
def __init__(self, repository):
"""
:param repository:
"""
super().__init__(repository)
self.__page_size = 10
def add_one(self, data_integration_dto: DataIntegrationDto):
fe_data_integration = self._repository.get_one(FeSingleDataIntegration(data_type=data_integration_dto.data_type,
conditional=data_integration_dto.conditional),
DataIntegration,
{'data_type': 1})
if fe_data_integration.result:
self._repository.delete_one(fe_data_integration)
adj_list = data_integration_dto.adjacency_list
page = paginate.Page(adj_list, page=0, items_per_page=self.__page_size)
while True:
if not page.next_page:
break
data_integration_dto.adjacency_list = page.items
self._repository.add_one(data_integration_dto)
page = paginate.Page(adj_list, page=page.next_page, items_per_page=self.__page_size)
def get_one(self, fe_data_integration: FeSingleDataIntegration,
include_or_exclude_fields: Dict[str, int] = None) -> FeSingleDataIntegration:
"""
:param fe_data_integration:
:param include_or_exclude_fields:
:return:
"""
return self._repository.get_one(fe_data_integration,
DataIntegration,
include_or_exclude_fields) | bsd-3-clause | 2,711,308,980,709,018,000 | 38.214286 | 125 | 0.613212 | false | 4.295499 | false | false | false |
mattliston/examples | example009.py | 1 | 5626 | # wget http://stuff.mit.edu/afs/sipb/contrib/pi/pi-billion.txt
# THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python example009.py
from __future__ import division
import numpy as np
import theano
import theano.tensor as T
import lasagne as L
import argparse
import time
from six.moves import cPickle
np.set_printoptions(threshold='nan')
np.set_printoptions(linewidth=200)
np.set_printoptions(formatter={'float': '{:12.8f}'.format, 'int': '{:4d}'.format})
print 'numpy ' + np.__version__
print 'theano ' + theano.__version__
print 'lasagne ' + L.__version__
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ndigits', help='number of digits, default 1000000', default=1000000, type=int)
parser.add_argument('--window', help='window size, default=100', default=100, type=int)
parser.add_argument('--lr', help='learning rate, default 0.001', default=0.001, type=float)
parser.add_argument('--nepoch', help='number of epochs, default=100', default=100, type=int)
parser.add_argument('--nbatch', help='number of batches per epoch, default=100', default=100, type=int)
parser.add_argument('--batchsize', help='batch size, default 1000', default=1000, type=int)
parser.add_argument('--test', help='test fraction, default 0.2', default=0.2, type=float)
parser.add_argument('--model', help='output model filename')
args = parser.parse_args()
print args
# load data
with open('pi-billion.txt') as f:
s = f.read()
f.close()
pi = np.empty([args.ndigits],dtype='float32')
i=0
for c in s:
if c.isdigit():
pi[i] = float(c)
i+=1
if i==args.ndigits:
break
print 'pi.shape',pi.shape
input_var = T.matrix(dtype=theano.config.floatX)
target_var = T.vector(dtype='int32')
network = L.layers.InputLayer((None, args.window), input_var)
print 'input', L.layers.get_output_shape(network)
network = L.layers.ReshapeLayer(network, ((-1, 1, args.window)))
print 'reshape', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
conv = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(conv)
gap = L.layers.Pool1DLayer(conv, pool_size=L.layers.get_output_shape(conv)[2], stride=None, pad=0, mode='average_inc_pad')
print 'gap', L.layers.get_output_shape(gap)
network = L.layers.DenseLayer(gap, 2, nonlinearity=L.nonlinearities.softmax)
print 'output', L.layers.get_output_shape(network)
#input_var = T.matrix(dtype=theano.config.floatX)
#target_var = T.vector(dtype='int32')
#network = L.layers.InputLayer((None, args.window), input_var)
#network = L.layers.DenseLayer(network, 10000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 100)
#network = L.layers.DenseLayer(network, 2, nonlinearity=L.nonlinearities.softmax)
prediction = L.layers.get_output(network)
loss = L.objectives.aggregate(L.objectives.categorical_crossentropy(prediction, target_var), mode='mean')
params = L.layers.get_all_params(network, trainable=True)
updates = L.updates.adam(loss, params, learning_rate=args.lr)
scaled_grads,norm = L.updates.total_norm_constraint(T.grad(loss,params), np.inf, return_norm=True)
train_fn = theano.function([input_var, target_var], [loss,norm], updates=updates)
test_fn = theano.function([input_var], L.layers.get_output(network, deterministic=True))
d = np.empty([args.batchsize,args.window],dtype='float32')
l = np.empty([args.batchsize],dtype='int32')
t0 = time.time()
t = time.time()
for i in range(args.nepoch):
tloss=0
tnorm=0
#train
for j in range(args.nbatch):
for k in range(args.batchsize):
#w = np.random.randint(int(pi.shape[0]*args.test),pi.shape[0]-args.window)
w = np.random.randint(0,int(pi.shape[0]*(1-args.test))-args.window)
d[k] = pi[w:w+args.window]
if np.random.randint(0,2)==0:
l[k]=0
else:
np.random.shuffle(d[k])
l[k]=1
bloss,bnorm = train_fn(d,l)
tloss += bloss
tnorm += bnorm
#test
for k in range(args.batchsize):
#w = np.random.randint(0,int(pi.shape[0]*args.test-args.window))
w = np.random.randint(int(pi.shape[0]*(1-args.test)),pi.shape[0]-args.window)
d[k] = pi[w:w+args.window]
if np.random.randint(0,2)==0:
l[k]=0
else:
np.random.shuffle(d[k])
l[k]=1
val_output = test_fn(d)
val_predictions = np.argmax(val_output, axis=1)
tacc = np.mean(val_predictions == l)
print 'epoch {:8d} loss {:12.8f} grad {:12.8f} accuracy {:12.8f} n_zero {:6d} n_one {:6d} t_epoch {:4d} t_total {:8d}'.format(i, tloss/args.nbatch, tnorm/args.nbatch, tacc, np.sum(val_predictions==0), np.sum(val_predictions==1), int(time.time()-t), int(time.time()-t0))
t = time.time()
f = open(args.model, 'wb')
cPickle.dump(L.layers.get_all_param_values(network), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
| mit | 1,036,101,989,835,955,000 | 42.953125 | 273 | 0.690899 | false | 2.91805 | true | false | false |
decause/hflossk | hflossk/blueprints.py | 1 | 1542 | import os
from flask import Blueprint
from flask.ext.mako import render_template
homework = Blueprint('homework', __name__, template_folder='templates')
lectures = Blueprint('lectures', __name__, template_folder='templates')
quizzes = Blueprint('quizzes', __name__, template_folder='templates')
@homework.route('/', defaults={'page': 'index'})
@homework.route('/<page>')
def display_homework(page):
if page == 'index':
hws = os.listdir(os.path.join(os.path.split(__file__)[0],
'static', 'hw'))
hws.extend(os.listdir(os.path.join(os.path.split(__file__)[0],
'templates', 'hw')))
hws = [hw for hw in sorted(hws) if not hw == "index.mak"]
else:
hws = None
return render_template('hw/{}.mak'.format(page), name='mako', hws=hws)
@lectures.route('/', defaults={'page': 'index'})
@lectures.route('/<page>')
def display_lecture(page):
if page == 'index':
lecture_notes = os.listdir(os.path.join(os.path.split(__file__)[0],
'templates', 'lectures'))
lecture_notes = [note for note in sorted(lecture_notes)
if not note == "index.mak"]
else:
lecture_notes = None
return render_template('lectures/{}.mak'.format(page), name='mako',
lectures=lecture_notes)
@quizzes.route('/<quiz_num>')
def show_quiz(quiz_num):
return render_template('quiz/{}.mak'.format(quiz_num), name='mako')
| apache-2.0 | 3,167,597,891,608,636,400 | 34.860465 | 75 | 0.568093 | false | 3.528604 | false | false | false |
zamudio-fabian/ri | repositories/CorpusRepository.py | 1 | 1045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from os import listdir
from models.Documento import *
from os.path import relpath,join
class CorpusRepository:
path = ''
documentos = []
def __init__(self,path):
self.path = path
def getListDocuments(self):
self.documentos = []
for documentName in listdir(relpath(self.path)):
            if (documentName[0] != u'.'): # Protection to avoid reading MAC system files, e.g. .DS_store
self.documentos.append(self.getDocument(documentName))
return self.documentos
def getFullStringFromDocument(self,documentName):
filePath = join(self.path,documentName)
with codecs.open(filePath, mode='rt', encoding='utf-8') as fp:
return fp.read()
return None
def getDocument(self,documentName):
filePath = join(self.path,documentName)
with codecs.open(filePath, mode='rt', encoding='utf-8') as fp:
return Documento(documentName,fp.read())
return None | mit | 1,656,043,298,754,695,000 | 30.666667 | 105 | 0.640805 | false | 3.824176 | false | false | false |
jonobrien/School_Backups | cs1-python/Homework/week 10/myListIter.py | 1 | 9811 | #!/usr/local/bin/python3
"""
Author: Jon O'Brien
Due Date: 11/9/13
Assignment: linked list homework
File: myListRec.py
Author: Sean Strout <[email protected]>
Language: Python 3
Description: An iterative implementation of a node based single linked list
data structure.
Purpose: LECTURE VERSION
"""
from myNode import *
###########################################################
# LINKED LIST CLASS DEFINITION
###########################################################
class MyList():
"""A class that encapsulates a node based linked list"""
__slots__ = ('head', 'size', 'cursor')
###########################################################
# LINKED LIST CLASS BUILDER
###########################################################
def mkMyList():
"""
Constructs and returns an empty list.
Parameters:
None
Returns:
An empty list
"""
lst = MyList()
lst.head = mkEmptyNode()
lst.size = 0
lst.cursor = mkEmptyNode()
return lst
###########################################################
# LINKED LIST CURSOR FUNCTIONS
###########################################################
def reset(lst):
"""
Resets the cursor to the start of the list
    Parameters:
lst (MyList) - the linked list
Returns:
None
"""
lst.cursor = lst.head
def hasNext(lst):
"""
Returns True if the list has more elements.
    Parameters:
lst (MyList) - the linked list
Returns:
True (bool) if the cursor is value
"""
return not isinstance(lst.cursor, EmptyNode)
def next(lst):
"""
Returns the next element in the iteration.
    Parameters:
lst (MyList) - the linked list
Preconditions:
If cursor is invalid, raises an IndexError exception
Returns:
The value (any type) referenced by the cursor
"""
if isinstance(lst.cursor, EmptyNode):
raise IndexError("cursor is invalid")
val = lst.cursor.data
lst.cursor = lst.cursor.next
return val
###########################################################
# LINKED LIST FUNCTIONS
###########################################################
def clear(lst):
"""
Make a list empty.
Parameters:
lst (MyList) - the linked list
Returns:
None
"""
lst.head = mkEmptyNode()
lst.size = 0
lst.cursor = mkEmptyNode()
def toString(lst):
"""
Converts our linked list into a string form that is similar to Python's
printed list.
Parameters:
lst (MyList) - The linked list
Returns:
A string representation of the list (e.g. '[1,2,3]')
"""
result = '['
curr = lst.head
while not isinstance(curr, EmptyNode):
if isinstance(curr.next, EmptyNode):
result += str(curr.data)
else:
result += str(curr.data) + ', '
curr = curr.next
result += ']'
return result
def append(lst, value):
"""
Add a node containing the value to the end of the list.
Parameters:
lst (MyList) - The linked list
value (any type) - The data to append to the end of the list
Returns:
None
"""
if isinstance(lst.head, EmptyNode):
lst.head = mkNode(value, EmptyNode())
else:
curr = lst.head
while not isinstance(curr.next, EmptyNode):
curr = curr.next
curr.next = mkNode(value, EmptyNode())
lst.size += 1
def insertAt(lst, index, value):
"""
Insert a new element before the index.
Parameters:
lst (MyList) - The list to insert value into
index (int) - The 0 based index to insert before
value (any type) - The data to be inserted into the list
Preconditions:
0 <= index <= lst.size, raises IndexError exception
Returns:
None
"""
if index < 0 or index > lst.size:
raise IndexError(str(index) + ' is out of range.')
if index == 0:
lst.head = mkNode(value, lst.head)
else:
prev = lst.head
while index > 1:
prev = prev.next
index -= 1
prev.next = mkNode(value, prev.next)
lst.size += 1
def get(lst, index):
"""
Returns the element that is at index in the list.
Parameters:
lst (MyList) - The list to insert value into
index (int) - The 0 based index to get
Preconditions:
0 <= index <= lst.size, raises IndexError exception
Returns:
None
"""
if index < 0 or index >= lst.size:
raise IndexError(str(index) + ' is out of range.')
curr = lst.head
while index > 0:
curr = curr.next
index -= 1
return curr.data
def set(lst, index, value):
"""
Sets the element that is at index in the list to the value.
Parameters:
lst (MyList) - The list to insert value into
index (int) - The 0 based index to set
value (any type)
Preconditions:
0 <= index <= lst.size, raises IndexError exception
Returns:
None
"""
if index < 0 or index >= lst.size:
raise IndexError(str(index) + ' is out of range.')
curr = lst.head
while index > 0:
curr = curr.next
index -= 1
curr.data = value
def pop(lst, index):
"""
Remove and return the element at index.
Parameters:
lst (MyList) - The list to insert value into
index (int) - The 0 based index to remove
Preconditions:
0 <= index <= lst.size, raises IndexError exception
Returns:
The value (any type) being popped
"""
if index < 0 or index >= lst.size:
raise IndexError(str(index) + ' is out of range.')
lst.cursor = mkEmptyNode()
if index == 0:
value = lst.head.data
lst.head = lst.head.next
else:
prev = lst.head
while index > 1:
prev = prev.next
index -= 1
value = prev.next.data
prev.next = prev.next.next
lst.size -=1
return value
def index(lst, value):
"""
Returns the index of the first occurrence of a value in the list
Parameters:
lst (MyList) - The list to insert value into
value (any type) - The data being searched for
Preconditions:
value exists in list, otherwise raises ValueError exception
Returns:
The index (int) of value or None if value is not present in the list
"""
pos = 0
curr = lst.head
while not isinstance(curr, EmptyNode):
if curr.data == value:
return pos
pos += 1
curr = curr.next
raise ValueError(str(value) + " is not present in the list")
def count(lst, value):
"""
This function takes the paramters for the list and the value being counted.
Then the count is accumulated in a while loop that checks for the current
node not being an EmptyNode. The node being checked is iterated over each
consecutive node until this while loop is broken out of. The count is
returned at the end of the function.
"""
c=0
curr=lst.head
while not isinstance (curr,EmptyNode):
if curr.data == value:
c+=1
curr=curr.next
return c
def myListToPyList(lst):
"""
This function takes the list parameter. Then this function converts the
node list into a python list. This is done by assigning pylist to a
built-in list. A while not loop is checked if the current node is not an
EmptyNode, in the loop the curent nodes' data is appended to the python
list and the current node is iterated over until the emptyNode is reached.
Then the finished and constructed python list is returned.
"""
pylst=list()
curr=lst.head
while not isinstance(curr,EmptyNode):
pylst.append(curr.data)
curr=curr.next
return pylst
def pyListToMyList(pylst):
"""
This function takes the pylist as a parameter. It converts the pylist
into a node list called MyList. The node list is initiated and a for
loop is used to take every value in the pylist and append it to a node
in the node list objects. The node based list is returned at the
completion of the function to convert the list into a MyList.
"""
MyList=mkMyList()
for val in pylst:
append(MyList, val)
return MyList
def remove(lst,value):
"""
Remove takes parameters of list and value. It searches through the nodes
and removes the selected value from the list. This is done with a while
not loop that checks if the current node is not an emptyNode. In this
loop, the data of the node is tested if equal to the value and the head
is reassigned to the next node until the value is found, that is,
iterated over each node, and returned true. Otherwise the node with
the data that equals the value is reassigned until the value is found
and the size is decremented by one until the value is located. When
the while not loop is broken out of, False is returned.
"""
curr=lst.head
lst.cursor=mkEmptyNode()
while not isinstance(curr,EmptyNode):
if lst.head.data==value:
lst.head=lst.head.next
return True
elif curr.next.data==value: #starting to skip object
curr.next=curr.next.next #reassigns to not skipped object
lst.size-=1
return True
curr=curr.next
return False
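# Example usage (illustrative, not part of the original homework file):
#     lst = mkMyList()
#     append(lst, 3); append(lst, 1); append(lst, 3)
#     print(toString(lst))        # [3, 1, 3]
#     print(count(lst, 3))        # 2
#     remove(lst, 1)
#     print(myListToPyList(lst))  # [3, 3]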
| gpl-3.0 | 6,148,938,740,121,813,000 | 25.879452 | 79 | 0.564163 | false | 4.30874 | false | false | false |
muraliselva10/cloudkitty-dashboard | cloudkittydashboard/dashboards/admin/pyscripts/views.py | 1 | 3397 | # Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from horizon import tables
from horizon import views
from cloudkittydashboard.api import cloudkitty as api
from cloudkittydashboard.dashboards.admin.pyscripts import forms \
as pyscripts_forms
from cloudkittydashboard.dashboards.admin.pyscripts import tables \
as pyscripts_tables
class IndexView(tables.DataTableView):
table_class = pyscripts_tables.PyScriptsTable
template_name = 'admin/pyscripts/pyscripts_list.html'
def get_data(self):
data = api.cloudkittyclient(self.request).pyscripts.scripts.list()
data = api.identify(data, name=False)
return data
class ScriptCreateView(forms.ModalFormView):
form_class = pyscripts_forms.CreateScriptForm
form_id = "create_script"
modal_header = _("Create Script")
page_title = _("Create Script")
submit_url = reverse_lazy('horizon:admin:pyscripts:script_create')
success_url = reverse_lazy('horizon:admin:pyscripts:index')
template_name = 'admin/pyscripts/form.html'
def get_object_id(self, obj):
return obj
class ScriptUpdateView(forms.ModalFormView):
form_class = pyscripts_forms.EditScriptForm
form_id = "update_script"
modal_header = _("Update Script")
page_title = _("Update Script")
submit_url = 'horizon:admin:pyscripts:script_update'
success_url = 'horizon:admin:pyscripts:script_update'
template_name = 'admin/pyscripts/form.html'
def get_initial(self):
script = api.cloudkittyclient(self.request).pyscripts.scripts.get(
script_id=self.kwargs['script_id'])
self.initial = script.to_dict()
self.initial['script_data'] = self.initial['data']
return self.initial
def get_context_data(self, **kwargs):
context = super(ScriptUpdateView, self).get_context_data(**kwargs)
context['script_id'] = self.kwargs.get('script_id')
context['submit_url'] = reverse_lazy(self.submit_url,
args=(context['script_id'], ))
return context
def get_success_url(self, **kwargs):
return reverse('horizon:admin:pyscripts:index')
class ScriptDetailsView(views.APIView):
template_name = 'admin/pyscripts/details.html'
page_title = _("Script Details : {{ script.name }}")
def get_data(self, request, context, *args, **kwargs):
script_id = kwargs.get("script_id")
try:
script = api.cloudkittyclient(self.request).pyscripts.scripts.get(
script_id=script_id)
except Exception:
script = None
context['script'] = script
return context
| apache-2.0 | -1,410,229,382,648,221,400 | 36.32967 | 78 | 0.688549 | false | 3.89118 | false | false | false |
http2d/core | lib/libchula/gen-config.py | 2 | 2132 | #!/usr/bin/env python
import os, re, sys
CONFIG_H = """\
#ifndef CONFIG_H_
#define CONFIG_H_
/* Detected headers */
${{includes}}
/* Functions */
${{functions}}
/* Sizes */
${{sizes}}
/* Definitions */
${{definitions}}
#endif /* CONFIG_H_ */
"""
PATH_SRC = sys.argv[1]
PATH_BIN = sys.argv[2]
FILENAME_CMK = os.path.join (PATH_SRC, 'CMakeLists.txt')
FILENAME_NEW = os.path.join (PATH_BIN, 'config.h.in')
# Parse CMakeLists.txt
with open(FILENAME_CMK, 'r') as f:
cont = f.read()
includes_t = ''
for h in re.findall (r'CHULA_CHECK_INCLUDE *\(.+? *(\w+)\)', cont, re.IGNORECASE):
includes_t += '#cmakedefine %s\n' %(h)
functions_t = ''
for f in re.findall (r'CHECK_FUNCTION_EXISTS *\(.+? *(\w+)\)', cont, re.IGNORECASE):
functions_t += '#cmakedefine %s\n' %(f)
for f in re.findall (r'CHECK_C_SOURCE_COMPILES *\(.+?(HAVE_.+?)\)\n', cont, re.S):
functions_t += '#cmakedefine %s\n' %(f)
for f in re.findall (r'CHECK_C_SOURCE_RUNS *\(.+?(HAVE_.+?)\)\n', cont, re.S):
functions_t += '#cmakedefine %s\n' %(f)
definitions_t = ''
for f in re.findall (r'DEF_SET *\((\w+)? +(.+?)\)', cont, re.IGNORECASE):
definitions_t += '#cmakedefine %s %s\n' %(f[0], f[1])
for f in re.findall (r'DEF_SET_IFNDEF *\((\w+)? +(.+?)\)', cont, re.IGNORECASE):
definitions_t += '#ifndef %s\n' %(f[0])
definitions_t += '#cmakedefine %s %s\n' %(f[0], f[1])
definitions_t += '#endif\n'
for f in re.findall (r'DEF_DEFINE *\((\w+)?\)', cont, re.IGNORECASE):
definitions_t += '#cmakedefine %s\n' %(f)
sizes_t = ''
for h in re.findall (r'CHECK_TYPE_SIZE *\(.+? *(\w+)\)', cont, re.IGNORECASE):
sizes_t += '@%s_CODE@\n' %(h)
sizes_t += '#cmakedefine HAVE_%s\n' %(h)
sizes_t += '#ifdef HAVE_%s\n' %(h)
sizes_t += '# define HAVE_%s\n' %(h.replace('SIZEOF_',''))
sizes_t += '#endif\n'
# Replacements
config_h = CONFIG_H
config_h = config_h.replace ("${{includes}}", includes_t)
config_h = config_h.replace ("${{functions}}", functions_t)
config_h = config_h.replace ("${{sizes}}", sizes_t)
config_h = config_h.replace ("${{definitions}}", definitions_t)
# Write config.h
with open(FILENAME_NEW, 'w+') as f:
f.write (config_h)
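# Illustrative example (assumed input): a CMakeLists.txt entry such as
#   CHULA_CHECK_INCLUDE (sys/time.h HAVE_SYS_TIME_H)
# is picked up by the first regex and emits "#cmakedefine HAVE_SYS_TIME_H"
# into the generated config.h.in.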
| bsd-2-clause | 8,433,817,983,938,177,000 | 29.028169 | 84 | 0.588649 | false | 2.559424 | true | false | false |
AnnieJumpCannon/RAVE | article/figures/plot_hrd_test_color.py | 1 | 2417 |
import locale
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib.ticker import MaxNLocator
locale.setlocale(locale.LC_ALL, 'en_US')
try:
rave_cannon_dr1
except NameError:
from rave_io import rave_cannon_dr1
else:
print("Using pre-loaded data")
t = rave_cannon_dr1
#ok = (t["SNRK"] > 50) * (t["r_chi_sq_ms"] < 3) * (t["r_chi_sq_giant"] < 3) #* (t["WEIGHTED_VSINI"] < 1)
xlim = (7500, 3500)
ylim = (5.5, 0)
vmin, vmax = (-3, 0.5)
snrs = (100, 50, 25, 10)
M, N = (len(snrs), 50)
factor = 3.5
lbdim = 0.2 * factor
trdim = 0.1 * factor
whspace = 0.05
yspace = factor
xspace = factor * M + factor * (M - 1) * whspace + lbdim * (M - 1)
xdim = lbdim + xspace + trdim
ydim = lbdim + yspace + trdim
fig, axes = plt.subplots(1, M, figsize=(xdim, ydim))
fig.subplots_adjust(
left=lbdim/xdim, bottom=lbdim/ydim, right=(xspace + lbdim)/xdim,
top=(yspace + lbdim)/ydim, wspace=whspace, hspace=whspace)
for ax, snr in zip(axes, snrs):
ok = (t["SNRK"] > snr) * (t["R_CHI_SQ"] < 3) * (t["R"] > 25) #* (t["VSINI"] < 1)
ax.scatter(t["TEFF"][ok], t["LOGG"][ok], c=t["FE_H"][ok],
vmin=vmin, vmax=vmax, alpha=0.25, s=50, edgecolor="none", cmap="plasma",
rasterized=True)
if ax.is_last_col():
scat = ax.scatter([0], [0], c=[0], vmin=vmin, vmax=vmax, cmap="plasma")
K = locale.format("%d", sum(ok), grouping=True).replace(",", "$,$")
ax.text(0.05, 0.9, r"$S/N > {:.0f}$".format(snr),
horizontalalignment="left", verticalalignment="bottom",
transform=ax.transAxes, fontsize=14)
ax.text(0.05, 0.82, r"${}$".format(K) + r" ${\rm stars}$",
horizontalalignment="left", verticalalignment="bottom",
transform=ax.transAxes, fontsize=14)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.xaxis.set_major_locator(MaxNLocator(6))
ax.xaxis.set_ticks([7000, 6000, 5000, 4000])
ax.yaxis.set_major_locator(MaxNLocator(6))
ax.set_xlabel(r"$T_{\rm eff}$ $({\rm K})$")
if ax.is_first_col():
ax.set_ylabel(r"$\log{g}$")
else:
ax.set_yticklabels([])
cax, kw = mpl.colorbar.make_axes(list(axes), fraction=0.075, pad=0.025, aspect=10)
cbar = plt.colorbar(scat, cax=cax, ticks=[-3, -2, -1, 0])
cbar.set_label(r"$[{\rm Fe/H}]$")
fig.savefig("hrd-test-set-color.png")
fig.savefig("hrd-test-set-color.pdf", dpi=300) | mit | -7,570,299,001,141,537,000 | 25 | 104 | 0.60571 | false | 2.50207 | false | false | false |
kirmani/hlpr_cadence | third_party/vector_v1/vector_common/vector_ros/src/vector/vector_teleop_full_system.py | 1 | 20366 | """--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file vector_teleop_full_system.py
\brief This module contains a class for teleoperating all the vector
platform DOF with a joystick controller; only works with logitech
extreme 3d
\Platform: Linux/ROS Indigo
Edited 7/25/2016: Vivian Chu, vchu@gatech - included support for simulation
--------------------------------------------------------------------"""
from utils import *
from system_defines import *
from vector_msgs.msg import *
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool,Float64
from trajectory_msgs.msg import JointTrajectoryPoint
from dynamixel_controllers.srv import *
from control_msgs.msg import JointTrajectoryAction, JointTrajectoryGoal, FollowJointTrajectoryAction, FollowJointTrajectoryGoal, JointTrajectoryControllerState
import rospy
import sys
import math
import actionlib
class VectorTeleopFullSystem(object):
def __init__(self):
self.is_sim = rospy.get_param('~sim',True)
self.lincmd = LinearActuatorCmd()
if (False == self.is_sim):
"""
Subscribe to the configuration message
"""
self.config_updated = False
rospy.Subscriber("/vector/feedback/active_configuration", Configuration, self._update_configuration_limits)
start_time = rospy.get_time()
while ((rospy.get_time() - start_time) < 10.0) and (False == self.config_updated):
rospy.sleep(0.05)
if (False == self.config_updated):
rospy.logerr("Timed out waiting for Vector feedback topics make sure the driver is running")
sys.exit(0)
return
"""
Initialize the linear actuator position if this is the real system
"""
vector_dynamics = rospy.wait_for_message("/vector/feedback/dynamics", Dynamics)
self.lincmd.desired_position_m = vector_dynamics.linear_actuator_position_m
else:
self.x_vel_limit_mps = rospy.get_param('~sim_teleop_x_vel_limit_mps',0.5)
self.y_vel_limit_mps = rospy.get_param('~sim_teleop_y_vel_limit_mps',0.5)
self.yaw_rate_limit_rps = rospy.get_param('~sim_teleop_yaw_rate_limit_rps',0.5)
self.accel_lim = rospy.get_param('~sim_teleop_accel_lim',0.5)
self.yaw_accel_lim = rospy.get_param('~sim_teleop_yaw_accel_lim',1.0)
# Simulation flags for linear actuator
self.linact_sub = rospy.Subscriber('/linear_actuator_controller/state', JointTrajectoryControllerState, self._update_simulation_linear_actuator, queue_size=1)
self.sim_lin_actuator_position = 0.0 # init to 0 for now
self.sim_lin_init = False
self.last_arm_update = rospy.get_time()
"""
Set the mapping for the various commands
"""
self.ctrl_map = dict({'momentary': {'dead_man' : {'is_button':True,'index':0,'set_val':1},
'man_ovvrd' : {'is_button':True,'index':1,'set_val':1},
'standby' : {'is_button':True,'index':2,'set_val':1},
'tractor' : {'is_button':True,'index':3,'set_val':1},
'estop' : {'is_button':True,'index':4,'set_val':1},
'pan_tilt_ctl' : {'is_button':True,'index':8,'set_val':1},
'base_ctl' : {'is_button':True,'index':9,'set_val':1},
'arm_ctl_right': {'is_button':True,'index':10,'set_val':1},
'arm_ctl_left' : {'is_button':True,'index':11,'set_val':1}},
'axis' : {'left_right' : {'index' :0, 'invert_axis':False},
'for_aft' : {'index' :1, 'invert_axis':False},
'twist' : {'index' :2, 'invert_axis':False},
'flipper' : {'index' :3, 'invert_axis':False},
'dpad_lr' : {'index' :4, 'invert_axis':False},
'dpad_ud' : {'index' :5, 'invert_axis':False}}})
"""
Initialize the debounce logic states
"""
self.db_cnt = dict()
self.axis_value = dict()
self.button_state = dict()
for key, value in self.ctrl_map.iteritems():
if key == 'momentary':
for key, value2 in value.iteritems():
self.db_cnt[key]=0
self.button_state[key]=False
else:
self.db_cnt[key]=0
self.axis_value[key]=0.0
self.send_cmd_none = False
self.no_motion_commands = True
self.last_motion_command_time = 0.0
self.last_joy = rospy.get_time()
self._last_gripper_val = 0.0
self.run_arm_ctl_right = False
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = True
self._last_angles = [0.0,0.0]
self.cfg_cmd = ConfigCmd()
self.cfg_pub = rospy.Publisher('/vector/gp_command', ConfigCmd, queue_size=10)
self.motion_cmd = Twist()
self.limited_cmd = Twist()
self.motion_pub = rospy.Publisher('/vector/teleop/cmd_vel', Twist, queue_size=10)
self.override_pub = rospy.Publisher("/vector/manual_override/cmd_vel",Twist, queue_size=10)
self.linpub = rospy.Publisher("/vector/linear_actuator_cmd",LinearActuatorCmd,queue_size=1)
self.arm_pub = [0]*2
self.gripper_pub = [0]*2
self.arm_pub[0] = rospy.Publisher('/vector/right_arm/cartesian_vel_cmd', JacoCartesianVelocityCmd, queue_size=10)
self.gripper_pub[0] = rospy.Publisher('/vector/right_gripper/cmd', GripperCmd, queue_size=10)
self.arm_pub[1] = rospy.Publisher('/vector/left_arm/cartesian_vel_cmd', JacoCartesianVelocityCmd, queue_size=10)
self.gripper_pub[1] = rospy.Publisher('/vector/left_gripper/cmd', GripperCmd, queue_size=10)
self.pan_pub = rospy.Publisher('/pan_controller/command', Float64, queue_size=1)
self.tilt_pub = rospy.Publisher('/tilt_controller/command', Float64, queue_size=1)
rospy.Subscriber('/joy', Joy, self._vector_teleop)
def _update_simulation_linear_actuator(self, msg):
self.sim_lin_actuator_position = msg.actual.positions[0]
def _update_configuration_limits(self,config):
self.x_vel_limit_mps = config.teleop_x_vel_limit_mps
self.y_vel_limit_mps = config.teleop_y_vel_limit_mps
self.yaw_rate_limit_rps = config.teleop_yaw_rate_limit_rps
self.accel_lim = config.teleop_accel_limit_mps2
self.yaw_accel_lim = config.teleop_yaw_accel_limit_rps2
self.config_updated = True
def _parse_joy_input(self,joyMessage):
raw_button_states = dict()
self.button_state = dict()
for key, value in self.ctrl_map.iteritems():
if key == 'momentary':
for key2, value2 in value.iteritems():
raw_button_states[key2]=True
self.button_state[key2]=False
else:
for key2, value2 in value.iteritems():
self.axis_value[key2] = 0.0
for key, value in self.ctrl_map.iteritems():
if key == 'momentary':
for key2, item in value.iteritems():
if item['is_button']:
if item['set_val'] == joyMessage.buttons[item['index']]:
raw_button_states[key2] &= True
else:
raw_button_states[key2] = False
else:
temp = joyMessage.axes[item['index']]
if (item['invert_axis']):
temp *= -1.0
if (temp >= item['set_thresh']):
raw_button_states[key2] &= True
else:
raw_button_states[key2] = False
if (True == raw_button_states[key2]):
self.db_cnt[key2]+=1
if (self.db_cnt[key2] > 10):
self.db_cnt[key2] = 10
self.button_state[key2] = True
else:
self.button_state[key2] = False
self.db_cnt[key2] = 0
if key == 'axis':
for key2, item in value.iteritems():
temp = joyMessage.axes[item['index']]
if (item['invert_axis']):
temp *= -1.0
self.axis_value[key2] = temp
def _vector_teleop(self, joyMessage):
self._parse_joy_input(joyMessage)
if self.button_state['base_ctl']:
self.run_arm_ctl_right = False
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
elif self.button_state['arm_ctl_right']:
self.run_arm_ctl_right = True
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
elif self.button_state['arm_ctl_left']:
self.run_arm_ctl_right = False
self.run_arm_ctl_left = True
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
elif self.button_state['pan_tilt_ctl']:
self.run_arm_ctl = False
self.run_arm_ctl_right = False
self.run_arm_ctl_left = False
self.run_pan_tilt_ctl = True
self._init_pan_tilt = True
if self.button_state['estop']:
self.run_arm_ctl = False
self.run_pan_tilt_ctl = False
self._init_pan_tilt = False
arm_cmd = JacoCartesianVelocityCmd()
arm_cmd.header.stamp=rospy.get_rostime()
arm_cmd.header.frame_id=''
self.arm_pub[0].publish(arm_cmd)
self.arm_pub[1].publish(arm_cmd)
home = Float64()
home.data = 0.0
self.pan_pub.publish(home)
self.tilt_pub.publish(home)
if self.run_arm_ctl_right or self.run_arm_ctl_left:
arm_cmd = JacoCartesianVelocityCmd()
arm_cmd.header.stamp=rospy.get_rostime()
arm_cmd.header.frame_id=''
gripper_cmd = GripperCmd()
if self.run_arm_ctl_right:
arm_idx = 0
else:
arm_idx = 1
if self.button_state['dead_man']:
arm_cmd.x = self.axis_value['left_right'] * 0.1
arm_cmd.z = self.axis_value['for_aft'] * 0.1
if not self.button_state['man_ovvrd']:
arm_cmd.y = self.axis_value['twist'] * 0.1
else:
# Check if we're in simulation - if so set the last known position
if self.is_sim == True:
if self.sim_lin_init == False:
self.lincmd.desired_position_m = self.sim_lin_actuator_position
self.sim_lin_init = True
dt = rospy.get_time() - self.last_arm_update
self.lincmd.desired_position_m += (self.axis_value['twist'] * 0.05) * dt
if (self.lincmd.desired_position_m > 0.855):
self.lincmd.desired_position_m = 0.855
elif self.lincmd.desired_position_m < 0.0:
self.lincmd.desired_position_m = 0.0
self.lincmd.header.stamp = rospy.get_rostime()
self.lincmd.header.frame_id=''
self.linpub.publish(self.lincmd)
self.lincmd.header.seq+=1
self.last_arm_update = rospy.get_time()
arm_cmd.theta_y = self.axis_value['dpad_ud'] * 100.0
arm_cmd.theta_x = self.axis_value['dpad_lr'] * 100.0
if self.button_state['standby']:
arm_cmd.theta_z = 100.0
elif self.button_state['tractor']:
arm_cmd.theta_z = -100.0
gripper_val = (self.axis_value['flipper'] + 1.0)/2.0
if abs(self._last_gripper_val-gripper_val) > 0.05:
gripper_cmd.position = gripper_val * 0.085
gripper_cmd.speed = 0.05
gripper_cmd.force = 100.0
self.gripper_pub[arm_idx].publish(gripper_cmd)
self._last_gripper_val = gripper_val
self.arm_pub[arm_idx].publish(arm_cmd)
elif self.run_pan_tilt_ctl:
if self._init_pan_tilt:
# Check if we're in sim - if so use default speed
if self.is_sim == False:
rospy.wait_for_service('/pan_controller/set_speed')
rospy.wait_for_service('/tilt_controller/set_speed')
try:
set_speed = rospy.ServiceProxy('/pan_controller/set_speed', SetSpeed)
resp1 = set_speed(1.0)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
try:
set_speed = rospy.ServiceProxy('/tilt_controller/set_speed', SetSpeed)
resp1 = set_speed(1.0)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
self._init_pan_tilt = False
if self.button_state['dead_man']:
pan = self.axis_value['twist'] * 1.05
tilt = self.axis_value['for_aft'] * 1.4
pan_cmd = Float64()
tilt_cmd = Float64()
pan_cmd.data = pan
tilt_cmd.data = tilt
if abs(self._last_angles[0] - pan) > 0.05:
self.pan_pub.publish(pan_cmd)
self._last_angles[0] = pan
if abs(self._last_angles[1] - tilt) > 0.05:
self.tilt_pub.publish(tilt_cmd)
self._last_angles[1] = tilt
else:
if self.button_state['estop']:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = DTZ_REQUEST
elif self.button_state['standby']:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = STANDBY_REQUEST
elif self.button_state['tractor']:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = TRACTOR_REQUEST
else:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_NONE'
self.cfg_cmd.gp_param = 0
if ('GENERAL_PURPOSE_CMD_NONE' != self.cfg_cmd.gp_cmd):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.cfg_cmd.header.seq
self.send_cmd_none = True
elif (True == self.send_cmd_none):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.cfg_cmd.header.seq
self.send_cmd_none = False
elif (False == self.send_cmd_none):
if self.button_state['dead_man']:
self.motion_cmd.linear.x = (self.axis_value['for_aft'] * self.x_vel_limit_mps)
self.motion_cmd.linear.y = (self.axis_value['left_right'] * self.y_vel_limit_mps)
self.motion_cmd.angular.z = (self.axis_value['twist'] * self.yaw_rate_limit_rps)
self.last_motion_command_time = rospy.get_time()
else:
self.motion_cmd.linear.x = 0.0
self.motion_cmd.linear.y = 0.0
self.motion_cmd.angular.z = 0.0
dt = rospy.get_time() - self.last_joy
self.last_joy = rospy.get_time()
if (dt >= 0.01):
self.limited_cmd.linear.x = slew_limit(self.motion_cmd.linear.x,
self.limited_cmd.linear.x,
self.accel_lim, dt)
self.limited_cmd.linear.y = slew_limit(self.motion_cmd.linear.y,
self.limited_cmd.linear.y,
self.accel_lim, dt)
self.limited_cmd.angular.z = slew_limit(self.motion_cmd.angular.z,
self.limited_cmd.angular.z,
self.yaw_accel_lim, dt)
if ((rospy.get_time() - self.last_motion_command_time) < 2.0):
self.motion_pub.publish(self.limited_cmd)
if self.button_state['man_ovvrd'] and self.button_state['man_ovvrd']:
self.override_pub.publish(self.motion_cmd)
| mit | 5,692,273,246,121,122,000 | 46.2529 | 170 | 0.5191 | false | 3.918045 | true | false | false |
imsally/redash | tests/models/test_alerts.py | 13 | 1361 | from tests import BaseTestCase
from redash.models import Alert, db
class TestAlertAll(BaseTestCase):
def test_returns_all_alerts_for_given_groups(self):
ds1 = self.factory.data_source
group = self.factory.create_group()
ds2 = self.factory.create_data_source(group=group)
query1 = self.factory.create_query(data_source=ds1)
query2 = self.factory.create_query(data_source=ds2)
alert1 = self.factory.create_alert(query_rel=query1)
alert2 = self.factory.create_alert(query_rel=query2)
db.session.flush()
alerts = Alert.all(group_ids=[group.id, self.factory.default_group.id])
self.assertIn(alert1, alerts)
self.assertIn(alert2, alerts)
alerts = Alert.all(group_ids=[self.factory.default_group.id])
self.assertIn(alert1, alerts)
self.assertNotIn(alert2, alerts)
alerts = Alert.all(group_ids=[group.id])
self.assertNotIn(alert1, alerts)
self.assertIn(alert2, alerts)
def test_return_each_alert_only_once(self):
group = self.factory.create_group()
self.factory.data_source.add_group(group)
alert = self.factory.create_alert()
alerts = Alert.all(group_ids=[self.factory.default_group.id, group.id])
self.assertEqual(1, len(list(alerts)))
self.assertIn(alert, alerts)
| bsd-2-clause | -1,555,532,253,955,384,600 | 34.815789 | 79 | 0.664952 | false | 3.516796 | true | false | false |
icyflame/batman | tests/ui_tests.py | 1 | 28350 | # -*- coding: utf-8 -*-
"""Tests for the user interface."""
#
# (C) Pywikibot team, 2008-2015
#
# Distributed under the terms of the MIT license.
#
# NOTE FOR RUNNING WINDOWS UI TESTS
#
# Windows UI tests have to be run using the tests\ui_tests.bat helper script.
# This will set PYTHONPATH and PYWIKIBOT2_DIR, and then run the tests. Do not
# touch mouse or keyboard while the tests are running, as this might disturb the
# interaction tests.
#
# The Windows tests were developed on a Dutch Windows 7 OS. You might need to adapt the
# helper functions in TestWindowsTerminalUnicode for other versions.
#
# For the Windows-based tests, you need the following packages installed:
# - pywin32, for clipboard access, which can be downloaded here:
# http://sourceforge.net/projects/pywin32/files/pywin32/Build%20218/
# make sure to download the package for the correct python version!
#
# - pywinauto, to send keys to the terminal, which can be installed using:
# easy_install --upgrade https://pywinauto.googlecode.com/files/pywinauto-0.4.2.zip
#
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import inspect
import io
import logging
import os
import subprocess
import sys
import time
if os.name == "nt":
from multiprocessing.managers import BaseManager
import threading
try:
import win32api
except ImportError:
win32api = None
try:
import pywinauto
except ImportError:
pywinauto = None
try:
import win32clipboard
except ImportError:
win32clipboard = None
import pywikibot
from pywikibot.bot import (
ui, DEBUG, VERBOSE, INFO, STDOUT, INPUT, WARNING, ERROR, CRITICAL
)
from pywikibot.tools import PY2
from pywikibot.userinterfaces import (
terminal_interface_win32, terminal_interface_base, terminal_interface_unix,
)
from tests.aspects import TestCase
from tests.utils import unittest, FakeModule
if sys.version_info[0] > 2:
unicode = str
class Stream(object):
"""Handler for a StringIO or BytesIO instance able to patch itself."""
def __init__(self, name, patched_streams):
"""
Create a new stream with a StringIO or BytesIO instance.
@param name: The part after 'std' (e.g. 'err').
@type name: str
@param patched_streams: A mapping which maps the original stream to
the patched stream.
@type patched_streams: dict
"""
self._stream = io.StringIO() if sys.version_info[0] > 2 else io.BytesIO()
self._name = 'std{0}'.format(name)
self._original = getattr(sys, self._name)
patched_streams[self._original] = self._stream
def __repr__(self):
return '<patched %s %r wrapping %r>' % (
self._name, self._stream, self._original)
def reset(self):
"""Reset own stream."""
self._stream.truncate(0)
self._stream.seek(0)
if os.name == "nt":
class pywikibotWrapper(object):
"""pywikibot wrapper class."""
def init(self):
pywikibot.version._get_program_dir()
def output(self, *args, **kwargs):
return pywikibot.output(*args, **kwargs)
def request_input(self, *args, **kwargs):
self.input = None
def threadedinput():
self.input = pywikibot.input(*args, **kwargs)
self.inputthread = threading.Thread(target=threadedinput)
self.inputthread.start()
def get_input(self):
self.inputthread.join()
return self.input
def set_config(self, key, value):
setattr(pywikibot.config, key, value)
def set_ui(self, key, value):
setattr(pywikibot.ui, key, value)
def cls(self):
os.system('cls')
class pywikibotManager(BaseManager):
"""pywikibot manager class."""
pass
pywikibotManager.register(str('pywikibot'), pywikibotWrapper)
_manager = pywikibotManager(
address=('127.0.0.1', 47228),
authkey=b'4DJSchgwy5L5JxueZEWbxyeG')
if len(sys.argv) > 1 and sys.argv[1] == "--run-as-slave-interpreter":
s = _manager.get_server()
s.serve_forever()
def patched_print(text, targetStream):
try:
stream = patched_streams[targetStream]
except KeyError:
assert isinstance(targetStream, pywikibot.userinterfaces.win32_unicode.UnicodeOutput)
assert targetStream._stream
stream = patched_streams[targetStream._stream]
org_print(text, stream)
def patched_input():
return strin._stream.readline().strip()
patched_streams = {}
strout = Stream('out', patched_streams)
strerr = Stream('err', patched_streams)
strin = Stream('in', {})
newstdout = strout._stream
newstderr = strerr._stream
newstdin = strin._stream
if sys.version_info[0] == 2:
# In Python 2 the sys.std* streams use bytes instead of unicode
# But this module is using unicode_literals so '…' will generate unicode
# So it'll convert those back into bytes
original_write = newstdin.write
def encoded_write(text):
if isinstance(text, unicode):
text = text.encode('utf8')
original_write(text)
newstdin.write = encoded_write
org_print = ui._print
org_input = ui._raw_input
def patch():
"""Patch standard terminal files."""
strout.reset()
strerr.reset()
strin.reset()
ui._print = patched_print
ui._raw_input = patched_input
def unpatch():
"""un-patch standard terminal files."""
ui._print = org_print
ui._raw_input = org_input
logger = logging.getLogger('pywiki')
loggingcontext = {'caller_name': 'ui_tests',
'caller_file': 'ui_tests',
'caller_line': 0,
'newline': '\n'}
class UITestCase(unittest.TestCase):
"""UI tests."""
net = False
def setUp(self):
patch()
pywikibot.config.colorized_output = True
pywikibot.config.transliterate = False
pywikibot.ui.transliteration_target = None
pywikibot.ui.encoding = 'utf-8'
def tearDown(self):
unpatch()
def _encode(self, string, encoding='utf-8'):
if sys.version_info[0] > 2:
return string
else:
return string.encode(encoding)
class TestTerminalOutput(UITestCase):
"""Terminal output tests."""
def testOutputLevels_logging_debug(self):
logger.log(DEBUG, 'debug', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), '')
def testOutputLevels_logging_verbose(self):
logger.log(VERBOSE, 'verbose', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), '')
def testOutputLevels_logging_info(self):
logger.log(INFO, 'info', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'info\n')
def testOutputLevels_logging_stdout(self):
logger.log(STDOUT, 'stdout', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), 'stdout\n')
self.assertEqual(newstderr.getvalue(), '')
def testOutputLevels_logging_input(self):
logger.log(INPUT, 'input', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'input\n')
def testOutputLevels_logging_WARNING(self):
logger.log(WARNING, 'WARNING', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'WARNING: WARNING\n')
def testOutputLevels_logging_ERROR(self):
logger.log(ERROR, 'ERROR', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'ERROR: ERROR\n')
def testOutputLevels_logging_CRITICAL(self):
logger.log(CRITICAL, 'CRITICAL', extra=loggingcontext)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'CRITICAL: CRITICAL\n')
def test_output(self):
pywikibot.output('output', toStdout=False)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'output\n')
def test_output_stdout(self):
pywikibot.output('output', toStdout=True)
self.assertEqual(newstdout.getvalue(), 'output\n')
self.assertEqual(newstderr.getvalue(), '')
def test_warning(self):
pywikibot.warning('warning')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'WARNING: warning\n')
def test_error(self):
pywikibot.error('error')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'ERROR: error\n')
def test_log(self):
pywikibot.log('log')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), '')
def test_critical(self):
pywikibot.critical('critical')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'CRITICAL: critical\n')
def test_debug(self):
pywikibot.debug('debug', 'test')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), '')
def test_exception(self):
class TestException(Exception):
"""Test exception."""
try:
raise TestException('Testing Exception')
except TestException:
pywikibot.exception('exception')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'ERROR: TestException: Testing Exception\n')
def test_exception_tb(self):
class TestException(Exception):
"""Test exception."""
try:
raise TestException('Testing Exception')
except TestException:
pywikibot.exception('exception', tb=True)
self.assertEqual(newstdout.getvalue(), '')
stderrlines = newstderr.getvalue().split('\n')
self.assertEqual(stderrlines[0], 'ERROR: TestException: Testing Exception')
self.assertEqual(stderrlines[1], 'Traceback (most recent call last):')
self.assertEqual(stderrlines[3], " raise TestException('Testing Exception')")
self.assertTrue(stderrlines[4].endswith(': Testing Exception'))
self.assertNotEqual(stderrlines[-1], '\n')
class TestTerminalInput(UITestCase):
"""Terminal input tests."""
input_choice_output = 'question ([A]nswer 1, a[n]swer 2, an[s]wer 3): '
def testInput(self):
newstdin.write('input to read\n')
newstdin.seek(0)
returned = pywikibot.input('question')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(newstderr.getvalue(), 'question: ')
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, u'input to read')
def _call_input_choice(self):
rv = pywikibot.input_choice(
'question',
(('answer 1', u'A'),
('answer 2', u'N'),
('answer 3', u'S')),
u'A',
automatic_quit=False)
self.assertEqual(newstdout.getvalue(), '')
self.assertIsInstance(rv, unicode)
return rv
def testInputChoiceDefault(self):
newstdin.write('\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(returned, 'a')
def testInputChoiceCapital(self):
newstdin.write('N\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(newstderr.getvalue(), self.input_choice_output)
self.assertEqual(returned, 'n')
def testInputChoiceNonCapital(self):
newstdin.write('n\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(newstderr.getvalue(), self.input_choice_output)
self.assertEqual(returned, 'n')
def testInputChoiceIncorrectAnswer(self):
newstdin.write('X\nN\n')
newstdin.seek(0)
returned = self._call_input_choice()
self.assertEqual(newstderr.getvalue(),
self.input_choice_output * 2)
self.assertEqual(returned, 'n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalOutputColorUnix(UITestCase):
"""Terminal output color tests."""
str1 = 'text \03{lightpurple}light purple text\03{default} text'
def testOutputColorizedText(self):
pywikibot.output(self.str1)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'text \x1b[95mlight purple text\x1b[0m text\n')
def testOutputNoncolorizedText(self):
pywikibot.config.colorized_output = False
pywikibot.output(self.str1)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'text light purple text text ***\n')
str2 = ('normal text \03{lightpurple} light purple ' +
'\03{lightblue} light blue \03{previous} light purple ' +
'\03{default} normal text')
def testOutputColorCascade_incorrect(self):
"""Test incorrect behavior of testOutputColorCascade."""
pywikibot.output(self.str2)
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'normal text \x1b[95m light purple ' +
'\x1b[94m light blue \x1b[95m light purple ' +
'\x1b[0m normal text\n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalUnicodeUnix(UITestCase):
"""Terminal output tests for unix."""
def testOutputUnicodeText(self):
pywikibot.output(u'Заглавная_страница')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
self._encode(u'Заглавная_страница\n', 'utf-8'))
def testInputUnicodeText(self):
newstdin.write(self._encode(u'Заглавная_страница\n', 'utf-8'))
newstdin.seek(0)
returned = pywikibot.input(u'Википедию? ')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
self._encode(u'Википедию? ', 'utf-8'))
self.assertIsInstance(returned, unicode)
self.assertEqual(returned, u'Заглавная_страница')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTransliterationUnix(UITestCase):
"""Terminal output transliteration tests."""
def testOutputTransliteratedUnicodeText(self):
pywikibot.ui.encoding = 'latin-1'
pywikibot.config.transliterate = True
pywikibot.output(u'abcd АБГД αβγδ あいうえお')
self.assertEqual(newstdout.getvalue(), '')
self.assertEqual(
newstderr.getvalue(),
'abcd \x1b[93mA\x1b[0m\x1b[93mB\x1b[0m\x1b[93mG\x1b[0m'
'\x1b[93mD\x1b[0m \x1b[93ma\x1b[0m\x1b[93mb\x1b[0m\x1b[93mg'
'\x1b[0m\x1b[93md\x1b[0m \x1b[93ma\x1b[0m\x1b[93mi\x1b[0m'
'\x1b[93mu\x1b[0m\x1b[93me\x1b[0m\x1b[93mo\x1b[0m\n')
@unittest.skipUnless(os.name == 'nt', 'requires Windows console')
class WindowsTerminalTestCase(UITestCase):
"""MS Windows terminal tests."""
@classmethod
def setUpClass(cls):
if os.name != 'nt':
raise unittest.SkipTest('requires Windows console')
if not win32api:
raise unittest.SkipTest('requires Windows package pywin32')
if not win32clipboard:
raise unittest.SkipTest('requires Windows package win32clipboard')
if not pywinauto:
raise unittest.SkipTest('requires Windows package pywinauto')
try:
# pywinauto 0.5.0
cls._app = pywinauto.Application()
except AttributeError as e1:
try:
cls._app = pywinauto.application.Application()
except AttributeError as e2:
raise unittest.SkipTest('pywinauto Application failed: %s\n%s'
% (e1, e2))
super(WindowsTerminalTestCase, cls).setUpClass()
@classmethod
def setUpProcess(cls, command):
si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESTDHANDLES
cls._process = subprocess.Popen(command,
creationflags=subprocess.CREATE_NEW_CONSOLE)
cls._app.connect_(process=cls._process.pid)
# set truetype font (Lucida Console, hopefully)
try:
window = cls._app.window_()
except Exception as e:
cls.tearDownProcess()
raise unittest.SkipTest('Windows package pywinauto could not locate window: %r'
% e)
try:
window.TypeKeys('% {UP}{ENTER}^L{HOME}L{ENTER}', with_spaces=True)
except Exception as e:
cls.tearDownProcess()
raise unittest.SkipTest('Windows package pywinauto could not use window TypeKeys: %r'
% e)
@classmethod
def tearDownProcess(cls):
cls._process.kill()
def setUp(self):
super(WindowsTerminalTestCase, self).setUp()
self.setclip(u'')
def waitForWindow(self):
while not self._app.window_().IsEnabled():
time.sleep(0.01)
def getstdouterr(self):
sentinel = u'~~~~SENTINEL~~~~cedcfc9f-7eed-44e2-a176-d8c73136c185'
# select all and copy to clipboard
self._app.window_().SetFocus()
self.waitForWindow()
self._app.window_().TypeKeys('% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{DOWN}{ENTER}{ENTER}',
with_spaces=True)
while True:
data = self.getclip()
if data != sentinel:
return data
time.sleep(0.01)
def setclip(self, text):
win32clipboard.OpenClipboard()
win32clipboard.SetClipboardData(win32clipboard.CF_UNICODETEXT, unicode(text))
win32clipboard.CloseClipboard()
def getclip(self):
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
win32clipboard.CloseClipboard()
data = data.split(u'\x00')[0]
data = data.replace(u'\r\n', u'\n')
return data
def sendstdin(self, text):
self.setclip(text.replace(u'\n', u'\r\n'))
self._app.window_().SetFocus()
self.waitForWindow()
self._app.window_().TypeKeys('% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{ENTER}', with_spaces=True)
class TestWindowsTerminalUnicode(WindowsTerminalTestCase):
"""MS Windows terminal unicode tests."""
@classmethod
def setUpClass(cls):
super(TestWindowsTerminalUnicode, cls).setUpClass()
fn = inspect.getfile(inspect.currentframe())
cls.setUpProcess(['python', 'pwb.py', fn, '--run-as-slave-interpreter'])
_manager.connect()
cls.pywikibot = _manager.pywikibot()
@classmethod
def tearDownClass(cls):
del cls.pywikibot
cls.tearDownProcess()
def setUp(self):
super(TestWindowsTerminalUnicode, self).setUp()
self.pywikibot.set_config('colorized_output', True)
self.pywikibot.set_config('transliterate', False)
self.pywikibot.set_config('console_encoding', 'utf-8')
self.pywikibot.set_ui('transliteration_target', None)
self.pywikibot.set_ui('encoding', 'utf-8')
self.pywikibot.cls()
def testOutputUnicodeText_no_transliterate(self):
self.pywikibot.output(u'Заглавная_страница')
self.assertEqual(self.getstdouterr(), u'Заглавная_страница\n')
def testOutputUnicodeText_transliterate(self):
self.pywikibot.set_config('transliterate', True)
self.pywikibot.set_ui('transliteration_target', 'latin-1')
self.pywikibot.output(u'Заглавная_страница')
self.assertEqual(self.getstdouterr(), 'Zaglavnaya_stranica\n')
def testInputUnicodeText(self):
self.pywikibot.set_config('transliterate', True)
self.pywikibot.request_input(u'Википедию? ')
self.assertEqual(self.getstdouterr(), u'Википедию?')
self.sendstdin(u'Заглавная_страница\n')
returned = self.pywikibot.get_input()
self.assertEqual(returned, u'Заглавная_страница')
class TestWindowsTerminalUnicodeArguments(WindowsTerminalTestCase):
"""MS Windows terminal unicode argument tests."""
@classmethod
def setUpClass(cls):
super(TestWindowsTerminalUnicodeArguments, cls).setUpClass()
cls.setUpProcess(['cmd', '/k', 'echo off'])
@classmethod
def tearDownClass(cls):
cls.tearDownProcess()
pass
def testOutputUnicodeText_no_transliterate(self):
self.sendstdin(
u"python -c \"import os, pywikibot; os.system('cls'); "
u"pywikibot.output(u'\\n'.join(pywikibot.handleArgs()))\" "
u"Alpha Bετα Гамма دلتا\n")
lines = []
while len(lines) < 4 or lines[0] != 'Alpha':
lines = self.getstdouterr().split('\n')
time.sleep(1)
# empty line is the new command line
self.assertEqual(lines, [u'Alpha', u'Bετα', u'Гамма', u'دلتا', u''])
class FakeUITest(TestCase):
"""Test case to allow doing uncolorized general UI tests."""
net = False
expected = 'Hello world you! ***'
expect_color = False
ui_class = terminal_interface_base.UI
def setUp(self):
"""Create dummy instances for the test and patch encounter_color."""
super(FakeUITest, self).setUp()
if PY2:
self.stream = io.BytesIO()
else:
self.stream = io.StringIO()
self.ui_obj = self.ui_class()
self._orig_encounter_color = self.ui_obj.encounter_color
self.ui_obj.encounter_color = self._encounter_color
self._index = 0
def tearDown(self):
"""Unpatch the encounter_color method."""
self.ui_obj.encounter_color = self._orig_encounter_color
super(FakeUITest, self).tearDown()
self.assertEqual(self._index,
len(self._colors) if self.expect_color else 0)
def _getvalue(self):
"""Get the value of the stream and also decode it on Python 2."""
value = self.stream.getvalue()
if PY2:
value = value.decode(self.ui_obj.encoding)
return value
def _encounter_color(self, color, target_stream):
"""Patched encounter_color method."""
assert False, 'This method should not be invoked'
def test_no_color(self):
"""Test a string without any colors."""
self._colors = tuple()
self.ui_obj._print('Hello world you!', self.stream)
self.assertEqual(self._getvalue(), 'Hello world you!')
def test_one_color(self):
"""Test a string using one color."""
self._colors = (('red', 6), ('default', 10))
self.ui_obj._print('Hello \03{red}world you!', self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_flat_color(self):
"""Test using colors with defaulting in between."""
self._colors = (('red', 6), ('default', 6), ('yellow', 3), ('default', 1))
self.ui_obj._print('Hello \03{red}world \03{default}you\03{yellow}!',
self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_stack_with_pop_color(self):
"""Test using stacked colors and just poping the latest color."""
self._colors = (('red', 6), ('yellow', 6), ('red', 3), ('default', 1))
self.ui_obj._print('Hello \03{red}world \03{yellow}you\03{previous}!',
self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_stack_implicit_color(self):
"""Test using stacked colors without poping any."""
self._colors = (('red', 6), ('yellow', 6), ('default', 4))
self.ui_obj._print('Hello \03{red}world \03{yellow}you!', self.stream)
self.assertEqual(self._getvalue(), self.expected)
def test_one_color_newline(self):
"""Test with trailing new line and one color."""
self._colors = (('red', 6), ('default', 11))
self.ui_obj._print('Hello \03{red}world you!\n', self.stream)
self.assertEqual(self._getvalue(), self.expected + '\n')
class FakeUIColorizedTestBase(TestCase):
"""Base class for test cases requiring that colorized output is active."""
expect_color = True
def setUp(self):
"""Force colorized_output to True."""
super(FakeUIColorizedTestBase, self).setUp()
self._old_config = pywikibot.config2.colorized_output
pywikibot.config2.colorized_output = True
def tearDown(self):
"""Undo colorized_output configuration."""
pywikibot.config2.colorized_output = self._old_config
super(FakeUIColorizedTestBase, self).tearDown()
class FakeUnixTest(FakeUIColorizedTestBase, FakeUITest):
"""Test case to allow doing colorized Unix tests in any environment."""
net = False
expected = 'Hello world you!'
ui_class = terminal_interface_unix.UnixUI
def _encounter_color(self, color, target_stream):
"""Verify that the written data, color and stream are correct."""
self.assertIs(target_stream, self.stream)
expected_color = self._colors[self._index][0]
self._index += 1
self.assertEqual(color, expected_color)
self.assertEqual(len(self.stream.getvalue()),
sum(e[1] for e in self._colors[:self._index]))
class FakeWin32Test(FakeUIColorizedTestBase, FakeUITest):
"""
Test case to allow doing colorized Win32 tests in any environment.
This only patches the ctypes import in the terminal_interface_win32 module.
As the Win32CtypesUI is using the std-streams from another import these will
be unpatched.
"""
net = False
expected = 'Hello world you!'
ui_class = terminal_interface_win32.Win32CtypesUI
def setUp(self):
"""Patch the ctypes import and initialize a stream and UI instance."""
super(FakeWin32Test, self).setUp()
self._orig_ctypes = terminal_interface_win32.ctypes
ctypes = FakeModule.create_dotted('ctypes.windll.kernel32')
ctypes.windll.kernel32.SetConsoleTextAttribute = self._handle_setattr
terminal_interface_win32.ctypes = ctypes
self.stream._hConsole = object()
def tearDown(self):
"""Unpatch the ctypes import and check that all colors were used."""
terminal_interface_win32.ctypes = self._orig_ctypes
super(FakeWin32Test, self).tearDown()
def _encounter_color(self, color, target_stream):
"""Call the original method."""
self._orig_encounter_color(color, target_stream)
def _handle_setattr(self, handle, attribute):
"""Dummy method to handle SetConsoleTextAttribute."""
self.assertIs(handle, self.stream._hConsole)
color = self._colors[self._index][0]
self._index += 1
color = terminal_interface_win32.windowsColors[color]
self.assertEqual(attribute, color)
self.assertEqual(len(self.stream.getvalue()),
sum(e[1] for e in self._colors[:self._index]))
class FakeWin32UncolorizedTest(FakeWin32Test):
"""Test case to allow doing uncolorized Win32 tests in any environment."""
net = False
expected = 'Hello world you! ***'
expect_color = False
def setUp(self):
"""Change the local stream's console to None to disable colors."""
super(FakeWin32UncolorizedTest, self).setUp()
self.stream._hConsole = None
if __name__ == "__main__":
try:
try:
unittest.main()
except SystemExit:
pass
finally:
unpatch()
| mit | 2,106,241,176,758,903,000 | 31.732247 | 98 | 0.627841 | false | 3.822322 | true | false | false |
bruecksen/isimip | isi_mip/climatemodels/forms.py | 1 | 27809 | from django import forms
from django.core.exceptions import ValidationError
from django.forms import inlineformset_factory, ClearableFileInput
from django.utils.safestring import mark_safe
from dateutil.parser import parse
from isi_mip.climatemodels.fields import MyModelSingleChoiceField, MyModelMultipleChoiceField
from isi_mip.climatemodels.models import *
from isi_mip.climatemodels.widgets import MyMultiSelect, MyTextInput, MyBooleanSelect, RefPaperWidget
from isi_mip.contrib.models import Country
ContactPersonFormset = inlineformset_factory(BaseImpactModel, ContactPerson,
extra=1, max_num=2, min_num=1, fields='__all__',
can_delete=False, help_texts='The scientists responsible for performing the simulations for this sector')
class ImpactModelStartForm(forms.ModelForm):
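    """Start form to pick an existing impact model or register a new one for a sector."""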
model = forms.ModelChoiceField(queryset=BaseImpactModel.objects.order_by('name'), required=False)
name = forms.CharField(label='New Impact Model', required=False)
sector = forms.ModelChoiceField(queryset=Sector.objects.order_by('name'), required=False)
send_invitation_email = forms.BooleanField(label='Send the invitation email?', required=False, initial=True)
class Meta:
model = BaseImpactModel
fields = ('model', 'name', 'sector')
class BaseImpactModelForm(forms.ModelForm):
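    """Edit form for the general attributes of a BaseImpactModel (owners, name and sector are excluded here)."""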
region = MyModelMultipleChoiceField(allowcustom=True, queryset=Region.objects, required=True)
class Meta:
model = BaseImpactModel
exclude = ('owners', 'public', 'sector', 'name', 'drkz_folder_name')
widgets = {
'short_description': MyTextInput(textarea=True),
}
class ImpactModelForm(forms.ModelForm):
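    """Edit form for a simulation-round specific ImpactModel.

    The reference paper widgets post raw bibliographic values; the clean_*
    methods below turn them into ReferencePaper instances.
    """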
class Meta:
model = ImpactModel
exclude = ('base_model', 'public', 'simulation_round')
widgets = {
'version': MyTextInput(),
'main_reference_paper': RefPaperWidget(),
'other_references': RefPaperWidget(),
'responsible_person': MyTextInput(),
}
@staticmethod
def _ref_paper(args):
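        """Get or create a ReferencePaper from widget values and update its bibliographic fields.

        Returns None if neither a DOI nor a title was submitted; otherwise the
        paper is looked up by DOI (preferred) or by title.
        """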
if not args['doi'] and not args['title']:
return None
if args['doi']:
try:
rp = ReferencePaper.objects.get_or_create(doi=args['doi'])[0]
rp.title = args['title']
except ReferencePaper.MultipleObjectsReturned:
rp = ReferencePaper.objects.create(title=args['title'], doi=args['doi'])
else:
try:
rp = ReferencePaper.objects.get_or_create(title=args['title'])[0]
except ReferencePaper.MultipleObjectsReturned:
rp = ReferencePaper.objects.create(title=args['title'], doi=args['doi'])
rp.lead_author = args['lead_author']
rp.journal_name = args['journal_name']
rp.journal_volume = args['journal_volume']
rp.journal_pages = args['journal_pages']
rp.first_published = args['first_published']
rp.save()
return rp
def clean_main_reference_paper(self):
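        """Build the main ReferencePaper from the raw 'main_reference_paper-*' form values."""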
try:
myargs = {
'lead_author': self.data.getlist('main_reference_paper-author')[0],
'title': self.data.getlist('main_reference_paper-title')[0],
'journal_name': self.data.getlist('main_reference_paper-journal')[0],
'doi': self.data.getlist('main_reference_paper-doi')[0],
'journal_volume': self.data.getlist('main_reference_paper-volume')[0] or None,
'journal_pages': self.data.getlist('main_reference_paper-page')[0]
}
            try:
                myargs['first_published'] = parse(self.data.getlist('main_reference_paper-date')[0])
            except Exception:
                # Missing or unparseable publication date: keep the paper without one.
                myargs['first_published'] = None
        except Exception:
            raise ValidationError('Problems adding the main reference paper')
return self._ref_paper(myargs)
def clean_other_references(self):
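        """Build ReferencePaper instances from the 'other_references-*' value lists (the trailing row is skipped)."""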
rps = []
for i in range(len(self.data.getlist('other_references-title')) - 1):
myargs = {
'lead_author': self.data.getlist('other_references-author')[i],
'title': self.data.getlist('other_references-title')[i],
'journal_name': self.data.getlist('other_references-journal')[i],
'doi': self.data.getlist('other_references-doi')[i],
'journal_volume': self.data.getlist('other_references-volume')[i] or None,
'journal_pages': self.data.getlist('other_references-page')[i]
}
            try:
                myargs['first_published'] = parse(self.data.getlist('other_references-date')[i])
            except Exception:
                # Missing or unparseable publication date: keep the paper without one.
                myargs['first_published'] = None
rp = self._ref_paper(myargs)
rps += [rp] if rp is not None else []
return rps
class TechnicalInformationModelForm(forms.ModelForm):
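    """Edit form for the technical information (resolution and aggregation) of an impact model."""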
spatial_aggregation = MyModelSingleChoiceField(allowcustom=True, queryset=SpatialAggregation.objects)
class Meta:
model = TechnicalInformation
exclude = ('impact_model',)
widgets = {
'spatial_resolution': MyMultiSelect(allowcustom=True),
'spatial_resolution_info': MyTextInput(textarea=True),
'temporal_resolution_climate': MyMultiSelect(allowcustom=True),
'temporal_resolution_co2': MyMultiSelect(allowcustom=True),
'temporal_resolution_land': MyMultiSelect(allowcustom=True),
'temporal_resolution_soil': MyMultiSelect(allowcustom=True),
'temporal_resolution_info': MyTextInput(textarea=True),
}
class InputDataInformationModelForm(forms.ModelForm):
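    """Edit form for the input data sets used by an impact model; choices are limited per simulation round."""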
simulated_atmospheric_climate_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
observed_atmospheric_climate_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
simulated_ocean_climate_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
observed_ocean_climate_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
emissions_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
socio_economic_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
land_use_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
other_human_influences_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
other_data_sets = MyModelMultipleChoiceField(allowcustom=False, queryset=InputData.objects)
climate_variables = MyModelMultipleChoiceField(allowcustom=False, queryset=ClimateVariable.objects)
class Meta:
model = InputDataInformation
exclude = ('impact_model',)
widgets = {
'climate_variables_info': MyTextInput(textarea=True),
'additional_input_data_sets': MyTextInput(textarea=True),
}
def __init__(self, *args, **kwargs):
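        """Restrict each data-set queryset to the impact model's simulation round."""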
instance = kwargs.get('instance', None)
simulation_round = instance.impact_model.simulation_round
super(InputDataInformationModelForm, self).__init__(*args, **kwargs)
self.fields['climate_variables'].queryset = ClimateVariable.objects.filter(inputdata__data_type__is_climate_data_type=True, inputdata__simulation_round=simulation_round).distinct()
self.fields['emissions_data_sets'].queryset = InputData.objects.filter(data_type__name='Emissions', simulation_round=simulation_round).distinct()
self.fields['land_use_data_sets'].queryset = InputData.objects.filter(data_type__name='Land use', simulation_round=simulation_round).distinct()
self.fields['observed_atmospheric_climate_data_sets'].queryset = InputData.objects.filter(data_type__name='Observed atmospheric climate', simulation_round=simulation_round).distinct()
self.fields['observed_ocean_climate_data_sets'].queryset = InputData.objects.filter(data_type__name='Observed ocean climate', simulation_round=simulation_round).distinct()
self.fields['other_data_sets'].queryset = InputData.objects.filter(data_type__name='Other', simulation_round=simulation_round).distinct()
self.fields['other_human_influences_data_sets'].queryset = InputData.objects.filter(data_type__name='Other human influences', simulation_round=simulation_round).distinct()
self.fields['simulated_atmospheric_climate_data_sets'].queryset = InputData.objects.filter(data_type__name='Simulated atmospheric climate', simulation_round=simulation_round).distinct()
self.fields['simulated_ocean_climate_data_sets'].queryset = InputData.objects.filter(data_type__name='Simulated ocean climate', simulation_round=simulation_round).distinct()
self.fields['socio_economic_data_sets'].queryset = InputData.objects.filter(data_type__name='Socio-economic', simulation_round=simulation_round).distinct()
class OtherInformationModelForm(forms.ModelForm):
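    """Edit form for the remaining descriptive information (spin-up, vegetation, management, etc.) of an impact model."""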
class Meta:
model = OtherInformation
exclude = ('impact_model',)
widgets = {
'exceptions_to_protocol': MyTextInput(textarea=True),
'spin_up': MyBooleanSelect(nullable=False),
'spin_up_design': MyTextInput(textarea=True),
'natural_vegetation_partition': MyTextInput(textarea=True),
'natural_vegetation_dynamics': MyTextInput(textarea=True),
'natural_vegetation_cover_dataset': MyTextInput(),
'management': MyTextInput(textarea=True),
'extreme_events': MyTextInput(textarea=True),
'anything_else': MyTextInput(textarea=True),
}
# SECTORS ############################################################
class BaseSectorForm(forms.ModelForm):
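    """Base class for the sector-specific forms.

    Besides the concrete model fields it exposes the configurable
    SectorInformationGroup fields of the model's sector as plain text fields
    and stores their values in the instance's ``data`` mapping.
    """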
generic_fields = []
class Meta:
model = GenericSector
exclude = ('impact_model', 'data')
abstract = True
    def __init__(self, *args, **kwargs):
        instance = kwargs.get('instance', None)
        super(BaseSectorForm, self).__init__(*args, **kwargs)
        # Use per-instance lists so the dynamically added generic fields do not
        # accumulate on the shared class attribute across form instances.
        self.generic_fields = []
        self.generic_groups = []
        if instance:
            sector = instance.impact_model.base_model.sector
for group in SectorInformationGroup.objects.filter(sector=sector):
fields = []
for field in group.fields.all():
fields.append(field.unique_identifier)
self.generic_fields.append(field.unique_identifier)
self.fields[field.unique_identifier] = forms.CharField(widget=MyTextInput(textarea=True), label=field.name, help_text=field.help_text, required=False, initial='')
if instance.data and field.unique_identifier in instance.data:
field_initial = instance.data[field.unique_identifier]
if field_initial:
self.fields[field.unique_identifier].initial = field_initial
self.generic_groups.append({'name': group.name, 'fields': fields, 'description': group.description})
def clean(self):
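        """Move the dynamically added generic values out of cleaned_data into a single 'data' dict."""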
cleaned_data_generic = {}
cleaned_data = super(BaseSectorForm, self).clean()
for k in list(cleaned_data.keys()):
if k in self.generic_fields:
cleaned_data_generic[k] = cleaned_data[k]
del cleaned_data[k]
cleaned_data['data'] = cleaned_data_generic
return cleaned_data
def save(self, commit=True):
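        """Attach the collected generic values to the instance before saving it."""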
instance = super(BaseSectorForm, self).save(commit=False)
instance.data = self.cleaned_data['data']
if commit:
instance.save()
return instance
class AgricultureForm(BaseSectorForm):
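    """Sector form for agriculture impact models, using the edit_agriculture.html template."""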
template = 'edit_agriculture.html'
class Meta:
model = Agriculture
exclude = ('impact_model',)
widgets = {
'crops': MyTextInput(textarea=True),
'land_coverage': MyTextInput(textarea=True),
'planting_date_decision': MyTextInput(textarea=True),
'planting_density': MyTextInput(textarea=True),
'crop_cultivars': MyTextInput(textarea=True),
'fertilizer_application': MyTextInput(textarea=True),
'irrigation': MyTextInput(textarea=True),
'crop_residue': MyTextInput(textarea=True),
'initial_soil_water': MyTextInput(textarea=True),
'initial_soil_nitrate_and_ammonia': MyTextInput(textarea=True),
'initial_soil_C_and_OM': MyTextInput(textarea=True),
'initial_crop_residue': MyTextInput(textarea=True),
'lead_area_development': MyTextInput(textarea=True),
'light_interception': MyTextInput(textarea=True),
'light_utilization': MyTextInput(textarea=True),
'yield_formation': MyTextInput(textarea=True),
'crop_phenology': MyTextInput(textarea=True),
'root_distribution_over_depth': MyTextInput(textarea=True),
'stresses_involved': MyTextInput(textarea=True),
'type_of_water_stress': MyTextInput(textarea=True),
'type_of_heat_stress': MyTextInput(textarea=True),
'water_dynamics': MyTextInput(textarea=True),
'evapo_transpiration': MyTextInput(textarea=True),
'soil_CN_modeling': MyTextInput(textarea=True),
'co2_effects': MyTextInput(textarea=True),
'parameters_number_and_description': MyTextInput(textarea=True),
'calibrated_values': MyTextInput(textarea=True),
'output_variable_and_dataset': MyTextInput(textarea=True),
'spatial_scale_of_calibration_validation': MyTextInput(textarea=True),
'temporal_scale_of_calibration_validation': MyTextInput(textarea=True),
'criteria_for_evaluation': MyTextInput(textarea=True),
}
class ForestsForm(BaseSectorForm):
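    """Sector form for forest impact models, using the edit_forests.html template.

    Adds an extra free-text field in which modellers describe the parameter
    list they upload as an attachment.
    """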
template = 'edit_forests.html'
upload_parameter_list = forms.CharField(widget= MyTextInput(textarea=True), required=False, label=mark_safe('Please upload a list of your parameters as an attachment (Section 7). The list should include species-specific parameters and other parameters not depending on initialization data including the following information: short name, long name, short explanation, unit, value, see here for an example (<a href="http://www.pik-potsdam.de/4c/web_4c/theory/parameter_table_0514.pdf" target="_blank">parameter_table_0514.pdf</a>)'))
class Meta:
model = Forests
exclude = ('impact_model',)
widgets = {
# Forest Model Set-up Specifications
'initialize_model': MyTextInput(textarea=True),
'data_profound_db': MyTextInput(textarea=True),
'management_implementation': MyTextInput(textarea=True),
'harvesting_simulated': MyTextInput(textarea=True),
'regenerate': MyTextInput(textarea=True),
'unmanaged_simulations': MyTextInput(textarea=True),
'noco2_scenario': MyTextInput(textarea=True),
'leap_years': MyTextInput(textarea=True),
'simulate_minor_tree': MyTextInput(textarea=True),
'nitrogen_simulation': MyTextInput(textarea=True),
'soil_depth': MyTextInput(textarea=True),
'upload_parameter_list': MyTextInput(textarea=True),
'minimum_diameter_tree': MyTextInput(textarea=True),
'model_historically_calibrated': MyTextInput(textarea=True),
'stochastic_element': MyTextInput(textarea=True),
# Forest Model Output Specifications
'initial_state': MyTextInput(textarea=True),
'total_calculation': MyTextInput(textarea=True),
'output_dbh_class': MyTextInput(textarea=True),
'output': MyTextInput(textarea=True),
'output_per_pft': MyTextInput(),
'considerations': MyTextInput(textarea=True),
'dynamic_vegetation': MyTextInput(textarea=True),
'nitrogen_limitation': MyTextInput(textarea=True),
'co2_effects': MyTextInput(textarea=True),
'light_interception': MyTextInput(textarea=True),
'light_utilization': MyTextInput(textarea=True),
'phenology': MyTextInput(textarea=True),
'water_stress': MyTextInput(textarea=True),
'heat_stress': MyTextInput(textarea=True),
'evapotranspiration_approach': MyTextInput(textarea=True),
'rooting_depth_differences': MyTextInput(textarea=True),
'root_distribution': MyTextInput(textarea=True),
'permafrost': MyTextInput(textarea=True),
'closed_energy_balance': MyTextInput(textarea=True),
'soil_moisture_surface_temperature_coupling': MyTextInput(textarea=True),
'latent_heat': MyTextInput(textarea=True),
'sensible_heat': MyTextInput(textarea=True),
'mortality_age': MyTextInput(textarea=True),
'mortality_fire': MyTextInput(textarea=True),
'mortality_drought': MyTextInput(textarea=True),
'mortality_insects': MyTextInput(textarea=True),
'mortality_storm': MyTextInput(textarea=True),
'mortality_stochastic_random_disturbance': MyTextInput(textarea=True),
'mortality_other': MyTextInput(textarea=True),
'mortality_remarks': MyTextInput(textarea=True),
'nbp_fire': MyTextInput(textarea=True),
'nbp_landuse_change': MyTextInput(textarea=True),
'nbp_harvest': MyTextInput(textarea=True),
'nbp_other': MyTextInput(textarea=True),
'nbp_comments': MyTextInput(textarea=True),
'list_of_pfts': MyTextInput(textarea=True),
'pfts_comments': MyTextInput(textarea=True),
'assimilation': MyTextInput(textarea=True),
'respiration': MyTextInput(textarea=True),
'carbon_allocation': MyTextInput(textarea=True),
'regeneration_planting': MyTextInput(textarea=True),
'soil_water_balance': MyTextInput(textarea=True),
'carbon_nitrogen_balance': MyTextInput(textarea=True),
'feedbacks_considered': MyTextInput(textarea=True),
}
class BiomesForm(BaseSectorForm):
template = 'edit_biomes.html'
class Meta:
model = Biomes
exclude = ('impact_model',)
widgets = {
'output': MyTextInput(textarea=True),
'output_per_pft': MyTextInput(),
'considerations': MyTextInput(textarea=True),
'dynamic_vegetation': MyTextInput(textarea=True),
'nitrogen_limitation': MyTextInput(textarea=True),
'co2_effects': MyTextInput(textarea=True),
'light_interception': MyTextInput(textarea=True),
'light_utilization': MyTextInput(textarea=True),
'phenology': MyTextInput(textarea=True),
'water_stress': MyTextInput(textarea=True),
'heat_stress': MyTextInput(textarea=True),
'evapotranspiration_approach': MyTextInput(textarea=True),
'rooting_depth_differences': MyTextInput(textarea=True),
'root_distribution': MyTextInput(textarea=True),
'permafrost': MyTextInput(textarea=True),
'closed_energy_balance': MyTextInput(textarea=True),
'soil_moisture_surface_temperature_coupling': MyTextInput(textarea=True),
'latent_heat': MyTextInput(textarea=True),
'sensible_heat': MyTextInput(textarea=True),
'mortality_age': MyTextInput(textarea=True),
'mortality_fire': MyTextInput(textarea=True),
'mortality_drought': MyTextInput(textarea=True),
'mortality_insects': MyTextInput(textarea=True),
'mortality_storm': MyTextInput(textarea=True),
'mortality_stochastic_random_disturbance': MyTextInput(textarea=True),
'mortality_other': MyTextInput(textarea=True),
'mortality_remarks': MyTextInput(textarea=True),
'nbp_fire': MyTextInput(textarea=True),
'nbp_landuse_change': MyTextInput(textarea=True),
'nbp_harvest': MyTextInput(textarea=True),
'nbp_other': MyTextInput(textarea=True),
'nbp_comments': MyTextInput(textarea=True),
'list_of_pfts': MyTextInput(textarea=True),
'pfts_comments': MyTextInput(textarea=True),
'compute_soil_carbon': MyTextInput(textarea=True),
'seperate_soil_carbon': MyTextInput(textarea=True),
'harvest_npp_crops': MyTextInput(textarea=True),
'treat_biofuel_npp': MyTextInput(textarea=True),
'npp_litter_output': MyTextInput(textarea=True),
'simulate_bioenergy': MyTextInput(textarea=True),
'transition_cropland': MyTextInput(textarea=True),
'simulate_pasture': MyTextInput(textarea=True),
}
class BiodiversityForm(BaseSectorForm):
template = 'edit_biodiversity.html'
class Meta:
model = Biodiversity
exclude = ('impact_model',)
widgets = {
'model_algorithm': MyMultiSelect(allowcustom=False),
'explanatory_variables': MyTextInput(textarea=True),
'response_variable': MyMultiSelect(allowcustom=False),
'additional_information_response_variable': MyTextInput(textarea=True),
'distribution_response_variable': MyMultiSelect(allowcustom=False),
'parameters': MyTextInput(textarea=True),
'additional_info_parameters': MyTextInput(textarea=True),
'software_function': MyMultiSelect(allowcustom=False),
'software_package': MyMultiSelect(allowcustom=False),
'software_program': MyTextInput(textarea=True),
'model_output': MyMultiSelect(allowcustom=False),
'additional_info_model_output': MyTextInput(textarea=True),
}
class EnergyForm(BaseSectorForm):
template = 'edit_energy.html'
class Meta:
model = Energy
exclude = ('impact_model',)
widgets = {
'model_type': MyTextInput(textarea=True),
'temporal_extent': MyTextInput(textarea=True),
'temporal_resolution': MyTextInput(textarea=True),
'data_format_for_input': MyTextInput(textarea=True),
'impact_types_energy_demand': MyTextInput(textarea=True),
'impact_types_temperature_effects_on_thermal_power': MyTextInput(textarea=True),
'impact_types_weather_effects_on_renewables': MyTextInput(textarea=True),
'impact_types_water_scarcity_impacts': MyTextInput(textarea=True),
'impact_types_other': MyTextInput(textarea=True),
'output_energy_demand': MyTextInput(textarea=True),
'output_energy_supply': MyTextInput(textarea=True),
'output_water_scarcity': MyTextInput(textarea=True),
'output_economics': MyTextInput(textarea=True),
'output_other': MyTextInput(textarea=True),
'variables_not_directly_from_GCMs': MyTextInput(textarea=True),
'response_function_of_energy_demand_to_HDD_CDD': MyTextInput(textarea=True),
'factor_definition_and_calculation': MyTextInput(textarea=True),
'biomass_types': MyTextInput(textarea=True),
'maximum_potential_assumption': MyTextInput(textarea=True),
'bioenergy_supply_costs': MyTextInput(textarea=True),
'socioeconomic_input': MyTextInput(textarea=True),
}
class MarineEcosystemsForm(BaseSectorForm):
template = 'edit_marine.html'
class Meta:
model = MarineEcosystems
exclude = ('impact_model',)
widgets = {
'defining_features': MyTextInput(textarea=True),
'spatial_scale': MyTextInput(),
'spatial_resolution': MyTextInput(),
'temporal_scale': MyTextInput(),
'temporal_resolution': MyTextInput(),
'taxonomic_scope': MyTextInput(),
'vertical_resolution': MyTextInput(),
'spatial_dispersal_included': MyTextInput(),
'fishbase_used_for_mass_length_conversion': MyTextInput(),
}
class WaterForm(BaseSectorForm):
template = 'edit_water.html'
class Meta:
model = Water
exclude = ('impact_model',)
widgets = {
'technological_progress': MyTextInput(textarea=True),
'soil_layers': MyTextInput(textarea=True),
'water_use': MyTextInput(textarea=True),
'water_sectors': MyTextInput(textarea=True),
'routing': MyTextInput(textarea=True),
'routing_data': MyTextInput(textarea=True),
'land_use': MyTextInput(textarea=True),
'dams_reservoirs': MyTextInput(textarea=True),
'calibration': MyBooleanSelect(nullable=True),
'calibration_years': MyTextInput(),
'calibration_dataset': MyTextInput(),
'calibration_catchments': MyTextInput(),
'vegetation': MyBooleanSelect(nullable=True),
'vegetation_representation': MyTextInput(textarea=True),
"methods_evapotranspiration": MyTextInput(textarea=True),
'methods_snowmelt': MyTextInput(textarea=True),
}
class GenericSectorForm(BaseSectorForm):
template = 'edit_generic_sector.html'
class Meta:
model = GenericSector
exclude = ('impact_model', 'data')
def get_sector_form(sector):
mapping = {
'agriculture': AgricultureForm,
'agroeconomicmodelling': GenericSectorForm,
'biodiversity': BiodiversityForm,
'biomes': BiomesForm,
'coastalinfrastructure': GenericSectorForm,
'computablegeneralequilibriummodelling': GenericSectorForm,
'energy': EnergyForm,
'forests': ForestsForm,
'health': GenericSectorForm,
'marineecosystemsglobal': MarineEcosystemsForm,
'marineecosystemsregional': MarineEcosystemsForm,
'permafrost': GenericSectorForm,
'waterglobal': WaterForm,
'waterregional': WaterForm,
'genericsector': GenericSectorForm,
}
return mapping[sector.class_name.lower()]
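# Illustrative sketch (added for clarity; not part of the original module): how
# the sector -> form mapping above might be used in a view. The keyword
# arguments follow normal Django ModelForm conventions and are hypothetical here.
#   form_class = get_sector_form(sector)
#   form = form_class(request.POST or None, instance=sector)
#   if form.is_valid():
#       form.save()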
class ContactInformationForm(forms.Form):
name = forms.CharField(label='Your name', max_length=60, required=False, widget=forms.TextInput(attrs={'readonly': 'readonly'}), help_text='If you want to change the contact person or add a new contact person, please contact [email protected]')
    email = forms.EmailField(label='Your email address', required=True)
institute = forms.CharField(max_length=500, required=False)
country = forms.ModelChoiceField(queryset=Country.objects.all(), required=False, empty_label='-------')
class AttachmentModelForm(forms.ModelForm):
class Meta:
model = Attachment
exclude = ('impact_model',)
widgets = {
'attachment1': ClearableFileInput,
'attachment1_description': MyTextInput(),
'attachment2': ClearableFileInput,
'attachment2_description': MyTextInput(),
'attachment3': ClearableFileInput,
'attachment3_description': MyTextInput(),
'attachment4': ClearableFileInput,
'attachment4_description': MyTextInput(),
'attachment5': ClearableFileInput,
'attachment5_description': MyTextInput(),
}
class DataConfirmationForm(forms.Form):
terms = forms.BooleanField(required=True)
license = forms.ChoiceField(required=True, choices=(('CC BY 4.0', 'CC BY 4.0'), ('other', 'other')))
other_license_name = forms.CharField(required=False)
correct = forms.BooleanField(required=True)
| mit | -8,307,859,549,264,177,000 | 49.378623 | 536 | 0.647272 | false | 4.066832 | false | false | false |
rosswhitfield/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/IndirectTransmissionMonitorTest.py | 3 | 1609 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import os
import mantid
from mantid.simpleapi import *
class IndirectTransmissionMonitorTest(unittest.TestCase):
def setUp(self):
self._sample_workspace = 'IndirectTransmissionMonitorTest_sample'
self._can_workspace = 'IndirectTransmissionMonitorTest_can'
Load(Filename='IRS26176.RAW', OutputWorkspace=self._sample_workspace)
Load(Filename='IRS26173.RAW', OutputWorkspace=self._can_workspace)
self.kwargs = {}
self.kwargs['SampleWorkspace'] = self._sample_workspace
self.kwargs['CanWorkspace'] = self._can_workspace
def test_basic(self):
trans_workspace = IndirectTransmissionMonitor(**self.kwargs)
self.assertTrue(isinstance(trans_workspace, mantid.api.WorkspaceGroup), msg='Result should be a workspace group')
self.assertEqual(trans_workspace.size(), 3, msg='Transmission workspace group should have 3 workspaces: sample, can and transfer')
expected_names = set()
expected_names.add(self._sample_workspace + '_Can')
expected_names.add(self._sample_workspace + '_Sam')
expected_names.add(self._sample_workspace + '_Trans')
self.assertEqual(set(trans_workspace.getNames()), expected_names)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,429,622,365,907,997,700 | 38.243902 | 138 | 0.708515 | false | 3.849282 | true | false | false |
talavis/kimenu | parser.py | 1 | 11741 | #!/usr/bin/env python3
# Copyright (c) 2014-2020, Linus Östberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of kimenu nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Parsers of the menu pages for the restaurants at Karolinska Institutet
'''
import datetime
from datetime import date
import re
import sys
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
def restaurant(func):
"""
Decorator to use for restaurants.
"""
def helper(res_data):
data = {'title': res_data['name'],
'location': res_data['campus'],
'url': res_data['url'],
'map_url': res_data['osm']}
try:
data.update(func(res_data))
except Exception as err:
sys.stderr.write(f'E in {func.__name__}: {err}\n')
data.update({'menu': []})
pass
return data
helper.__name__ = func.__name__
helper.__doc__ = func.__doc__
return helper
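# Example (added for illustration): the decorator above turns a parser that only
# returns {'menu': [...]} into one that always includes the common metadata and
# degrades to an empty menu on errors. A hypothetical restaurant entry:
#
#   res_data = {'name': 'Example', 'campus': 'Solna',
#               'url': 'http://example.org', 'osm': 'http://osm.example.org',
#               'menu_url': 'http://example.org/menu'}
#   parse_jorpes(res_data)  # -> {'title': 'Example', 'location': 'Solna', ..., 'menu': []}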
def get_parser(url: str) -> BeautifulSoup:
"""
Request page and create Beautifulsoup object
"""
page_req = requests.get(url)
if page_req.status_code != 200:
raise IOError('Bad HTTP responce code')
return BeautifulSoup(page_req.text, 'html.parser')
def fix_bad_symbols(text):
'''
    Fix characters that were garbled by encoding mismatches (mojibake)
'''
text = text.replace('è', 'è')
text = text.replace('ä', 'ä')
text = text.replace('Ã', 'Ä')
text = text.replace('Ã', 'Ä')
text = text.replace('ö', 'ö')
text = text.replace('é', 'é')
text = text.replace('Ã¥', 'å')
    text = text.replace('Ã…', 'Å')
text = text.strip()
return text
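# Example (added for illustration): the replacements above undo typical
# double-encoded UTF-8, e.g.
#   fix_bad_symbols('Köttbullar med gräddsÃ¥s')  # -> 'Köttbullar med gräddsås'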
### date management start ###
def get_day():
'''
Today as digit
'''
return date.today().day
def get_monthdigit():
'''
Month as digit
'''
return date.today().month
def get_month():
'''
Month name
'''
months = {1: 'januari', 2: 'februari', 3: 'mars', 4: 'april',
5: 'maj', 6: 'juni', 7: 'juli', 8: 'augusti',
9: 'september', 10: 'oktober', 11: 'november', 12: 'december'}
return months[get_monthdigit()]
def get_week():
'''
Week number
'''
return date.today().isocalendar()[1]
def get_weekday(lang='sv', tomorrow=False):
'''
Day name in swedish(sv) or english (en)
'''
wdigit = get_weekdigit()
if tomorrow:
wdigit += 1
if lang == 'sv':
weekdays = {0: 'måndag', 1: 'tisdag', 2: 'onsdag', 3: 'torsdag',
4: 'fredag', 5: 'lördag', 6: 'söndag', 7: 'måndag'}
if lang == 'en':
weekdays = {0: 'monday', 1: 'tuesday', 2: 'wednesday', 3: 'thursday',
4: 'friday', 5: 'saturday', 6: 'sunday', 7: 'monday'}
return weekdays[wdigit]
def get_weekdigit():
'''
Get digit for week (monday = 0)
'''
return date.today().weekday()
def get_year():
'''
Year as number
'''
return date.today().year
### date management end ###
### parsers start ###
@restaurant
def parse_bikupan(res_data: dict) -> dict:
'''
Parse the menu of Restaurang Bikupan
'''
def fmt_paragraph(p):
return p.get_text().strip().replace('\n', ' ')
def find_todays_menu(menus):
today = datetime.datetime.today()
today = (today.month, today.day)
for day_menu in menus:
# We expect day to contain text similar to `Måndag 10/2`
date = day_menu.find('h6').text.split(' ')[1]
day, month = date.split('/')
if (int(month), int(day)) == today:
menu = list()
# Bikupan has both English and Swedish, we are only showing Swedish:
courses = defaultdict(list)
for p in day_menu.find_all('p'):
if 'class' in p.attrs and p['class'][0] == 'eng-meny':
courses['english'].append(p)
else:
courses['swedish'].append(p)
for sv in courses['swedish']:
menu.append(fmt_paragraph(sv))
return menu
raise Exception("Can't find today's menu")
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
menus = soup.find_all('div', {'class': 'menu-item'})
menu = list(find_todays_menu(menus))
for course in menu:
data['menu'].append(course)
return data
@restaurant
def parse_dufva(res_data):
'''
Parse the menu of Sven Dufva
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
relevant = soup.find("div", {"id": "post"})
menu_data = relevant.get_text().split('\n')
dag = get_weekday()
started = False
for line in menu_data:
if not line:
continue
if line.lower() == f"- {dag} -":
started = True
continue
if started:
if line[0] != '-':
data['menu'].append(line.strip())
else:
break
return data
@restaurant
def parse_glada(res_data):
'''
Parse the menu of Glada restaurangen
'''
data = {'menu': []}
# No way I'll parse this one. If anyone actually wants to, I'd be happy to accept a patch.
return data
@restaurant
def parse_haga(res_data):
'''
Print a link to the menu of Haga gatukök
'''
return {'menu': []}
@restaurant
def parse_hjulet(res_data):
'''
Parse the menu of Restaurang Hjulet
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
passed = False
for header in soup.find_all('h3'):
if header.find(text=re.compile(f'MENY VECKA {get_week()}')):
passed = True
# Will fail if the day is in a non-menu paragraph
if passed:
menu = soup.find('pre')
correct_part = False
for menu_row in menu:
if get_weekday().upper() in str(menu_row):
correct_part = True
continue
if get_weekday(tomorrow=True).upper() in str(menu_row):
break
if correct_part:
data['menu'] += [entry
for entry
in str(menu_row).strip().replace('\r', '').split('\n')
if entry]
return data
@restaurant
def parse_hubben(res_data):
'''
Parse the menu of Restaurang Hubben
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
days = soup.find_all("div", {"class": "day"})
current = days[get_weekdigit()]
dishes = current.find_all('div', {'class': 'element description col-md-4 col-print-5'})
for dish in dishes:
data['menu'].append(dish.get_text().strip().replace('\n', ' '))
return data
@restaurant
def parse_jons(res_data):
'''
Parse the menu of Jöns Jacob
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
days = soup.find('table', {'class':'table lunch_menu animation'})
day = days.find('tbody', {'class':'lunch-day-content'})
dishes = day.find_all('td', {'class':'td_title'})
data['menu'] += [dish.text.strip() for dish in dishes if dish.text.strip()]
return data
@restaurant
def parse_jorpes(res_data):
'''
    Parse the menu of Restaurang Jorpes
'''
data = {'menu': []}
return data
@restaurant
def parse_livet(res_data):
'''
Parse the menu of Livet
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
started = False
for par in soup.find_all(('h3', 'p')):
if started:
if par.find(text=re.compile(get_weekday(tomorrow=True).capitalize())):
break
if par.find(text=re.compile('[Pp]ersonuppgifterna')):
break
text = par.find(text=True, recursive=False)
if text:
data['menu'].append(text)
continue
if par.find(text=re.compile(get_weekday().capitalize())):
started = True
return data
@restaurant
def parse_nanna(res_data):
'''
Parse the menu of Nanna Svartz
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
menu_part = soup.find_all('div', {'class': 'entry-content'})[0]
current_week = False
for tag in menu_part.find_all('strong'):
if tag.find(text=re.compile(r'MATSEDEL V\.' + str(get_week()))):
current_week = True
break
if current_week:
started = False
dishes = []
for par in menu_part.find_all(('li', 'strong')):
if started:
if (par.find(text=re.compile(get_weekday(tomorrow=True).capitalize())) or
par.find(text=re.compile(r'^Priser'))):
break
# Since they mess up the page now and then,
# day may show up twice because it is both <li> and <strong>
if par.find(text=re.compile(get_weekday().capitalize())):
continue
dish_text = par.text.replace('\xa0', '')
if dish_text:
dishes.append(dish_text)
if par.find(text=re.compile(get_weekday().capitalize())):
started = True
data['menu'] = dishes[::2] # get rid of entries in English
return data
@restaurant
def parse_rudbeck(res_data):
'''
Parse the menu of Bistro Rudbeck
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
days = soup.find_all('div', {'class':'container-fluid no-print'})
day = days[get_weekdigit()]
dishes = day.find_all('span')[3:]
for dish in dishes:
data['menu'].append(dish.get_text().strip())
return data
@restaurant
def parse_svarta(res_data):
'''
Parse the menu of Svarta Räfven
'''
return {'menu': []}
@restaurant
def parse_tallrik(res_data):
'''
Parse the menu of Tallriket
'''
data = {'menu': []}
soup = get_parser(res_data['menu_url'])
days = soup.find_all('div', {'class':'container-fluid no-print'})
day = days[get_weekdigit()]
dishes = day.find_all('span')[3:]
for dish in [x for x in dishes if x.get_text().strip() != '']:
data['menu'].append(dish.get_text().strip())
return data
| bsd-3-clause | 4,986,862,459,494,774,000 | 26.942721 | 94 | 0.566963 | false | 3.614696 | false | false | false |
0--key/lib | portfolio/2013_OrSys/stuff/scratches.py | 1 | 7606 | # MySQL unnecessary functions:
def insertNewSupplier(form_data, userID): # @@
    """All necessary conversions, validations and insertions here"""
raw_data = {} # a single dictionary
# lets prepare data to processing
iDkeys = [
'supplier', 'line1', 'city', 'province', 'zip',
'firstname', 'lastname', 'email', 'phone', 'fax'
] # later add preference & etc.
for i in iDkeys:
raw_data.update({i: form_data[i]})
# now raw_data filled
# input data validation
(data_valid, ins_data, msg) = inputDataValidator(raw_data)
msg.update({'ins': False})
if data_valid: # --<insertion case
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
MySQL_c = db.cursor()
supplier = ins_data['supplier']
address_id = ins_data['address_id']
contact_id = ins_data['contact_id']
try:
MySQL_c.execute(
"""INSERT INTO suppliers (supplier_name, address_id,
contact_id) VALUES (%s, %s, %s)""",
(supplier, address_id, contact_id))
msg.update({'ins': True})
except:
logging.info('Insertions failed, supplier=%s,\
address_id=%s, contact_id=%s' % (supplier, address_id, contact_id))
#
db.commit()
db.close()
else:
logging.info('Data not valid for insertion: %s' % (msg,))
return msg
def inputDataValidator(raw_data_dict): # @@
msg = {}
data_for_insertion = {}
val_result = ()
db = MySQLdb.connect(passwd=db_password, db=db_name, user='orsys')
MySQL_c = db.cursor()
data_dict = sanitizer(raw_data_dict) # sanitize it
# check up supplier
supplier = data_dict['supplier']
if supplier:
MySQL_c.execute(
"""SELECT id FROM suppliers WHERE supplier_name=%s""",
(supplier,))
if MySQL_c.fetchone(): # this supplier is already exists in DB
msg.update({'s_name': 'already exists'}) # <-- update case
data_for_insertion.update({'supplier': supplier})
val_result = False
else: # <-- insert case
data_for_insertion.update({'supplier': supplier})
val_result = True
else: # <-- empty field case:
msg.update({'s_name': 'empty'})
val_result = False
data_for_insertion.update({'address_id': 1}) # address_id})
data_for_insertion.update({'contact_id': 1}) # clerk_id})
result = (val_result, data_for_insertion, msg)
db.commit()
db.close()
return result
# order_composition filler:
SQLite3_c.execute(
'SELECT Order_Number, Item_SKU, Item_Price, Item_Qty_Ordered \
FROM orders')
raw_item_data = SQLite3_c.fetchall()
prep_data = []
for i in raw_item_data:
(o_number, sku, price, qty) = i
MySQL_c.execute("""SELECT id FROM orders WHERE magento_id=%s""",
(o_number,))
o_id = int(MySQL_c.fetchone()[0])
MySQL_c.execute("""SELECT id FROM products WHERE sku=%s""",
(sku,))
p_id = int(MySQL_c.fetchone()[0])
prep_data.append((o_id, p_id, price.split('$')[-1], qty))
print prep_data
MySQL_c.executemany(
""" INSERT INTO order_composition (order_id, product_id,
price, qty) VALUES (%s, %s, %s, %s)""", prep_data)
# this is orders table filler
SQLite3_c.execute(
'select Order_Number,Order_Date, Customer_Name, \
Shipping_Phone_Number, Shipping_Street from orders'
)
raw_orders = set(SQLite3_c.fetchall())
orders = list(raw_orders)
prepared_data = []
for i in orders:
(m_num, m_date, c_name, p_num, street) = i
# lets convert date into MySQL format:
raw_date, raw_time = m_date.split()
time = raw_time + ':00'
date = '-'.join(raw_date.split('/')[::-1])
m_date = date + ' ' + time
# lets find foreing keys:
MySQL_c.execute("""SELECT id FROM customers WHERE customer_name=%s""",
(c_name,))
customer_id = int(MySQL_c.fetchone()[0])
MySQL_c.execute("""SELECT id FROM phones WHERE phone_num=%s""",
(p_num,))
phone_id = int(MySQL_c.fetchone()[0])
MySQL_c.execute("""SELECT id FROM addresses WHERE line1=%s""",
(street,))
address_id = int(MySQL_c.fetchone()[0])
print (
m_num, m_date, c_name, customer_id, p_num, phone_id,
street, address_id
)
prepared_data.append(
(int(m_num), customer_id, address_id, phone_id, m_date))
MySQL_c.executemany(
"""INSERT INTO orders (magento_id, customer_id, shipping_address_id,
shipping_phone_id, magento_time) VALUES (%s, %s, %s, %s, %s)""",
prepared_data)
#?
def phoneFiller(self, raw_phone):
# extract significant parts:
if len(raw_phone) == 8: # it's a bold phone number
# Filling addresses table:
SQLite3_c.execute(
"""SELECT Shipping_Street, Shipping_Zip, Shipping_City, Shipping_State_Name, \
Shipping_Country_Name FROM orders"""
)
address_data = set(SQLite3_c.fetchall())
MySQL_c.executemany(
"""INSERT INTO addresses (line1, zip, city, province, country)
VALUES (%s, %s, %s,%s, %s)""", address_data
)
# - #
# typical MySQL interaction: filling products table
SQLite3_c.execute('SELECT Item_Name, Item_SKU from orders')
product_data = SQLite3_c.fetchall()
inserted_sku = []
prepared_data = []
for i in product_data:
if i[1] not in inserted_sku:
prepared_data.append((None, i[0], i[1]))
inserted_sku.append(i[1])
print prepared_data
MySQL_c.executemany(
"""INSERT INTO products (id, item_name, sku) VALUES (%s, %s, %s)""",
prepared_data)
# - #
# this snippet fills data from csv into SQLite3
csv_file = open('orders.csv', 'rU')
o = csv.reader(csv_file)
for i in o:
c.execute('INSERT INTO orders VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,\
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,\
?)', tuple(i))
# - #
# check up address
line1 = data_dict['line1'] # -- four variables there
city = data_dict['city']
province = data_dict['province']
postal_zip = data_dict['zip']
#
if line1:
MySQL_c.execute(
"""SELECT id FROM addresses WHERE line1=%s""",
(line1,))
if MySQL_c.fetchone(): # this address is well known
address_id = MySQL_c.fetchone()[0]
else: # the new one
msg.update({'line1': 'new insertion'})
MySQL_c.execute(
"""INSERT INTO addresses (line1, city, province, zip)
VALUES (%s, %s, %s, %s)""",
(line1, city, province, postal_zip))
address_id = MySQL_c.lastrowid
else: # empty line1 case
msg.update({'line1': 'empty'})
MySQL_c.execute(
"""INSERT INTO addresses (line1, city, province, zip)
VALUES (%s, %s, %s, %s)""",
(line1, city, province, postal_zip))
address_id = MySQL_c.lastrowid
# check up clerk
c_first_name = data_dict['firstname']
c_last_name = data_dict['lastname']
email = data_dict['email']
phone = data_dict['phone']
fax = data_dict['fax']
# the main condition:
if (email or phone) or (email and phone):
# check it up
MySQL_c.execute(
"""SELECT id FROM clerks WHERE email=%s""",
(email,))
clerk_id = MySQL_c.fetchone()
if clerk_id: # this email is well known already
#
else: # it's a new email
#
else: # it's a deviation
msg.update({'contact': 'unknown communication method'})
# - #
| apache-2.0 | 1,865,513,079,236,741,600 | 32.804444 | 82 | 0.570208 | false | 3.334502 | false | false | false |
FabienGreard/DetectCocaPy | robotCoke/component.py | 1 | 1417 | # Class to for the global value of a component
class RobotComponent(object):
pin1 = None
pin2 = None
position = None
def __init__(self, board, pin1, pin2=None, position=None):
self.pin1 = board.get_pin(pin1)
if pin2 != None:
self.pin2 = board.get_pin(pin2)
if position != None:
self.position = position
# class for the motors actions
class Motor(RobotComponent):
def forward(self):
self.pin1.write(1)
self.pin2.write(0)
def backward(self):
self.pin1.write(0)
self.pin2.write(1)
def left(self):
if self.position == "left":
self.backward()
elif self.position == "rigth":
self.forward()
def rigth(self):
if self.position == "left":
self.forward()
elif self.position == "rigth":
self.backward()
def stop(self):
self.pin1.write(0)
self.pin2.write(0)
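# Illustrative example (added; the pin strings are hypothetical): driving a
# left-side motor with a pyfirmata board using the class above.
#   left_motor = Motor(board, 'd:2:o', 'd:3:o', position='left')
#   left_motor.forward()
#   left_motor.stop()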
# class for the magnet actions
class Magnet(RobotComponent):
def on(self):
print ("Magnet on")
for i in range(0,50):
self.pin1.write(1)
self.pin2.write(1)
def off(self):
print ("Magnet off")
for i in range(0,50):
self.pin1.write(0)
self.pin2.write(0)
class Servo(RobotComponent):
def move(self, value):
print (value)
self.pin1.write(value)
| gpl-3.0 | -4,035,162,867,894,250,500 | 22.616667 | 62 | 0.553987 | false | 3.490148 | false | false | false |
hetica/bentools | modules/vennFromKad/vennFromKad.py | 1 | 2236 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
__appname__ = "vennFromKad"
__licence__ = "none"
__version__ = "0.1"
__author__ = "Benoit Guibert <[email protected]>"
__shortdesc__ = "Build a file for a venn diagram from kad output"
__opts__ = []
def main(parent):
args = argsChk(parent)
for file in args:
venn_list = buildVennData(file)
setFile(file, venn_list)
# print("App: ", parent)
# print("Arguments:")
def buildVennData(file):
with open(file) as stream:
venn_list = {}
header = stream.readline().split(';')
for line in stream:
name = ""
sline = line.split('\t')[1:]
#print(sline)
if len(sline) > 0:
try:
for i,item in enumerate(sline):
if i == 0:
name += item.split('|')[0]
else:
name += "_" + item.split('|')[0]
except IndexError:
pass
if name in venn_list:
venn_list[name] += 1
else:
venn_list[name] = 1
return venn_list
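# Example (added for clarity): each counted key names the set of samples a
# k-mer line belongs to, so for two samples "A" and "B" the result looks like
#   {'A': 120, 'B': 95, 'A_B': 40}
# i.e. counts for the exclusive and shared regions of the Venn diagram.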
def setFile(file, venn_list):
#print(venn_list)
venn_table = ""
for k,v in venn_list.items():
venn_table += "{}\t{}\n".format(k,v)
print(venn_table, end="")
def argsChk(parent):
args = sys.argv[1:] if __appname__ in sys.argv[0] else sys.argv[2:]
if "-h" in args:
__opts__.append("-h")
args.remove("-h")
helpme(parent)
sys.exit()
if len(args) < 1:
helpme(parent)
sys.exit()
return args
def helpme(parent):
opts = " -h\t: help\n"
print("\n{}\n".format(__shortdesc__))
if parent == __appname__:
print("Usage: {} <arguments>".format(__appname__))
print(" {} -h\n".format(__appname__))
print(opts)
else:
print("Usage: {} {} [-h] <arguments>".format(parent, __appname__))
print(" {} {} -h\n".format(parent, __appname__))
print(opts)
if __name__ == "__main__":
main(__appname__)
| gpl-3.0 | -2,063,184,588,625,783,600 | 26.268293 | 76 | 0.457066 | false | 3.47205 | false | false | false |
moul/alfred-workflow-pingport | src/workflow.py | 1 | 3528 | # -*- coding: utf-8 -*-
import os
import ConfigParser
import time
import socket
import subprocess
import alfred
def tcp_port_status(host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
try:
s.connect((host, int(port)))
s.shutdown(2)
return True
except:
return False
def tcp_port_ping(host, port):
t = time.time()
if tcp_port_status(host, port):
diff = time.time() - t
return True, diff
return False, None
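# Illustrative example (added): a quick reachability probe using the helper above.
#   ok, latency = tcp_port_ping('localhost', 22)
#   if ok:
#       print('open, replied in {:.1f} ms'.format(latency * 1000))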
class PingportWorkflow(alfred.AlfredWorkflow):
_reserved_words = []
def __init__(self, max_results=20):
self.max_results = max_results
def command_autocomplete_iter(self, query):
args = query.rstrip().split()
if len(query.lstrip()) and query[-1] == ' ':
args.append('')
if len(args) in (0, 1):
valids = 0
for host in ('localhost', '127.0.0.1'):
item = self.item(title=host,
description='pingport {} ...'.format(host),
uid=host,
autocomplete=True, arg='{} '.format(host),
match=query, ignore=True)
if item:
valids += 1
yield item
if not valids:
yield self.item(title=query,
uid=query,
description='pingport {} ...'.format(query),
autocomplete=False, arg=query, ignore=True)
elif len(args) == 2:
valids = 0
for port in (22, 80, 443):
sub_query = '{} {}'.format(args[0], port)
item = self.item(title=sub_query,
uid=sub_query,
description='pingport {}'.format(sub_query),
autocomplete=True, arg='{} '.format(sub_query),
match=query, ignore=True)
if item:
valids += 1
yield item
if not valids:
yield self.item(title=query,
uid=query,
description='pingport {}'.format(query),
autocomplete=True, arg='{} '.format(query),
ignore=True)
elif len(args) == 3:
host, port = args[:2]
status, latency = tcp_port_ping(host, port)
if status:
description = '{}:{} TCP port replied in ' \
'{:.2f}ms.'.format(host, port, latency * 1000)
yield self.item(title=query, uid=query,
description=description, autocomplete=True,
arg=query, match=query, ignore=True)
else:
description = '{}:{} TCP port is closed.'.format(host, port)
yield self.item(title=query, uid=query,
description=description, autocomplete=True,
arg=query, match=query, ignore=True)
def do_command_autocomplete(self, query):
self.write_items(self.command_autocomplete_iter(query))
def main(action, query):
pingport = PingportWorkflow()
pingport.route_action(action, query)
if __name__ == "__main__":
main(action=alfred.args()[0], query=alfred.args()[1])
| mit | 3,071,026,989,103,077,400 | 33.930693 | 80 | 0.470238 | false | 4.460177 | false | false | false |
siggame/Creer | creer/utilities.py | 1 | 2052 | import re
import os
import collections
import operator
def extend(d, u):
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = extend(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
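# Example (added for clarity): `extend` merges nested mappings recursively
# instead of replacing them wholesale:
#   extend({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}})  # -> {'a': {'x': 1, 'y': 3}, 'b': 2}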
def list_dirs(path):
folders = []
while path != "" and path != None:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path!="":
folders.append(path)
break
folders.reverse()
return folders
def uncapitalize(s):
return s[:1].lower() + s[1:] if s else ''
def extract_str(raw_string, start_marker, end_marker):
start = raw_string.index(start_marker) + len(start_marker)
end = raw_string.index(end_marker, start)
return raw_string[start:end]
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camel_case_to_underscore(name):
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def camel_case_to_hyphenate(name):
s1 = first_cap_re.sub(r'\1-\2', name)
return all_cap_re.sub(r'\1-\2', s1).lower()
def copy_dict(source_dict, diffs):
result=dict(source_dict) # Shallow copy
result.update(diffs)
return result
def sort_dict_keys(d):
return sorted(d)
def sort_dict_values(d):
return sorted(d.items(), key=operator.itemgetter(0))
def upcase_first(s):
return s[0].upper() + s[1:]
def lowercase_first(s):
return s[0].lower() + s[1:]
def human_string_list(strs, conjunction='or'):
n = len(strs)
if n == 0:
return ''
if n == 1:
return str(strs[0])
if n == 2:
return '{} {} {}'.format(strs[0], conjunction, strs[1])
# else list of >= 3
strs_safe = list(strs)
strs_safe[-1] = '{} {}'.format(conjunction, strs_safe[-1])
return ', '.join(strs_safe)
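# Example output (added for clarity):
#   human_string_list(['int'])                    -> 'int'
#   human_string_list(['int', 'float'])           -> 'int or float'
#   human_string_list(['int', 'float', 'string']) -> 'int, float, or string'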
def is_primitive_type(type_obj):
return (type_obj['name'] in ['null', 'boolean', 'int', 'float', 'string', 'list', 'dictionary'])
| mit | 165,453,380,058,000,770 | 24.974684 | 100 | 0.570175 | false | 3 | false | false | false |
bluef0x/Dataproject | Code/loadin.py | 1 | 3116 | ''' Jeroen Meijaard 10611002 '''
import json
import os
import errno
import calendar
import sys
import time
from dateutil import parser
def loadTwitterData(filepath='00.json'):
''' Load in twitterdata'''
twitterData = []
print 'Loading', filepath
# get data from file to array
with open(filepath) as data_file:
for line in data_file:
twitterData.append(json.loads(line))
return twitterData
def loadStockData():
''' Load in stock data'''
stockData = []
dataObjects = []
# open input file and write to csv
with open('US2.GOOG_120101_120131.txt') as data_file:
inputFileList = [line.rstrip('\n').strip('\r') for line in data_file]
# for apple stock, remove strip('\r') and use split('\r')
for line in inputFileList:
stockData.append(line.split(','))
for i,line in enumerate(stockData):
if i == 0:
# write header
header = ["close_time","TICKER","OPEN","HIGH","LOW","CLOSE","VOL"]
print header[0]
for i,part in enumerate(line):
line.remove(line[i])
# line.insert(0,header[i + 1])
else:
# write data
print stockData[i]
dateObject = parser.parse(str(stockData[i][2] + stockData[i][3]))
timeobject = dateObject.strftime("%a %b %d %H:%M:%S %Y")
temp2 = timeobject.split(" ")
temp2.insert(4,"+0000")
temp3 = " ".join(temp2)
line.insert(0,str(temp3))
return stockData
def generate_paths(root_path =' '):
''' generate paths for all files in folder '''
paths = []
cal = calendar.Calendar()
year = 2012
month = 1
# generate day,hour,minute for pathname
days = [d for d in cal.itermonthdays(year, month) if d != 0]
for day in days:
if day < 10:
day = str(day).zfill(2)
else:
day = str(day)
for hour in range(0,23):
if hour < 10:
hour = str(hour).zfill(2)
else:
hour = str(hour)
for minute in range(0,60):
if minute < 10:
minute = str(minute).zfill(2)
else:
minute = str(minute)
temp = "/".join([root_path,str(year),str(month).zfill(2),day,hour,minute])
temp = temp + ".json"
yield temp
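# Example (added for illustration): for root_path='/data/twitter' the generator
# yields one path per minute of January 2012, starting with
#   /data/twitter/2012/01/01/00/00.json
# (hours 00-22 and minutes 00-59, as written above).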
def loadFiles(paths, n = 5):
''' load a determined amount of 1 minute twitter files '''
files = []
for i,path in enumerate(paths):
# load file
try:
files.append(loadTwitterData(path))
if (i + 1) % n == 0:
yield files
files = []
# error handling
except IOError as e:
print(os.strerror(e.errno))
print "%s not found" %(path)
pass
# use os.path.walk to load multiple files  # does not work!! Too heavy RAM use!!
# def loadAllJSON(root_path):
# alldata = []
# for root, directories, filenames in os.walk(root_path):
# print root
# for filename in filenames:
# if filename.endswith('json'):
# alldata.extend(loadTwitterData(os.path.join(root, filename)))
def main():
# give rootpath
root_path = "/Users/jeroen_meijaard48/Downloads"
t0 = time.time()
paths = generate_paths(root_path)
#paths = ["/somepath/something"]
# load 5 files at a time
test = loadFiles(paths,5)
# print amount of time needed for run
print 'Took %.2f seconds' % (time.time() - t0)
return test
if __name__ == '__main__':
pass
| mit | 4,275,324,881,465,816,600 | 22.081481 | 84 | 0.640244 | false | 2.895911 | false | false | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_1_0_6/models/healthcareservice.py | 1 | 11306 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/HealthcareService) on 2016-06-23.
# 2016, SMART Health IT.
from . import domainresource
class HealthcareService(domainresource.DomainResource):
""" The details of a healthcare service available at a location.
"""
resource_name = "HealthcareService"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.appointmentRequired = None
""" If an appointment is required for access to this service.
Type `bool`. """
self.availabilityExceptions = None
""" Description of availability exceptions.
Type `str`. """
self.availableTime = None
""" Times the Service Site is available.
List of `HealthcareServiceAvailableTime` items (represented as `dict` in JSON). """
self.characteristic = None
""" Collection of characteristics (attributes).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.comment = None
""" Additional description and/or any specific issues not covered
elsewhere.
Type `str`. """
        self.coverageArea = None
        """ Location(s) service is intended for/available to.
List of `FHIRReference` items referencing `Location` (represented as `dict` in JSON). """
self.eligibility = None
""" Specific eligibility requirements required to use the service.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.eligibilityNote = None
""" Describes the eligibility conditions for the service.
Type `str`. """
self.extraDetails = None
""" Extra details about the service that can't be placed in the other
fields.
Type `str`. """
self.identifier = None
""" External identifiers for this item.
List of `Identifier` items (represented as `dict` in JSON). """
self.location = None
""" Location where service may be provided.
Type `FHIRReference` referencing `Location` (represented as `dict` in JSON). """
self.notAvailable = None
""" Not available during this time due to provided reason.
List of `HealthcareServiceNotAvailable` items (represented as `dict` in JSON). """
self.photo = None
""" Facilitates quick identification of the service.
Type `Attachment` (represented as `dict` in JSON). """
self.programName = None
""" Program Names that categorize the service.
List of `str` items. """
self.providedBy = None
""" Organization that provides this service.
Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """
self.publicKey = None
""" PKI Public keys to support secure communications.
Type `str`. """
self.referralMethod = None
""" Ways that the service accepts referrals.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.serviceCategory = None
""" Broad category of service being performed or delivered.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.serviceName = None
""" Description of service as presented to a consumer while searching.
Type `str`. """
self.serviceProvisionCode = None
""" Conditions under which service is available/offered.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.serviceType = None
""" Specific service delivered or performed.
List of `HealthcareServiceServiceType` items (represented as `dict` in JSON). """
self.telecom = None
""" Contacts related to the healthcare service.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(HealthcareService, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareService, self).elementProperties()
js.extend([
("appointmentRequired", "appointmentRequired", bool, False, None, False),
("availabilityExceptions", "availabilityExceptions", str, False, None, False),
("availableTime", "availableTime", HealthcareServiceAvailableTime, True, None, False),
("characteristic", "characteristic", codeableconcept.CodeableConcept, True, None, False),
("comment", "comment", str, False, None, False),
("coverageArea", "coverageArea", fhirreference.FHIRReference, True, None, False),
("eligibility", "eligibility", codeableconcept.CodeableConcept, False, None, False),
("eligibilityNote", "eligibilityNote", str, False, None, False),
("extraDetails", "extraDetails", str, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("location", "location", fhirreference.FHIRReference, False, None, True),
("notAvailable", "notAvailable", HealthcareServiceNotAvailable, True, None, False),
("photo", "photo", attachment.Attachment, False, None, False),
("programName", "programName", str, True, None, False),
("providedBy", "providedBy", fhirreference.FHIRReference, False, None, False),
("publicKey", "publicKey", str, False, None, False),
("referralMethod", "referralMethod", codeableconcept.CodeableConcept, True, None, False),
("serviceCategory", "serviceCategory", codeableconcept.CodeableConcept, False, None, False),
("serviceName", "serviceName", str, False, None, False),
("serviceProvisionCode", "serviceProvisionCode", codeableconcept.CodeableConcept, True, None, False),
("serviceType", "serviceType", HealthcareServiceServiceType, True, None, False),
("telecom", "telecom", contactpoint.ContactPoint, True, None, False),
])
return js
from . import backboneelement
class HealthcareServiceAvailableTime(backboneelement.BackboneElement):
""" Times the Service Site is available.
A collection of times that the Service Site is available.
"""
resource_name = "HealthcareServiceAvailableTime"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allDay = None
""" Always available? e.g. 24 hour service.
Type `bool`. """
self.availableEndTime = None
""" Closing time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.availableStartTime = None
""" Opening time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.daysOfWeek = None
""" mon | tue | wed | thu | fri | sat | sun.
List of `str` items. """
super(HealthcareServiceAvailableTime, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceAvailableTime, self).elementProperties()
js.extend([
("allDay", "allDay", bool, False, None, False),
("availableEndTime", "availableEndTime", fhirdate.FHIRDate, False, None, False),
("availableStartTime", "availableStartTime", fhirdate.FHIRDate, False, None, False),
("daysOfWeek", "daysOfWeek", str, True, None, False),
])
return js
class HealthcareServiceNotAvailable(backboneelement.BackboneElement):
""" Not available during this time due to provided reason.
The HealthcareService is not available during this period of time due to
the provided reason.
"""
resource_name = "HealthcareServiceNotAvailable"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Reason presented to the user explaining why time not available.
Type `str`. """
        self.during = None
        """ Service not available from this date.
Type `Period` (represented as `dict` in JSON). """
super(HealthcareServiceNotAvailable, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceNotAvailable, self).elementProperties()
js.extend([
("description", "description", str, False, None, True),
("during", "during", period.Period, False, None, False),
])
return js
class HealthcareServiceServiceType(backboneelement.BackboneElement):
""" Specific service delivered or performed.
A specific type of service that may be delivered or performed.
"""
resource_name = "HealthcareServiceServiceType"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.specialty = None
""" Specialties handled by the Service Site.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" Type of service delivered or performed.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(HealthcareServiceServiceType, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(HealthcareServiceServiceType, self).elementProperties()
js.extend([
("specialty", "specialty", codeableconcept.CodeableConcept, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
])
return js
from . import attachment
from . import codeableconcept
from . import contactpoint
from . import fhirdate
from . import fhirreference
from . import identifier
from . import period
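# Illustrative usage (added; not part of the generated file): models in this
# package are normally instantiated from a FHIR JSON dictionary, e.g.
#   svc = HealthcareService({"resourceType": "HealthcareService",
#                            "location": {"reference": "Location/1"}})
# after which attribute access mirrors the element names declared above
# (svc.serviceName, svc.availableTime, ...).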
| bsd-3-clause | 2,485,395,729,182,734,300 | 41.02974 | 113 | 0.627101 | false | 4.546039 | false | false | false |
SCECcode/BBP | bbp/utils/misc/gen_station_grid.py | 1 | 1763 | #!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Program to create a list of stations for use in the Broadband Platform.
"""
# Import Python modules
import os
import sys
def main():
"""
Get min and max latitude and longitude values from the user, also
read the step to be used in generating the station list. This code
can only be used to generate a maximum of 9999 stations.
"""
if len(sys.argv) < 6:
print "Usage: %s lat_min lat_max lon_min lon_max step" % sys.argv[0]
sys.exit(0)
lat_min = float(sys.argv[1])
lat_max = float(sys.argv[2])
lon_min = float(sys.argv[3])
lon_max = float(sys.argv[4])
step = float(sys.argv[5])
station_number = 0
cur_lat = lat_min
cur_lon = lon_min
while cur_lat <= lat_max:
while cur_lon <= lon_max:
station_number = station_number + 1
print "%2.3f %2.3f sta%04d 10 " % (cur_lon,
cur_lat,
station_number)
cur_lon = cur_lon + step
cur_lat = cur_lat + step
cur_lon = lon_min
if __name__ == "__main__":
main()
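# Example invocation (added for illustration): a 0.5-degree grid over a small box
#   gen_station_grid.py 34.0 35.0 -119.0 -118.0 0.5
# prints one "lon lat staNNNN 10" line per grid point.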
| apache-2.0 | -2,315,007,829,536,662,000 | 31.054545 | 76 | 0.619399 | false | 3.759062 | false | false | false |
wienerschnitzel/schnitzelserver | schnitzelserver/modules_builtin/access_control/user.py | 1 | 1577 | from sqlalchemy import Column, Integer, String, Boolean, Numeric, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declared_attr
from schnitzelserver.models.modelbase import SchnitzelBase, inherits
class GroupMembership(SchnitzelBase):
__tablename__ = 'schnitzel_group_membership'
__audit__ = False
user = Column(Integer, ForeignKey('schnitzel_user.id'), primary_key=True)
group = Column(Integer, ForeignKey('schnitzel_group.id'), primary_key=True)
class User(SchnitzelBase):
"""
A basic schnitzel-user which can belong to groups
"""
__tablename__ = 'schnitzel_user'
__audit__ = True
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True)
groups = relationship("Group",
secondary="schnitzel_group_membership",
backref="members")
locked_permission = Column(Boolean) # if set to true, can't be granted additional
# permissions
def validate(self):
print("i am the user")
super().validate()
@inherits('schnitzel_user')
class Module(SchnitzelBase):
"""
A module has its dedicated user being in all groups
"""
__tablename__ = 'schnitzel_module'
id = Column(Integer, ForeignKey('schnitzel_user.id'), primary_key=True)
version = Column(Numeric)
class Group(SchnitzelBase):
"""
A basic group to have access on models/fields
"""
__tablename__ = 'schnitzel_group'
id = Column(Integer, primary_key=True)
name = Column(String(100))
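# Illustrative sketch (added; assumes a configured SQLAlchemy session): group
# membership is managed through the `groups`/`members` relationship pair above.
#   admin = Group(name='admin')
#   alice = User(username='alice', groups=[admin])
#   session.add_all([admin, alice])
#   session.commit()
#   assert alice in admin.members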
| lgpl-3.0 | 9,040,040,697,108,493,000 | 28.754717 | 86 | 0.667724 | false | 3.763723 | false | false | false |
RAPD/RAPD | src/cloud/handlers/cloud_handler_reindex.py | 1 | 10141 | """
This file is part of RAPD
Copyright (C) 2016-2018 Cornell University
All rights reserved.
RAPD is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, version 3.
RAPD is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__created__ = "2016-01-29"
__maintainer__ = "Frank Murphy"
__email__ = "[email protected]"
__status__ = "Development"
import datetime
import logging
import os
import threading
# RAPD imports
from control_server import LaunchAction
# This is a rapd cloud handler
CLOUD_HANDLER = True
# This handler's request type
REQUEST_TYPE = "reindex"
# A unique UUID for this handler (uuid.uuid1().hex)
ID = "fc774d3ad98e11e5b08ac82a1400d5bc"
class Handler(threading.Thread):
"""
Handles the initialization of reprocessing runs in a separate thread
"""
# single, pair, or new_pair
index_type = None
# The data on the image(s)
image1 = None
image2 = None
# Previous result(s) information
original_result = None
process_settings = None
def __init__(self, request, database, settings, reply_settings):
"""Initialize the handler for reindexing frames"""
# Grab the logger
self.logger = logging.getLogger("RAPDLogger")
self.logger.info("ReprocessHandler::__init__ %s", request)
# Initialize the thread
threading.Thread.__init__(self)
#store passed-in variables
self.request = request
self.database = database
self.settings = settings
self.reply_settings = reply_settings
# Kick it off
self.start()
    def run(self):
        """Main process of the handler"""
#mark that the request has been addressed
self.database.markCloudRequest(self.request["cloud_request_id"], "working")
# Get the settings for processing
self.get_process_data()
# Get the images data
self.get_image_data()
# Get the working directory and repr
new_work_dir, new_repr = self.get_work_dir()
# Save some typing
data_root_dir = self.original_result["data_root_dir"]
# Header beam position settings will be overridden sometimes
# Not overridden
if self.process_settings["x_beam"] == "0":
# Source the beam center from the calculated one from image1
# This gives better indexing results
if self.image1["calc_beam_center_x"] > 0.0:
self.process_settings["x_beam"] = self.image1["calc_beam_center_x"]
self.process_settings["y_beam"] = self.image1["calc_beam_center_y"]
process_type = {"single" : "single",
"pair" : "pair",
"new_pair" : "pair"}
# Add the process to the database to display as in-process
process_id = self.database.addNewProcess(type=process_type[self.index_type],
rtype="reprocess",
data_root_dir=data_root_dir,
repr=new_repr)
# Add the ID entry to the data dict
self.image1.update({"ID" : os.path.basename(new_work_dir),
"process_id" : process_id,
"repr" : new_repr})
if self.image2:
self.image2.update({"ID" : os.path.basename(new_work_dir),
"process_id" : process_id,
"repr" : new_repr})
# Now package directories into a dict for easy access by worker class
new_dirs = {"work" : new_work_dir,
"data_root_dir" : self.original_result["data_root_dir"]}
# Add the request to self.process_settings so it can be passed on
self.process_settings["request"] = self.request
# Mark that the request has been addressed
self.database.markCloudRequest(self.request["cloud_request_id"], "working")
#mark in the cloud_current table
self.database.addCloudCurrent(self.request)
# Connect to the server and autoindex the single image
# Pair
if "pair" in self.index_type:
LaunchAction(command=("AUTOINDEX-PAIR",
new_dirs,
self.image1,
self.image2,
self.process_settings,
self.reply_settings),
settings=self.settings)
# Single
else:
LaunchAction(("AUTOINDEX",
new_dirs,
self.image1,
self.process_settings,
self.reply_settings),
self.settings)
def get_process_data(self):
"""Retrieve information on the previous process from the database"""
# Get the settings for processing
self.process_settings = self.database.getSettings(setting_id=self.request["new_setting_id"])
self.logger.debug("process_settings: %s", self.process_settings)
        # Get the original result from the database
self.original_result = self.database.getResultById(self.request["original_id"],
self.request["original_type"])
self.logger.debug("original_result: %s", self.original_result)
def get_image_data(self):
"""Retrieve image data for the image(s) in the autoindexing"""
# Coming from an indexing of a single image
if self.request["original_type"] == "single":
# Reindex using two singles to make a pair
if self.request["additional_image"] != 0:
self.index_type = "new_pair"
self.image1 = self.database.getImageByImageID(
image_id=self.original_result["image1_id"])
self.image2 = self.database.getImageByImageID(
image_id=self.request["additional_image"])
# Single image reindex
else:
self.index_type = "single"
self.image1 = self.database.getImageByImageID(
image_id=self.original_result["image_id"])
# Pair reindex
elif self.request["original_type"] == "pair":
self.index_type = "pair"
self.image1 = self.database.getImageByImageID(
image_id=self.original_result["image1_id"])
self.image2 = self.database.getImageByImageID(
image_id=self.original_result["image2_id"])
def get_work_dir(self):
"""Calculate the new work directory for this reindexing"""
# Toplevel
if self.process_settings["work_dir_override"] == "False":
# Same as before
if "/single/" in self.original_result["work_dir"]:
toplevel_dir = os.path.dirname(
self.original_result["work_dir"].split("single")[0])
elif "/pair/" in self.original_result["work_dir"]:
toplevel_dir = os.path.dirname(
self.original_result["work_dir"].split("pair")[0])
else:
# New toplevel dir
toplevel_dir = self.process_settings["work_directory"]
# Type level
if self.index_type == "new_pair":
typelevel_dir = "pair"
else:
typelevel_dir = self.index_type
# Date level
datelevel_dir = datetime.date.today().isoformat()
# Sub level
if self.index_type == "single":
if self.settings["DETECTOR_SUFFIX"]:
sub_dir = os.path.basename(self.image1["fullname"]).replace(
self.settings["DETECTOR_SUFFIX"], "")
else:
sub_dir = os.path.basename(self.image1["fullname"])
elif self.index_type == "pair":
sub_dir = "_".join((self.image1["image_prefix"],
"+".join((str(self.image1["image_number"]).lstrip("0"),
str(self.image2["image_number"]).lstrip("0")))))
elif self.index_type == "new_pair":
# Image prefixes are the same
if self.image1["image_prefix"] == self.image2["image_prefix"]:
                sub_dir = "_".join((self.image1["image_prefix"],
"+".join((str(self.image1["image_number"]).lstrip("0"),
str(self.image2["image_number"]).lstrip("0")))))
# Different image prefixes - same for now, but could change if decide to
else:
sub_dir = "_".join((self.image1["image_prefix"],
"+".join((str(self.image1["image_number"]).lstrip("0"),
str(self.image2["image_number"]).lstrip("0")))))
# Join the three levels
work_dir_candidate = os.path.join(toplevel_dir, typelevel_dir, datelevel_dir, sub_dir)
# Make sure this is an original directory
if os.path.exists(work_dir_candidate):
# We have already
self.logger.debug("%s has already been used, will add qualifier", work_dir_candidate)
for i in range(1, 10000):
if not os.path.exists("_".join((work_dir_candidate, str(i)))):
work_dir_candidate = "_".join((work_dir_candidate, str(i)))
self.logger.debug("%s will be used for this image", work_dir_candidate)
break
return work_dir_candidate, sub_dir
| agpl-3.0 | 8,719,385,646,900,597,000 | 38.306202 | 100 | 0.556848 | false | 4.282517 | false | false | false |
engdan77/edoAutoHomeMobile | twisted/cred/test/test_cred.py | 3 | 14714 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.cred}, now with 30% more starch.
"""
from zope.interface import implements, Interface
from twisted.trial import unittest
from twisted.cred import portal, checkers, credentials, error
from twisted.python import components
from twisted.internet import defer
try:
from crypt import crypt
except ImportError:
crypt = None
try:
from twisted.cred import pamauth
except ImportError:
pamauth = None
class ITestable(Interface):
pass
class TestAvatar:
def __init__(self, name):
self.name = name
self.loggedIn = False
self.loggedOut = False
def login(self):
assert not self.loggedIn
self.loggedIn = True
def logout(self):
self.loggedOut = True
class Testable(components.Adapter):
implements(ITestable)
# components.Interface(TestAvatar).adaptWith(Testable, ITestable)
components.registerAdapter(Testable, TestAvatar, ITestable)
class IDerivedCredentials(credentials.IUsernamePassword):
pass
class DerivedCredentials(object):
implements(IDerivedCredentials, ITestable)
def __init__(self, username, password):
self.username = username
self.password = password
def checkPassword(self, password):
return password == self.password
class TestRealm:
implements(portal.IRealm)
def __init__(self):
self.avatars = {}
def requestAvatar(self, avatarId, mind, *interfaces):
if avatarId in self.avatars:
avatar = self.avatars[avatarId]
else:
avatar = TestAvatar(avatarId)
self.avatars[avatarId] = avatar
avatar.login()
return (interfaces[0], interfaces[0](avatar),
avatar.logout)
class NewCredTests(unittest.TestCase):
def setUp(self):
r = self.realm = TestRealm()
p = self.portal = portal.Portal(r)
up = self.checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
up.addUser("bob", "hello")
p.registerChecker(up)
def testListCheckers(self):
expected = [credentials.IUsernamePassword, credentials.IUsernameHashedPassword]
got = self.portal.listCredentialsInterfaces()
expected.sort()
got.sort()
self.assertEqual(got, expected)
def testBasicLogin(self):
l = []; f = []
self.portal.login(credentials.UsernamePassword("bob", "hello"),
self, ITestable).addCallback(
l.append).addErrback(f.append)
if f:
raise f[0]
# print l[0].getBriefTraceback()
iface, impl, logout = l[0]
# whitebox
self.assertEqual(iface, ITestable)
self.failUnless(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.failUnless(impl.original.loggedIn)
self.failUnless(not impl.original.loggedOut)
logout()
self.failUnless(impl.original.loggedOut)
def test_derivedInterface(self):
"""
Login with credentials implementing an interface inheriting from an
interface registered with a checker (but not itself registered).
"""
l = []
f = []
self.portal.login(DerivedCredentials("bob", "hello"), self, ITestable
).addCallback(l.append
).addErrback(f.append)
if f:
raise f[0]
iface, impl, logout = l[0]
# whitebox
self.assertEqual(iface, ITestable)
self.failUnless(iface.providedBy(impl),
"%s does not implement %s" % (impl, iface))
# greybox
self.failUnless(impl.original.loggedIn)
self.failUnless(not impl.original.loggedOut)
logout()
self.failUnless(impl.original.loggedOut)
def testFailedLogin(self):
l = []
self.portal.login(credentials.UsernamePassword("bob", "h3llo"),
self, ITestable).addErrback(
lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
self.failUnless(l)
self.assertEqual(error.UnauthorizedLogin, l[0])
def testFailedLoginName(self):
l = []
self.portal.login(credentials.UsernamePassword("jay", "hello"),
self, ITestable).addErrback(
lambda x: x.trap(error.UnauthorizedLogin)).addCallback(l.append)
self.failUnless(l)
self.assertEqual(error.UnauthorizedLogin, l[0])
class OnDiskDatabaseTests(unittest.TestCase):
users = [
('user1', 'pass1'),
('user2', 'pass2'),
('user3', 'pass3'),
]
def testUserLookup(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
for (u, p) in self.users:
self.failUnlessRaises(KeyError, db.getUser, u.upper())
self.assertEqual(db.getUser(u), (u, p))
def testCaseInSensitivity(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
for (u, p) in self.users:
self.assertEqual(db.getUser(u.upper()), (u, p))
def testRequestAvatarId(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
creds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testRequestAvatarId_hashed(self):
dbfile = self.mktemp()
db = checkers.FilePasswordDB(dbfile, caseSensitive=0)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, p))
f.close()
creds = [credentials.UsernameHashedPassword(u, p) for u, p in self.users]
d = defer.gatherResults(
[defer.maybeDeferred(db.requestAvatarId, c) for c in creds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
class HashedPasswordOnDiskDatabaseTests(unittest.TestCase):
users = [
('user1', 'pass1'),
('user2', 'pass2'),
('user3', 'pass3'),
]
def hash(self, u, p, s):
return crypt(p, s)
def setUp(self):
dbfile = self.mktemp()
self.db = checkers.FilePasswordDB(dbfile, hash=self.hash)
f = file(dbfile, 'w')
for (u, p) in self.users:
f.write('%s:%s\n' % (u, crypt(p, u[:2])))
f.close()
r = TestRealm()
self.port = portal.Portal(r)
self.port.registerChecker(self.db)
def testGoodCredentials(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.db.requestAvatarId(c) for c in goodCreds])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testGoodCredentials_login(self):
goodCreds = [credentials.UsernamePassword(u, p) for u, p in self.users]
d = defer.gatherResults([self.port.login(c, None, ITestable)
for c in goodCreds])
d.addCallback(lambda x: [a.original.name for i, a, l in x])
d.addCallback(self.assertEqual, [u for u, p in self.users])
return d
def testBadCredentials(self):
badCreds = [credentials.UsernamePassword(u, 'wrong password')
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in badCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnauthorizedLogin)
return d
def testHashedCredentials(self):
hashedCreds = [credentials.UsernameHashedPassword(u, crypt(p, u[:2]))
for u, p in self.users]
d = defer.DeferredList([self.port.login(c, None, ITestable)
for c in hashedCreds], consumeErrors=True)
d.addCallback(self._assertFailures, error.UnhandledCredentials)
return d
def _assertFailures(self, failures, *expectedFailures):
for flag, failure in failures:
self.assertEqual(flag, defer.FAILURE)
failure.trap(*expectedFailures)
return None
if crypt is None:
skip = "crypt module not available"
class PluggableAuthenticationModulesTests(unittest.TestCase):
def setUp(self):
"""
Replace L{pamauth.callIntoPAM} with a dummy implementation with
easily-controlled behavior.
"""
self.patch(pamauth, 'callIntoPAM', self.callIntoPAM)
def callIntoPAM(self, service, user, conv):
if service != 'Twisted':
raise error.UnauthorizedLogin('bad service: %s' % service)
if user != 'testuser':
raise error.UnauthorizedLogin('bad username: %s' % user)
questions = [
(1, "Password"),
(2, "Message w/ Input"),
(3, "Message w/o Input"),
]
replies = conv(questions)
if replies != [
("password", 0),
("entry", 0),
("", 0)
]:
raise error.UnauthorizedLogin('bad conversion: %s' % repr(replies))
return 1
def _makeConv(self, d):
def conv(questions):
return defer.succeed([(d[t], 0) for t, q in questions])
return conv
def testRequestAvatarId(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'password', 2:'entry', 3:''})
creds = credentials.PluggableAuthenticationModules('testuser',
conv)
d = db.requestAvatarId(creds)
d.addCallback(self.assertEqual, 'testuser')
return d
def testBadCredentials(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'', 2:'', 3:''})
creds = credentials.PluggableAuthenticationModules('testuser',
conv)
d = db.requestAvatarId(creds)
self.assertFailure(d, error.UnauthorizedLogin)
return d
def testBadUsername(self):
db = checkers.PluggableAuthenticationModulesChecker()
conv = self._makeConv({1:'password', 2:'entry', 3:''})
creds = credentials.PluggableAuthenticationModules('baduser',
conv)
d = db.requestAvatarId(creds)
self.assertFailure(d, error.UnauthorizedLogin)
return d
if not pamauth:
skip = "Can't run without PyPAM"
class CheckersMixin:
"""
L{unittest.TestCase} mixin for testing that some checkers accept
and deny specified credentials.
Subclasses must provide
- C{getCheckers} which returns a sequence of
L{checkers.ICredentialChecker}
- C{getGoodCredentials} which returns a list of 2-tuples of
      credential to check and avatarId to expect.
- C{getBadCredentials} which returns a list of credentials
which are expected to be unauthorized.
"""
@defer.inlineCallbacks
def test_positive(self):
"""
The given credentials are accepted by all the checkers, and give
the expected C{avatarID}s
"""
for chk in self.getCheckers():
for (cred, avatarId) in self.getGoodCredentials():
r = yield chk.requestAvatarId(cred)
self.assertEqual(r, avatarId)
@defer.inlineCallbacks
def test_negative(self):
"""
The given credentials are rejected by all the checkers.
"""
for chk in self.getCheckers():
for cred in self.getBadCredentials():
d = chk.requestAvatarId(cred)
yield self.assertFailure(d, error.UnauthorizedLogin)
class HashlessFilePasswordDBMixin:
credClass = credentials.UsernamePassword
diskHash = None
networkHash = staticmethod(lambda x: x)
_validCredentials = [
('user1', 'password1'),
('user2', 'password2'),
('user3', 'password3')]
def getGoodCredentials(self):
for u, p in self._validCredentials:
yield self.credClass(u, self.networkHash(p)), u
def getBadCredentials(self):
for u, p in [('user1', 'password3'),
('user2', 'password1'),
('bloof', 'blarf')]:
yield self.credClass(u, self.networkHash(p))
def getCheckers(self):
diskHash = self.diskHash or (lambda x: x)
hashCheck = self.diskHash and (lambda username, password, stored: self.diskHash(password))
for cache in True, False:
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('%s:%s\n' % (u, diskHash(p)))
fObj.close()
yield checkers.FilePasswordDB(fn, cache=cache, hash=hashCheck)
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('%s dingle dongle %s\n' % (diskHash(p), u))
fObj.close()
yield checkers.FilePasswordDB(fn, ' ', 3, 0, cache=cache, hash=hashCheck)
fn = self.mktemp()
fObj = file(fn, 'w')
for u, p in self._validCredentials:
fObj.write('zip,zap,%s,zup,%s\n' % (u.title(), diskHash(p)))
fObj.close()
yield checkers.FilePasswordDB(fn, ',', 2, 4, False, cache=cache, hash=hashCheck)
class LocallyHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
diskHash = staticmethod(lambda x: x.encode('hex'))
class NetworkHashedFilePasswordDBMixin(HashlessFilePasswordDBMixin):
networkHash = staticmethod(lambda x: x.encode('hex'))
class credClass(credentials.UsernameHashedPassword):
def checkPassword(self, password):
return self.hashed.decode('hex') == password
class HashlessFilePasswordDBCheckerTests(HashlessFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
class LocallyHashedFilePasswordDBCheckerTests(LocallyHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
class NetworkHashedFilePasswordDBCheckerTests(NetworkHashedFilePasswordDBMixin, CheckersMixin, unittest.TestCase):
pass
| mit | -8,630,387,330,935,826,000 | 32.517084 | 114 | 0.605478 | false | 3.913298 | true | false | false |
digistam/recon-ng | modules/recon/domains-hosts/google_site_api.py | 1 | 1558 | import module
# unique to module
from urlparse import urlparse
class Module(module.Module):
def __init__(self, params):
module.Module.__init__(self, params, query='SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL ORDER BY domain')
self.info = {
'Name': 'Google CSE Hostname Enumerator',
'Author': 'Tim Tomes (@LaNMaSteR53)',
'Description': 'Leverages the Google Custom Search Engine API to harvest hosts using the \'site\' search operator. Updates the \'hosts\' table with the results.'
}
def module_run(self, domains):
cnt = 0
new = 0
for domain in domains:
self.heading(domain, level=0)
base_query = 'site:' + domain
hosts = []
while True:
query = ''
                # build the query, excluding hosts already found in previous results
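                # e.g. 'site:example.com -site:www.example.com -site:mail.example.com' (illustrative)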
for host in hosts:
query += ' -site:%s' % (host)
query = base_query + query
results = self.search_google_api(query, limit=1)
if not results: break
for result in results:
host = urlparse(result['link']).netloc
if not host in hosts:
hosts.append(host)
self.output(host)
# add each host to the database
new += self.add_hosts(host)
cnt += len(hosts)
self.summarize(new, cnt)
| gpl-3.0 | 4,255,562,045,877,445,600 | 40 | 182 | 0.507702 | false | 4.823529 | false | false | false |
bharel/PinnacleFileFinder | PinnacleFileFinder.py | 1 | 5095 | """
Name : PinnacleFileFinder.py
Usage : PinnacleFileFinder.py -h
Author : Bar Harel
Description:
- Takes a .AXP file and creates a list of all the used files in that pinnacle project with their order and time of appearance.
- The list can be output as a text file or .csv for use with programs like Excel
Todo:
- Add all possible file formats
Changelog:
- 06/03/15 - GitHub :-)
- 21/02/15 - Creation
"""
import re, argparse, os, csv
# The encoding pinnacle studio uses
PINNACLE_ENCODING = "utf_16_le"
# File formats
FILE_FORMATS = "jpg|JPG|MOV|mov|png|PNG|avi|AVI"
# Unicode RE format for getting the time and name
TIME_NAME_RE = ur"RecIn=\".*?\(([^>]*?)\).*?<Name>([^\n]+?\.(?:%s))</Name>" % (FILE_FORMATS)
# Default output paths
CSV_DEFAULT = r".\PinnacleFiles.csv"
TXT_DEFAULT = r".\PinnacleFiles.txt"
# Max name for file
MAX_FILE_NAME = 100
def convert_time(seconds_as_float):
"""
Function : convert_time(seconds_as_float) --> hours, minutes, seconds, ms
Purpose:
        - Convert the time from seconds to an hour, minute, second, ms tuple
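    Example:
        - convert_time(3661.5) --> (1.0, 1.0, 1.0, 0.5)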
"""
# Conversion
seconds, ms = divmod(seconds_as_float,1)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return hours, minutes, seconds, ms
def output_file(findings_list, output_format, output_path):
"""
    Function : output_file(findings_list, output_format, output_path) --> NoneType
Purpose:
- Output the file in the specified format
"""
# Txt output
if output_format == "txt":
# The final string used to store the formatted file
final_str = u"Pinnacle studio file list:\n"
# Set a counter for the files
counter = 1
# Go over the findings
for appearance_time, file_name in findings_list:
# Failsafe in case of false positive matching
if len(file_name) > MAX_FILE_NAME:
continue
            # Convert time to hours, minutes, seconds, ms
try:
hours, minutes, seconds, ms = convert_time(float(appearance_time))
# In case of conversion errors
except ValueError as err:
continue
# The time string
time_str = "%02d:%02d:%02d.%s" % (hours, minutes, seconds, str(ms)[2:])
# Format the output
final_str += u"%d: %-25s \tat %02d:%02d:%02d.%s\n" % (counter, file_name, hours, minutes, seconds, str(ms)[2:])
# Increase counter
counter += 1
# Write the result to the output file
try:
with open(output_path,"w") as my_file:
my_file.write(final_str)
except IOError as err:
print "Error opening or writing to the output file."
# CSV output
elif output_format == "csv":
try:
with open(output_path,"wb") as my_file:
# Generate the csv file writer
file_writer = csv.writer(my_file)
# Go over the findings
for appearance_time, file_name in findings_list:
# Failsafe in case of false positive matching
if len(file_name) > MAX_FILE_NAME:
continue
                    # Convert time to hours, minutes, seconds, ms
try:
hours, minutes, seconds, ms = convert_time(float(appearance_time))
# In case of conversion errors
except ValueError as err:
continue
# The time string
time_str = "%02d:%02d:%02d.%s" % (hours, minutes, seconds, str(ms)[2:])
# Output the row
file_writer.writerow([file_name, time_str])
        except (IOError, csv.Error):
print "Error opening or writing to the output file."
else:
print "ERROR: Invalid output format"
def main():
"""
Function: main() --> NoneType
Purpose:
- Control the flow of the program
"""
# Parse arguments
parser = argparse.ArgumentParser(description="Find file names and time of appearance from a pinnacle studio AXP file.")
parser.add_argument("axp_file", help="Path to the .axp file")
parser.add_argument("-o", "--output_file", help=("Output file, defaults to '%s' in case of txt and '%s' in case of csv" % (TXT_DEFAULT, CSV_DEFAULT)))
parser.add_argument("-csv", help="Output the file in csv format.", action="store_true")
args = parser.parse_args()
# Check if input file exists
if not os.path.exists(args.axp_file):
print "ERROR: Invalid input path."
return
# Check the extension
if args.axp_file[-4:].lower() != ".axp":
print "Error: Not a .axp file"
return
# Unicode RE for getting the time and name
try:
time_name_re = re.compile(TIME_NAME_RE, re.S|re.U)
except re.error as err:
print "ERROR: Bad input RE."
return
# Open and read from the file
try:
with open(args.axp_file, "r") as input_file:
input_str = input_file.read()
except IOError as err:
print "Error opening or reading from input file."
return
# Decode using the pinnacle studio encoding
input_str = input_str.decode(PINNACLE_ENCODING)
# Find the re matches in the string
findings = time_name_re.findall(input_str)
# Check the specified output format
output_format = "csv" if args.csv else "txt"
# Check output file path
if args.output_file is None:
output_path = CSV_DEFAULT if args.csv else TXT_DEFAULT
else:
output_path = args.output_file
output_file(findings, output_format, output_path)
if __name__ == "__main__":
main() | mit | -6,050,888,270,007,522,000 | 25.541667 | 151 | 0.672031 | false | 3.08414 | false | false | false |
Karaage-Cluster/karaage-debian | karaage/common/passwords.py | 1 | 1901 | # Copyright 2014-2015 VPAC
# Copyright 2014 The University of Melbourne
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
LOG = logging.getLogger(__name__)
def assert_password_simple(password, old=None):
if old and password == old:
raise ValueError('Old and new passwords are the same.')
elif len(password) < 6:
raise ValueError('Password is less than six characters.')
return password
try:
from cracklib import VeryFascistCheck as _assert_password
# Some configuration errors are only apparent when cracklib
# tests a password for the first time, so test a strong password to
# verify that cracklib is working as intended.
_assert_password('thaeliez4niore0U')
except ImportError:
_assert_password = assert_password_simple
except (OSError, ValueError) as e:
LOG.warning("Cracklib misconfigured: %s", str(e))
_assert_password = assert_password_simple
def assert_strong_password(username, password, old_password=None):
"""Raises ValueError if the password isn't strong.
Returns the password otherwise."""
if username is not None and username in password:
raise ValueError("Password contains username")
return _assert_password(password, old_password)
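# Illustrative use: assert_strong_password('alice', 'alice123') raises ValueError because the
# password contains the username; otherwise the password is returned if the active checker accepts it.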
| gpl-3.0 | -5,339,375,146,943,122,000 | 34.203704 | 71 | 0.738559 | false | 4.061966 | false | false | false |
mathiasertl/django-xmpp-server-list | xmpp/plugins/rosterver.py | 1 | 1637 | # This file is part of django-xmpp-server-list
# (https://github.com/mathiasertl/django-xmpp-server-list)
#
# django-xmpp-server-list is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xmppllist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-xmpp-server-list. If not, see <http://www.gnu.org/licenses/>.
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.stanza import StreamFeatures
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.xmlstream import register_stanza_plugin
class RosterVerStanza(ElementBase):
name = 'ver'
namespace = 'urn:xmpp:features:rosterver'
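    # i.e. the stream feature element <ver xmlns="urn:xmpp:features:rosterver"/>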
interfaces = set()
plugin_attrib = 'ver'
class feature_rosterver(BasePlugin):
"""Plugin for Roster Versioning (XEP-0237).
.. seealso:: http://www.xmpp.org/extensions/xep-0237.html
"""
def plugin_init(self):
self.description = 'XEP-0237: Roster Versioning (obsolete)'
self.xmpp.register_feature(
'ver',
self._handle_rosterver,
restart=False,
order=self.config.get('order', 0))
register_stanza_plugin(StreamFeatures, RosterVerStanza)
def _handle_rosterver(self, features):
pass
| gpl-3.0 | 125,941,615,756,715,860 | 33.829787 | 82 | 0.718387 | false | 3.737443 | false | false | false |
robinlombaert/ComboCode | cc/modeling/codes/ModelingSession.py | 3 | 9935 | # -*- coding: utf-8 -*-
"""
Interface for creating modeling environments.
Author: R. Lombaert
"""
import os
from time import gmtime
import types
import cc.path
from cc.tools.io import DataIO
class ModelingSession(object):
"""
The basic modeling environment. Inherited by MCMax() and Gastronoom().
"""
def __init__(self,code,path,replace_db_entry=0,new_entries=[],\
single_session=0):
"""
Initializing an instance of ModelingSession.
@param code: code for which the modelingsession is created
@type code: string
@param path: modeling output folder in the code's home folder
@type path: string
@keyword replace_db_entry: replace an entry in the database with a
newly calculated model with a new model id
(eg if some general data not included in
the inputfiles is changed)
(default: 0)
@type replace_db_entry: bool
@keyword new_entries: The new model_ids when replace_db_entry is 1
of other models in the grid. These are not
replaced!
(default: [])
@type new_entries: list[str]
@keyword single_session: If this is the only CC session. Speeds up db
check.
(default: 0)
@type single_session: bool
"""
self.path = path
self.code = code
self.model_id = ''
self.replace_db_entry = replace_db_entry
self.new_entries = new_entries
self.single_session = single_session
if code == 'Chemistry':
self.mutable = []
else:
mutablefile = os.path.join(cc.path.aux,\
'Mutable_Parameters_%s.dat'%code)
self.mutable = [line[0]
for line in DataIO.readFile(mutablefile,delimiter=' ')
if ' '.join(line)]
self.mutable = [line for line in self.mutable if line[0] != '#']
fout = os.path.join(getattr(cc.path,self.code.lower()),self.path)
DataIO.testFolderExistence(os.path.join(fout,'models'))
def makeNewId(self):
'''
        Make a new model_id based on the current UTC date and time.
'''
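        # e.g. 'model_2015-06-03h14-25-30' (illustrative timestamp).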
return 'model_%.4i-%.2i-%.2ih%.2i-%.2i-%.2i' \
%(gmtime()[0],gmtime()[1],gmtime()[2],\
gmtime()[3],gmtime()[4],gmtime()[5])
def setCommandKey(self,comm_key,star,key_type,star_key=None,\
alternative=None,make_int=0,exp_not=0):
'''
Try setting a key in the command_list from a star instance.
If the key is unknown, it is left open and will be filled in from the
standard gastronoom inputfile.
@param comm_key: the name of the keyword in the command list
@type comm_key: string
@param star: The parameter set
@type star: Star()
@param key_type: the type of the keyword, either 'DUST' or 'GAS'
@type key_type: string
@keyword star_key: the name of the keyword in the star instance
(minus '_%s'%key_type, which is added as well in a
second attempt if the first without the addition is
not found), if None, it is equal to comm_key
(default: None)
@type star_key: string
@keyword alternative: a default value passed from the standard
inputfile that is used if the keyword or the
keyword + '_%s'%key_type is not found in Star()
(default: None)
@type alternative: string
@keyword make_int: make an integer before converting to string for this
keyword.
(default: 0)
@type make_int: boolean
@keyword exp_not: Convert to exponential notation in a string
(default: 0)
@type exp_not: bool
@return: True if successful, otherwise False.
@rtype: bool
'''
if star_key is None: star_key = comm_key
try:
self.command_list[comm_key] = \
DataIO.inputToString(star[star_key],make_int,exp_not)
return True
except KeyError:
try:
self.command_list[comm_key] = \
DataIO.inputToString(star[star_key+ '_%s'%key_type],\
make_int,exp_not)
return True
except KeyError:
if not alternative is None:
self.command_list[comm_key] = \
DataIO.inputToString(alternative,make_int,exp_not)
return True
else:
return False
def compareCommandLists(self,this_list,modellist,code,ignoreAbun=0,\
extra_dict=None,check_keys=[]):
"""
Comparing a command_list with a database entry.
@param this_list: parameters in this modeling session
@type this_list: dict
@param modellist: parameters from database model
@type modellist: dict
@param code: The GASTRoNOoM subcode
@type code: string
@keyword ignoreAbun: only relevant for mline: ignore the 4 abundance
parameters (such as for co)
(default: 0)
@type ignoreAbun: bool
@keyword extra_dict: if not None this gives extra dictionary entries
to be used in the comparison on top of this_list.
The extra entries are assumed present in modellist
otherwise the comparison will return False.
(default: None)
@type extra_dict: dict
@keyword check_keys: Only check keys given in this list. If empty, the
standard keyword lists are used.
(default: [])
@type check_keys: list[str]
@return: Comparison between the two parameter sets
@rtype: bool
"""
model_bool_list = []
if not extra_dict is None: this_list.update(extra_dict)
if check_keys:
keywords = check_keys
elif code == 'mcmax':
keywords = set(this_list.keys()+modellist.keys())
if 'dust_species' in keywords:
keywords.remove('dust_species')
if 'IN_PROGRESS' in keywords:
keywords.remove('IN_PROGRESS')
#elif code == 'chemistry':
##keywords = set(this_list.keys()+modellist.keys())
#keywords = getattr(self,code + '_keywords')
#if 'IN_PROGRESS' in keywords:
#keywords.remove('IN_PROGRESS')
else:
keywords = getattr(self,code + '_keywords')
if code == 'mline' and ignoreAbun and not check_keys:
keywords = [key
for key in keywords
if key not in ['ABUN_MOLEC','ABUN_MOLEC_RINNER',\
'ABUN_MOLEC_RE','RMAX_MOLEC']]
for keyword in keywords:
#-- All issues with "double" notation instead of exponential should be resolved
# if keyword == 'STEP_RS_RIN':
# if this_list.has_key(keyword) \
# and type(this_list[keyword]) is types.StringType:
# if 'd' in this_list[keyword]:
# this_list[keyword] =this_list[keyword].replace('d','e')
# if modellist.has_key(keyword) \
# and type(modellist[keyword]) is types.StringType:
# if 'd' in modellist[keyword]:
# modellist[keyword] =modellist[keyword].replace('d','e')
try:
try:
try:
val = float(this_list[keyword])
except TypeError:
raise ValueError
delta = not val and 1e-10 or 0.001*val
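                    # e.g. 1.0000 and 1.0005 compare equal (0.1% tolerance), while 1.0 and 1.2 do not (illustrative).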
if val < 0:
tb = val-delta > float(modellist[keyword]) > val+delta
else:
tb = val-delta < float(modellist[keyword]) < val+delta
except ValueError:
tb = this_list[keyword]==modellist[keyword]
except KeyError:
if keyword not in this_list.keys() \
and keyword not in modellist.keys():
tb = True
else:
tb = False
model_bool_list.append(tb)
if False not in model_bool_list:
return True
else:
return False
def cCL(self,*args,**kwargs):
'''
Short-hand helper function for compareCommandLists.
'''
return self.compareCommandLists(*args,**kwargs)
| gpl-3.0 | -1,328,444,157,775,239,400 | 37.065134 | 82 | 0.473981 | false | 4.822816 | false | false | false |
kwilliams-mo/iris | lib/iris/fileformats/pp.py | 1 | 66319 | # (C) British Crown Copyright 2010 - 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides UK Met Office Post Process (PP) format specific capabilities.
"""
import abc
import collections
from copy import deepcopy
import itertools
import operator
import os
import re
import struct
import warnings
import numpy as np
import numpy.ma as ma
import netcdftime
import iris.config
import iris.fileformats.rules
import iris.unit
from iris.fileformats.manager import DataManager
import iris.fileformats.pp_rules
import iris.coord_systems
import iris.proxy
iris.proxy.apply_proxy('iris.fileformats.pp_packing', globals())
__all__ = ['load', 'save', 'PPField', 'add_load_rules', 'reset_load_rules',
'add_save_rules', 'reset_save_rules', 'STASH', 'EARTH_RADIUS']
EARTH_RADIUS = 6371229.0
# PP->Cube and Cube->PP rules are loaded on first use
_load_rules = None
_save_rules = None
PP_HEADER_DEPTH = 256
PP_WORD_DEPTH = 4
NUM_LONG_HEADERS = 45
NUM_FLOAT_HEADERS = 19
# The header definition for header release 2.
#: A list of (header_name, position_in_header(tuple of)) pairs for
#: header release 2 - using the one-based UM/FORTRAN indexing convention.
UM_HEADER_2 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbday', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbdayd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# The header definition for header release 3.
#: A list of (header_name, position_in_header(tuple of)) pairs for
#: header release 3 - using the one-based UM/FORTRAN indexing convention.
UM_HEADER_3 = [
('lbyr', (1, )),
('lbmon', (2, )),
('lbdat', (3, )),
('lbhr', (4, )),
('lbmin', (5, )),
('lbsec', (6, )),
('lbyrd', (7, )),
('lbmond', (8, )),
('lbdatd', (9, )),
('lbhrd', (10, )),
('lbmind', (11, )),
('lbsecd', (12, )),
('lbtim', (13, )),
('lbft', (14, )),
('lblrec', (15, )),
('lbcode', (16, )),
('lbhem', (17, )),
('lbrow', (18, )),
('lbnpt', (19, )),
('lbext', (20, )),
('lbpack', (21, )),
('lbrel', (22, )),
('lbfc', (23, )),
('lbcfc', (24, )),
('lbproc', (25, )),
('lbvc', (26, )),
('lbrvc', (27, )),
('lbexp', (28, )),
('lbegin', (29, )),
('lbnrec', (30, )),
('lbproj', (31, )),
('lbtyp', (32, )),
('lblev', (33, )),
('lbrsvd', (34, 35, 36, 37, )),
('lbsrce', (38, )),
('lbuser', (39, 40, 41, 42, 43, 44, 45, )),
('brsvd', (46, 47, 48, 49, )),
('bdatum', (50, )),
('bacc', (51, )),
('blev', (52, )),
('brlev', (53, )),
('bhlev', (54, )),
('bhrlev', (55, )),
('bplat', (56, )),
('bplon', (57, )),
('bgor', (58, )),
('bzy', (59, )),
('bdy', (60, )),
('bzx', (61, )),
('bdx', (62, )),
('bmdi', (63, )),
('bmks', (64, )),
]
# A map from header-release-number to header definition
UM_HEADERS = {2: UM_HEADER_2, 3: UM_HEADER_3}
# Offset value to convert from UM_HEADER positions to PP_HEADER offsets.
UM_TO_PP_HEADER_OFFSET = 1
#: A dictionary mapping IB values to their names.
EXTRA_DATA = {
1: 'x',
2: 'y',
3: 'lower_y_domain',
4: 'lower_x_domain',
5: 'upper_y_domain',
6: 'upper_x_domain',
7: 'lower_z_domain',
8: 'upper_z_domain',
10: 'field_title',
11: 'domain_title',
12: 'x_lower_bound',
13: 'x_upper_bound',
14: 'y_lower_bound',
15: 'y_upper_bound',
}
#: Maps lbuser[0] to numpy data type. "default" will be interpreted if
#: no match is found, providing a warning in such a case.
LBUSER_DTYPE_LOOKUP = {1 :np.dtype('>f4'),
2 :np.dtype('>i4'),
3 :np.dtype('>i4'),
-1:np.dtype('>f4'),
-2:np.dtype('>i4'),
-3:np.dtype('>i4'),
'default': np.dtype('>f4'),
}
# LBPROC codes and their English equivalents
LBPROC_PAIRS = ((1, "Difference from another experiment"),
(2, "Difference from zonal (or other spatial) mean"),
(4, "Difference from time mean"),
(8, "X-derivative (d/dx)"),
(16, "Y-derivative (d/dy)"),
(32, "Time derivative (d/dt)"),
(64, "Zonal mean field"),
(128, "Time mean field"),
(256, "Product of two fields"),
(512, "Square root of a field"),
(1024, "Difference between fields at levels BLEV and BRLEV"),
(2048, "Mean over layer between levels BLEV and BRLEV"),
(4096, "Minimum value of field during time period"),
(8192, "Maximum value of field during time period"),
(16384, "Magnitude of a vector, not specifically wind speed"),
(32768, "Log10 of a field"),
(65536, "Variance of a field"),
(131072, "Mean over an ensemble of parallel runs"))
# lbproc_map is a dict mapping lbproc->English and English->lbproc, essentially a one-to-one mapping
lbproc_map = {x : y for x,y in itertools.chain(LBPROC_PAIRS, ((y,x) for x,y in LBPROC_PAIRS))}
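# e.g. lbproc_map[128] == "Time mean field" and lbproc_map["Time mean field"] == 128.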
class STASH(collections.namedtuple('STASH', 'model section item')):
"""
A class to hold a single STASH code.
Create instances using:
>>> model = 1
>>> section = 2
>>> item = 3
>>> my_stash = iris.fileformats.pp.STASH(model, section, item)
Access the sub-components via:
>>> my_stash.model
1
>>> my_stash.section
2
>>> my_stash.item
3
String conversion results in the MSI format:
>>> print iris.fileformats.pp.STASH(1, 16, 203)
m01s16i203
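    Create an instance from an MSI string via STASH.from_msi (illustrative;
    the output shown assumes the default namedtuple repr):
    >>> iris.fileformats.pp.STASH.from_msi('m01s16i203')
    STASH(model=1, section=16, item=203)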
"""
__slots__ = ()
def __new__(cls, model, section, item):
"""
Args:
* model
A positive integer less than 100, or None.
* section
A non-negative integer less than 100, or None.
* item
A positive integer less than 1000, or None.
"""
model = cls._validate_member('model', model, 1, 99)
section = cls._validate_member('section', section, 0, 99)
item = cls._validate_member('item', item, 1, 999)
return super(STASH, cls).__new__(cls, model, section, item)
@staticmethod
def from_msi(msi):
"""Convert a STASH code MSI string to a STASH instance."""
if not isinstance(msi, basestring):
raise TypeError('Expected STASH code MSI string, got %r' % msi)
msi_match = re.match('^\s*m(.*)s(.*)i(.*)\s*$', msi, re.IGNORECASE)
if msi_match is None:
raise ValueError('Expected STASH code MSI string "mXXsXXiXXX", got %r' % msi)
return STASH(*msi_match.groups())
@staticmethod
def _validate_member(name, value, lower_limit, upper_limit):
# Returns a valid integer or None.
try:
value = int(value)
if not lower_limit <= value <= upper_limit:
value = None
except (TypeError, ValueError):
value = None
return value
def __str__(self):
model = self._format_member(self.model, 2)
section = self._format_member(self.section, 2)
item = self._format_member(self.item, 3)
return 'm{}s{}i{}'.format(model, section, item)
def _format_member(self, value, num_digits):
if value is None:
result = '?' * num_digits
else:
format_spec = '0' + str(num_digits)
result = format(value, format_spec)
return result
def lbuser3(self):
"""Return the lbuser[3] value that this stash represents."""
return (self.section or 0) * 1000 + (self.item or 0)
def lbuser6(self):
"""Return the lbuser[6] value that this stash represents."""
return self.model or 0
@property
def is_valid(self):
return '?' not in str(self)
def __eq__(self, other):
if isinstance(other, basestring):
return super(STASH, self).__eq__(STASH.from_msi(other))
else:
return super(STASH, self).__eq__(other)
def __ne__(self, other):
return not self.__eq__(other)
class SplittableInt(object):
"""
A class to hold integers which can easily get each decimal digit individually.
>>> three_six_two = SplittableInt(362)
>>> print three_six_two
362
>>> print three_six_two[0]
2
>>> print three_six_two[2]
3
.. note:: No support for negative numbers
"""
def __init__(self, value, name_mapping_dict=None):
"""
Build a SplittableInt given the positive integer value provided.
Kwargs:
* name_mapping_dict - (dict)
A special mapping to provide name based access to specific integer positions:
>>> a = SplittableInt(1234, {'hundreds': 2})
>>> print a.hundreds
2
>>> a.hundreds = 9
>>> print a.hundreds
9
>>> print a
1934
"""
if value < 0:
            raise ValueError('Negative numbers are not supported by SplittableInt objects')
# define the name lookup first (as this is the way __setattr__ is plumbed)
#: A dictionary mapping special attribute names on this object
#: to the slices/indices required to access them.
self._name_lookup = name_mapping_dict or {}
self._value = value
self._calculate_str_value_from_value()
def __int__(self):
return int(self._value)
def _calculate_str_value_from_value(self):
# Reverse the string to get the appropriate index when getting the sliced value
self._strvalue = [int(c) for c in str(self._value)[::-1]]
# Associate the names in the lookup table to attributes
for name, index in self._name_lookup.items():
object.__setattr__(self, name, self[index])
def _calculate_value_from_str_value(self):
self._value = np.sum([ 10**i * val for i, val in enumerate(self._strvalue)])
def __len__(self):
return len(self._strvalue)
def __getitem__(self, key):
try:
val = self._strvalue[key]
except IndexError:
val = 0
# if the key returns a list of values, then combine them together to an integer
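        # e.g. SplittableInt(1234)[0:2] == 34 (the two least significant digits combined).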
if isinstance(val, list):
val = sum([10**i * val for i, val in enumerate(val)])
return val
def __setitem__(self, key, value):
# The setitem method has been overridden so that assignment using ``val[0] = 1`` style syntax updates
# the entire object appropriately.
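        # e.g. with x = SplittableInt(1234), setting x[0:2] = 99 leaves x == 1299 (illustrative).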
if (not isinstance(value, int) or value < 0):
raise ValueError('Can only set %s as a positive integer value.' % key)
if isinstance(key, slice):
if ((key.start is not None and key.start < 0) or
(key.step is not None and key.step < 0) or
(key.stop is not None and key.stop < 0)):
raise ValueError('Cannot assign a value with slice objects containing negative indices.')
# calculate the current length of the value of this string
current_length = len(range(*key.indices(len(self))))
            # get indices for as many digits as have been requested, capping the number of digits at 100.
indices = range(*key.indices(100))
if len(indices) < len(str(value)):
raise ValueError('Cannot put %s into %s as it has too many digits.' % (value, key))
# Iterate over each of the indices in the slice, zipping them together with the associated digit
for index, digit in zip(indices, str(value).zfill(current_length)[::-1]):
# assign each digit to the associated index
self.__setitem__(index, int(digit))
else:
            # If we are trying to set an index which does not currently exist in _strvalue then extend it to the
            # appropriate length
if (key + 1) > len(self):
new_str_value = [0] * (key + 1)
new_str_value[:len(self)] = self._strvalue
self._strvalue = new_str_value
self._strvalue[key] = value
for name, index in self._name_lookup.items():
if index == key:
object.__setattr__(self, name, value)
self._calculate_value_from_str_value()
def __setattr__(self, name, value):
# if the attribute is a special value, update the index value which will in turn update the attribute value
if (name != '_name_lookup' and name in self._name_lookup.keys()):
self[self._name_lookup[name]] = value
else:
object.__setattr__(self, name, value)
def __str__(self):
return str(self._value)
def __repr__(self):
return 'SplittableInt(%r, name_mapping_dict=%r)' % (self._value, self._name_lookup)
def __eq__(self, other):
result = NotImplemented
if isinstance(other, SplittableInt):
result = self._value == other._value
elif isinstance(other, int):
result = self._value == other
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def _compare(self, other, op):
result = NotImplemented
if isinstance(other, SplittableInt):
result = op(self._value, other._value)
elif isinstance(other, int):
result = op(self._value, other)
return result
def __lt__(self, other):
return self._compare(other, operator.lt)
def __le__(self, other):
return self._compare(other, operator.le)
def __gt__(self, other):
return self._compare(other, operator.gt)
def __ge__(self, other):
return self._compare(other, operator.ge)
class BitwiseInt(SplittableInt):
"""
A class to hold an integer, of fixed bit-length, which can easily get/set each bit individually.
.. note::
Uses a fixed number of bits.
Will raise an Error when attempting to access an out-of-range flag.
>>> a = BitwiseInt(511)
>>> a.flag1
1
>>> a.flag8
1
>>> a.flag128
1
>>> a.flag256
1
>>> a.flag512
AttributeError: 'BitwiseInt' object has no attribute 'flag512'
    >>> a.flag512 = True
AttributeError: Cannot set a flag that does not exist: flag512
"""
def __init__(self, value, num_bits=None):
""" """ # intentionally empty docstring as all covered in the class docstring.
SplittableInt.__init__(self, value)
self.flags = ()
#do we need to calculate the number of bits based on the given value?
self._num_bits = num_bits
if self._num_bits is None:
self._num_bits = 0
while((value >> self._num_bits) > 0):
self._num_bits += 1
else:
#make sure the number of bits is enough to store the given value.
if (value >> self._num_bits) > 0:
raise ValueError("Not enough bits to store value")
self._set_flags_from_value()
def _set_flags_from_value(self):
all_flags = []
# Set attributes "flag[n]" to 0 or 1
for i in range(self._num_bits):
flag_name = 1 << i
flag_value = ((self._value >> i) & 1)
object.__setattr__(self, 'flag%d' % flag_name, flag_value)
# Add to list off all flags
if flag_value:
all_flags.append(flag_name)
self.flags = tuple(all_flags)
def _set_value_from_flags(self):
self._value = 0
for i in range(self._num_bits):
bit_value = pow(2, i)
flag_name = "flag%i" % bit_value
flag_value = object.__getattribute__(self, flag_name)
self._value += flag_value * bit_value
def __iand__(self, value):
"""Perform an &= operation."""
self._value &= value
self._set_flags_from_value()
return self
def __ior__(self, value):
"""Perform an |= operation."""
self._value |= value
self._set_flags_from_value()
return self
def __iadd__(self, value):
"""Perform an inplace add operation"""
self._value += value
self._set_flags_from_value()
return self
def __setattr__(self, name, value):
# Allow setting of the attribute flags
# Are we setting a flag?
if name.startswith("flag") and name != "flags":
#true and false become 1 and 0
if not isinstance(value, bool):
raise TypeError("Can only set bits to True or False")
# Setting an existing flag?
if hasattr(self, name):
#which flag?
flag_value = int(name[4:])
#on or off?
if value:
self |= flag_value
else:
self &= ~flag_value
# Fail if an attempt has been made to set a flag that does not exist
else:
raise AttributeError("Cannot set a flag that does not exist: %s" % name)
# If we're not setting a flag, then continue as normal
else:
SplittableInt.__setattr__(self, name, value)
class PPDataProxy(object):
"""A reference to the data payload of a single PP field."""
__slots__ = ('path', 'offset', 'data_len', 'lbpack', 'mask')
def __init__(self, path, offset, data_len, lbpack, mask):
self.path = path
self.offset = offset
self.data_len = data_len
self.lbpack = lbpack
self.mask = mask
# NOTE:
# "__getstate__" and "__setstate__" functions are defined here to provide a custom interface for Pickle
# : Pickle "normal" behaviour is just to save/reinstate the object dictionary
# : that won't work here, because the use of __slots__ means **there is no object dictionary**
def __getstate__(self):
# object state capture method for Pickle.dump()
# - return the instance data values needed to reconstruct the PPDataProxy object
return dict([(k,getattr(self,k)) for k in PPDataProxy.__slots__])
def __setstate__(self, state):
# object reconstruction method for Pickle.load()
# reinitialise the object state from the serialised values (using setattr, as there is no object dictionary)
for (key, val) in state.items():
setattr(self, key, val)
def __repr__(self):
return '%s(%r, %r, %r, %r, %r)' % \
(self.__class__.__name__, self.path, self.offset,
self.data_len, self.lbpack, self.mask)
def load(self, data_shape, data_type, mdi, deferred_slice):
"""
Load the corresponding proxy data item and perform any deferred slicing.
Args:
* data_shape (tuple of int):
The data shape of the proxy data item.
* data_type (:class:`numpy.dtype`):
The data type of the proxy data item.
* mdi (float):
The missing data indicator value.
* deferred_slice (tuple):
The deferred slice to be applied to the proxy data item.
Returns:
:class:`numpy.ndarray`
"""
# Load the appropriate proxy data conveniently with a context manager.
with open(self.path, 'rb') as pp_file:
pp_file.seek(self.offset, os.SEEK_SET)
data_bytes = pp_file.read(self.data_len)
data = _read_data_bytes(data_bytes, self.lbpack, data_shape,
data_type, mdi, self.mask)
# Identify which index items in the deferred slice are tuples.
tuple_dims = [i for i, value in enumerate(deferred_slice) if isinstance(value, tuple)]
# Whenever a slice consists of more than one tuple index item, numpy does not slice the
        # data array as we want it to. We therefore need to split the deferred slice into
# multiple slices and consistently slice the data with one slice per tuple.
if len(tuple_dims) > 1:
# Identify which index items in the deferred slice are single scalar values.
# Such dimensions will collapse in the sliced data shape.
collapsed_dims = [i for i, value in enumerate(deferred_slice) if isinstance(value, int)]
# Equate the first slice to be the original deferred slice.
tuple_slice = list(deferred_slice)
# Replace all tuple index items in the slice, except for the first,
# to be full slices over their dimension.
for dim in tuple_dims[1:]:
tuple_slice[dim] = slice(None)
# Perform the deferred slice containing only the first tuple index item.
payload = data[tuple_slice]
# Re-slice the data consistently with the next single tuple index item.
for dim in tuple_dims[1:]:
# Identify all those pre-sliced collapsed dimensions less than
# the dimension of the current slice tuple index item.
ndims_collapsed = len(filter(lambda x: x < dim, collapsed_dims))
# Construct the single tuple slice.
tuple_slice = [slice(None)] * payload.ndim
tuple_slice[dim - ndims_collapsed] = deferred_slice[dim]
# Slice the data with this single tuple slice.
payload = payload[tuple_slice]
else:
# The deferred slice contains no more than one tuple index item, so
# it's safe to slice the data directly.
payload = data[deferred_slice]
return payload
def __eq__(self, other):
result = NotImplemented
if isinstance(other, PPDataProxy):
result = True
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr):
result = False
break
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
def _read_data_bytes(data_bytes, lbpack, data_shape, data_type, mdi,
mask=None):
"""
Convert the already read binary data payload into a numpy array, unpacking
and decompressing as per the F3 specification.
"""
if lbpack.n1 in (0, 2):
data = np.frombuffer(data_bytes, dtype=data_type)
elif lbpack.n1 == 1:
data = pp_packing.wgdos_unpack(data_bytes, data_shape[0],
data_shape[1], mdi)
elif lbpack.n1 == 4:
data = pp_packing.rle_decode(data_bytes, data_shape[0], data_shape[1], mdi)
else:
raise iris.exceptions.NotYetImplementedError(
'PP fields with LBPACK of %s are not yet supported.' % lbpack)
# Ensure we have write permission on the data buffer.
data.setflags(write=True)
# Ensure the data is in the native byte order
if not data.dtype.isnative:
data.byteswap(True)
data.dtype = data.dtype.newbyteorder('=')
if hasattr(lbpack, 'boundary_packing'):
# Convert a long string of numbers into a "lateral boundary
# condition" array, which is split into 4 quartiles, North
# East, South, West and where North and South contain the corners.
boundary_packing = lbpack.boundary_packing
compressed_data = data
data = np.ma.masked_all(data_shape)
boundary_height = boundary_packing.y_halo + boundary_packing.rim_width
boundary_width = boundary_packing.x_halo + boundary_packing.rim_width
y_height, x_width = data_shape
# The height of the east and west components.
mid_height = y_height - 2 * boundary_height
n_s_shape = boundary_height, x_width
e_w_shape = mid_height, boundary_width
# Keep track of our current position in the array.
current_posn = 0
north = compressed_data[:boundary_height*x_width]
current_posn += len(north)
data[-boundary_height:, :] = north.reshape(*n_s_shape)
east = compressed_data[current_posn:
current_posn + boundary_width * mid_height]
current_posn += len(east)
data[boundary_height:-boundary_height,
-boundary_width:] = east.reshape(*e_w_shape)
south = compressed_data[current_posn:
current_posn + boundary_height * x_width]
current_posn += len(south)
data[:boundary_height, :] = south.reshape(*n_s_shape)
west = compressed_data[current_posn:
current_posn + boundary_width * mid_height]
current_posn += len(west)
data[boundary_height:-boundary_height,
:boundary_width] = west.reshape(*e_w_shape)
elif lbpack.n2 == 2:
if mask is None:
raise ValueError('No mask was found to unpack the data. '
'Could not load.')
land_mask = mask.data.astype(np.bool)
sea_mask = ~land_mask
new_data = np.ma.masked_all(land_mask.shape)
if lbpack.n3 == 1:
# Land mask packed data.
new_data.mask = sea_mask
            # Sometimes the data comes in longer than it should be (i.e. the
            # packed payload contains more values than there are unmasked
            # points, and the trailing data hasn't been clipped off).
new_data[land_mask] = data[:land_mask.sum()]
elif lbpack.n3 == 2:
# Sea mask packed data.
new_data.mask = land_mask
new_data[sea_mask] = data[:sea_mask.sum()]
else:
raise ValueError('Unsupported mask compression.')
data = new_data
else:
# Reform in row-column order
data.shape = data_shape
# Mask the array?
if mdi in data:
data = ma.masked_values(data, mdi, copy=False)
return data
# The special headers of the PPField classes which get some improved functionality
_SPECIAL_HEADERS = ('lbtim', 'lbcode', 'lbpack', 'lbproc',
'data', 'data_manager', 'stash', 't1', 't2')
def _header_defn(release_number):
"""
Returns the zero-indexed header definition for a particular release of a PPField.
"""
um_header = UM_HEADERS[release_number]
offset = UM_TO_PP_HEADER_OFFSET
return [(name, tuple(position - offset for position in positions)) for name, positions in um_header]
def _pp_attribute_names(header_defn):
"""
Returns the allowed attributes of a PPField:
all of the normal headers (i.e. not the _SPECIAL_HEADERS),
the _SPECIAL_HEADERS with '_' prefixed,
the possible extra data headers.
"""
normal_headers = list(name for name, positions in header_defn if name not in _SPECIAL_HEADERS)
special_headers = list('_' + name for name in _SPECIAL_HEADERS)
extra_data = EXTRA_DATA.values()
return normal_headers + special_headers + extra_data
class PPField(object):
"""
A generic class for PP fields - not specific to a particular header release number.
A PPField instance can easily access the PP header "words" as attributes with some added useful capabilities::
for field in iris.fileformats.pp.load(filename):
print field.lbyr
print field.lbuser
print field.lbuser[0]
print field.lbtim
print field.lbtim.ia
print field.t1
"""
# NB. Subclasses must define the attribute HEADER_DEFN to be their
# zero-based header definition. See PPField2 and PPField3 for examples.
__metaclass__ = abc.ABCMeta
__slots__ = ()
def __init__(self):
"""
PPField instances are always created empty, and attributes are added subsequently.
.. seealso::
For PP field loading see :func:`load`.
"""
@abc.abstractproperty
def t1(self):
pass
@abc.abstractproperty
def t2(self):
pass
def __repr__(self):
"""Return a string representation of the PP field."""
# Define an ordering on the basic header names
attribute_priority_lookup = {name: loc[0] for name, loc in self.HEADER_DEFN}
# With the attributes sorted the order will remain stable if extra attributes are added.
public_attribute_names = attribute_priority_lookup.keys() + EXTRA_DATA.values()
self_attrs = [(name, getattr(self, name, None)) for name in public_attribute_names]
self_attrs = filter(lambda pair: pair[1] is not None, self_attrs)
if hasattr(self, '_data_manager'):
if self._data_manager is None:
self_attrs.append( ('data', self.data) )
else:
self_attrs.append( ('unloaded_data_manager', self._data_manager) )
self_attrs.append( ('unloaded_data_proxy', self._data) )
# sort the attributes by position in the pp header followed, then by alphabetical order.
attributes = sorted(self_attrs, key=lambda pair: (attribute_priority_lookup.get(pair[0], 999), pair[0]) )
return 'PP Field' + ''.join(['\n %s: %s' % (k, v) for k, v in attributes]) + '\n'
@property
def stash(self):
"""A stash property giving access to the associated STASH object, now supporting __eq__"""
if (not hasattr(self, '_stash') or
self.lbuser[6] != self._stash.lbuser6() or
self.lbuser[3] != self._stash.lbuser3()):
self._stash = STASH(self.lbuser[6], self.lbuser[3] / 1000, self.lbuser[3] % 1000)
return self._stash
@stash.setter
def stash(self, stash):
if isinstance(stash, basestring):
self._stash = STASH.from_msi(stash)
elif isinstance(stash, STASH):
self._stash = stash
else:
raise ValueError('Cannot set stash to {!r}'.format(stash))
# Keep the lbuser up to date.
self.lbuser = list(self.lbuser)
self.lbuser[6] = self._stash.lbuser6()
self.lbuser[3] = self._stash.lbuser3()
# lbtim
def _lbtim_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
# add the ia/ib/ic values for lbtim
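            # e.g. an LBTIM of 121 gives ia=1, ib=2, ic=1 (illustrative).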
new_value = SplittableInt(new_value, {'ia':slice(2, None), 'ib':1, 'ic':0})
self._lbtim = new_value
lbtim = property(lambda self: self._lbtim, _lbtim_setter)
# lbcode
def _lbcode_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
# add the ix/iy values for lbcode
new_value = SplittableInt(new_value, {'iy':slice(0, 2), 'ix':slice(2, 4)})
self._lbcode = new_value
lbcode = property(lambda self: self._lbcode, _lbcode_setter)
# lbpack
def _lbpack_setter(self, new_value):
if not isinstance(new_value, SplittableInt):
# add the n1/n2/n3/n4/n5 values for lbpack
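            # e.g. an LBPACK of 120 gives n1=0, n2=2, n3=1 (land-mask compressed; illustrative).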
name_mapping = dict(n5=slice(4, None), n4=3, n3=2, n2=1, n1=0)
new_value = SplittableInt(new_value, name_mapping)
self._lbpack = new_value
lbpack = property(lambda self: self._lbpack, _lbpack_setter)
# lbproc
def _lbproc_setter(self, new_value):
if not isinstance(new_value, BitwiseInt):
new_value = BitwiseInt(new_value, num_bits=18)
self._lbproc = new_value
lbproc = property(lambda self: self._lbproc, _lbproc_setter)
@property
def data(self):
"""The :class:`numpy.ndarray` representing the multidimensional data of the pp file"""
# Cache the real data on first use
if self._data_manager is not None:
self._data = self._data_manager.load(self._data)
self._data_manager = None
return self._data
@data.setter
def data(self, value):
self._data = value
self._data_manager = None
@property
def calendar(self):
"""Return the calendar of the field."""
# TODO #577 What calendar to return when ibtim.ic in [0, 3]
return iris.unit.CALENDAR_GREGORIAN if self.lbtim.ic != 2 else iris.unit.CALENDAR_360_DAY
def _read_extra_data(self, pp_file, file_reader, extra_len):
"""Read the extra data section and update the self appropriately."""
        # While there is still extra data to decode, run this loop
while extra_len > 0:
extra_int_code = struct.unpack_from('>L', file_reader(PP_WORD_DEPTH))[0]
extra_len -= PP_WORD_DEPTH
ib = extra_int_code % 1000
ia = extra_int_code // 1000
data_len = ia * PP_WORD_DEPTH
if ib == 10:
self.field_title = ''.join(struct.unpack_from('>%dc' % data_len, file_reader(data_len))).rstrip('\00')
elif ib == 11:
self.domain_title = ''.join(struct.unpack_from('>%dc' % data_len, file_reader(data_len))).rstrip('\00')
elif ib in EXTRA_DATA:
attr_name = EXTRA_DATA[ib]
values = np.fromfile(pp_file, dtype=np.dtype('>f%d' % PP_WORD_DEPTH), count=ia)
# Ensure the values are in the native byte order
if not values.dtype.isnative:
values.byteswap(True)
values.dtype = values.dtype.newbyteorder('=')
setattr(self, attr_name, values)
else:
raise ValueError('Unknown IB value for extra data: %s' % ib)
extra_len -= data_len
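        # Worked example (added for clarity, not in the original source): an
        # extra-data integer code of 3010 decodes as ia = 3 and ib = 10, i.e.
        # three words (3 * PP_WORD_DEPTH bytes) of character data holding the
        # field_title.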
@property
def x_bounds(self):
if hasattr(self, "x_lower_bound") and hasattr(self, "x_upper_bound"):
return np.column_stack((self.x_lower_bound, self.x_upper_bound))
@property
def y_bounds(self):
if hasattr(self, "y_lower_bound") and hasattr(self, "y_upper_bound"):
return np.column_stack((self.y_lower_bound, self.y_upper_bound))
def save(self, file_handle):
"""
Save the PPField to the given file object (typically created with :func:`open`).
::
# to append the field to a file
a_pp_field.save(open(filename, 'ab'))
# to overwrite/create a file
a_pp_field.save(open(filename, 'wb'))
.. note::
The fields which are automatically calculated are: 'lbext',
'lblrec' and 'lbuser[0]'. Some fields are not currently
populated, these are: 'lbegin', 'lbnrec', 'lbuser[1]'.
"""
# Before we can actually write to file, we need to calculate the header elements.
# First things first, make sure the data is big-endian
data = self.data
if isinstance(data, ma.core.MaskedArray):
data = data.filled(fill_value=self.bmdi)
if data.dtype.newbyteorder('>') != data.dtype:
# take a copy of the data when byteswapping
data = data.byteswap(False)
data.dtype = data.dtype.newbyteorder('>')
# Create the arrays which will hold the header information
lb = np.empty(shape=NUM_LONG_HEADERS, dtype=np.dtype(">u%d" % PP_WORD_DEPTH))
b = np.empty(shape=NUM_FLOAT_HEADERS, dtype=np.dtype(">f%d" % PP_WORD_DEPTH))
# Populate the arrays from the PPField
for name, pos in self.HEADER_DEFN:
try:
header_elem = getattr(self, name)
except AttributeError:
raise AttributeError("PPField.save() could not find %s" % name)
if pos[0] <= NUM_LONG_HEADERS - UM_TO_PP_HEADER_OFFSET:
index = slice(pos[0], pos[-1] + 1)
if isinstance(header_elem, SplittableInt):
header_elem = int(header_elem)
lb[index] = header_elem
else:
index = slice(pos[0] - NUM_LONG_HEADERS, pos[-1] - NUM_LONG_HEADERS + 1)
b[index] = header_elem
# Although all of the elements are now populated, we still need to update some of the elements in case
# things have changed (for example, the data length etc.)
# Set up a variable to represent the datalength of this PPField in WORDS.
len_of_data_payload = 0
# set up a list to hold the extra data which will need to be encoded at the end of the data
extra_items = []
# iterate through all of the possible extra data fields
for ib, extra_data_attr_name in EXTRA_DATA.iteritems():
# try to get the extra data field, returning None if it doesn't exist
extra_elem = getattr(self, extra_data_attr_name, None)
if extra_elem is not None:
# The special case of character extra data must be caught
if isinstance(extra_elem, basestring):
ia = len(extra_elem)
# pad any strings up to a multiple of PP_WORD_DEPTH (this length is # of bytes)
ia = (PP_WORD_DEPTH - (ia-1) % PP_WORD_DEPTH) + (ia-1)
extra_elem = extra_elem.ljust(ia, '\00')
# ia is now the datalength in WORDS of the string
ia /= PP_WORD_DEPTH
else:
# ia is the datalength in WORDS
ia = np.product(extra_elem.shape)
# flip the byteorder if the data is not big-endian
if extra_elem.dtype.newbyteorder('>') != extra_elem.dtype:
# take a copy of the extra data when byte swapping
extra_elem = extra_elem.byteswap(False)
extra_elem.dtype = extra_elem.dtype.newbyteorder('>')
# add the number of bytes to the len_of_data_payload variable + the extra integer which will encode ia/ib
len_of_data_payload += PP_WORD_DEPTH * ia + PP_WORD_DEPTH
integer_code = 1000 * ia + ib
extra_items.append( [integer_code, extra_elem] )
if ia >= 1000:
raise IOError('PP files cannot write extra data with more than '
'1000 elements. Tried to write "%s" which has %s '
                              'elements.' % (extra_data_attr_name, ia)
)
HEADER_DICT = dict(self.HEADER_DEFN)
# populate lbext in WORDS
lb[HEADER_DICT['lbext'][0]] = len_of_data_payload / PP_WORD_DEPTH
# Put the data length of pp.data into len_of_data_payload (in BYTES)
len_of_data_payload += data.size * PP_WORD_DEPTH
        # populate lblrec in WORDS
lb[HEADER_DICT['lblrec'][0]] = len_of_data_payload / PP_WORD_DEPTH
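        # Worked example (added, not in the original source): a 73 x 96 field
        # with no extra data has data.size == 7008, so lblrec is written as
        # 7008 words and lbext as 0.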
# populate lbuser[0] to have the data's datatype
if data.dtype == np.dtype('>f4'):
lb[HEADER_DICT['lbuser'][0]] = 1
elif data.dtype == np.dtype('>f8'):
warnings.warn("Downcasting array precision from float64 to float32 for save."
"If float64 precision is required then please save in a different format")
data = data.astype('>f4')
lb[HEADER_DICT['lbuser'][0]] = 1
elif data.dtype == np.dtype('>i4'):
# NB: there is no physical difference between lbuser[0] of 2 or 3 so we encode just 2
lb[HEADER_DICT['lbuser'][0]] = 2
else:
raise IOError('Unable to write data array to a PP file. The datatype was %s.' % data.dtype)
# NB: lbegin, lbnrec, lbuser[1] not set up
        # Now that we have done the manoeuvring required, write to the file...
if not isinstance(file_handle, file):
raise TypeError('The file_handle argument must be an instance of a Python file object, but got %r. \n'
'e.g. open(filename, "wb") to open a binary file with write permission.' % type(file_handle))
pp_file = file_handle
# header length
pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
# 49 integers
lb.tofile(pp_file)
# 16 floats
b.tofile(pp_file)
        # Header length (again)
pp_file.write(struct.pack(">L", PP_HEADER_DEPTH))
# Data length (including extra data length)
pp_file.write(struct.pack(">L", int(len_of_data_payload)))
# the data itself
if lb[HEADER_DICT['lbpack'][0]] == 0:
data.tofile(pp_file)
else:
raise NotImplementedError('Writing packed pp data with lbpack of %s '
'is not supported.' % lb[HEADER_DICT['lbpack'][0]])
# extra data elements
for int_code, extra_data in extra_items:
pp_file.write(struct.pack(">L", int(int_code)))
if isinstance(extra_data, basestring):
pp_file.write(struct.pack(">%sc" % len(extra_data), *extra_data))
else:
extra_data = extra_data.astype(np.dtype('>f4'))
extra_data.tofile(pp_file)
# Data length (again)
pp_file.write(struct.pack(">L", int(len_of_data_payload)))
##############################################################
#
# From here on define helper methods for PP -> Cube conversion.
#
def regular_points(self, xy):
"""Return regular points from the PPField, or fail if not regular.
Args:
* xy - a string, "x" or "y" to specify the dimension for which to return points.
.. deprecated:: 1.5
"""
msg = "The 'regular_points' method is deprecated."
warnings.warn(msg, UserWarning, stacklevel=2)
if xy.lower() == "x":
bz = self.bzx
bd = self.bdx
count = self.lbnpt
elif xy.lower() == "y":
bz = self.bzy
bd = self.bdy
count = self.lbrow
else:
raise ValueError("'x' or 'y' not supplied")
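        # Worked example (added for clarity): with bzx = 10.0, bdx = 2.5 and
        # lbnpt = 4 this returns [12.5, 15.0, 17.5, 20.0] -- the first point
        # sits one grid step (bd) beyond the zeroth-point origin (bz).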
return (bz + bd) + bd * np.arange(count, dtype=np.float32)
def regular_bounds(self, xy):
"""Return regular bounds from the PPField, or fail if not regular.
Args:
* xy - a string, "x" or "y" to specify the dimension for which to return points.
.. deprecated:: 1.5
"""
msg = "The 'regular_bounds' method is deprecated."
warnings.warn(msg, UserWarning, stacklevel=2)
if xy.lower() == "x":
delta = 0.5 * self.bdx
elif xy.lower() == "y":
delta = 0.5 * self.bdy
else:
raise ValueError("'x' or 'y' not supplied")
points = self.regular_points(xy)
return np.concatenate([[points - delta], [points + delta]]).T
def time_unit(self, time_unit, epoch='epoch'):
return iris.unit.Unit('%s since %s' % (time_unit, epoch), calendar=self.calendar)
def coord_system(self):
"""Return a CoordSystem for this PPField.
Returns:
Currently, a :class:`~iris.coord_systems.GeogCS` or :class:`~iris.coord_systems.RotatedGeogCS`.
"""
geog_cs = iris.coord_systems.GeogCS(EARTH_RADIUS)
if self.bplat != 90.0 or self.bplon != 0.0:
geog_cs = iris.coord_systems.RotatedGeogCS(self.bplat, self.bplon, ellipsoid=geog_cs)
return geog_cs
def _x_coord_name(self):
# TODO: Remove once we have the ability to derive this in the rules.
x_name = "longitude"
if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
x_name = "grid_longitude"
return x_name
def _y_coord_name(self):
# TODO: Remove once we have the ability to derive this in the rules.
y_name = "latitude"
if isinstance(self.coord_system(), iris.coord_systems.RotatedGeogCS):
y_name = "grid_latitude"
return y_name
def copy(self):
"""
Returns a deep copy of this PPField.
Returns:
A copy instance of the :class:`PPField`.
"""
return self._deepcopy({})
def __deepcopy__(self, memo):
return self._deepcopy(memo)
def _deepcopy(self, memo):
field = self.__class__()
for attr in self.__slots__:
if hasattr(self, attr):
value = getattr(self, attr)
# Cope with inability to deepcopy a 0-d NumPy array.
if attr == '_data' and value is not None and value.ndim == 0:
setattr(field, attr, np.array(deepcopy(value[()], memo)))
else:
setattr(field, attr, deepcopy(value, memo))
return field
def __eq__(self, other):
result = NotImplemented
if isinstance(other, PPField):
result = True
for attr in self.__slots__:
attrs = [hasattr(self, attr), hasattr(other, attr)]
if all(attrs):
if not np.all(getattr(self, attr) == getattr(other, attr)):
result = False
break
elif any(attrs):
result = False
break
return result
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
class PPField2(PPField):
"""
A class to hold a single field from a PP file, with a header release number of 2.
"""
HEADER_DEFN = _header_defn(2)
__slots__ = _pp_attribute_names(HEADER_DEFN)
def _get_t1(self):
if not hasattr(self, '_t1'):
self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat, self.lbhr, self.lbmin)
return self._t1
def _set_t1(self, dt):
self.lbyr = dt.year
self.lbmon = dt.month
self.lbdat = dt.day
self.lbhr = dt.hour
self.lbmin = dt.minute
self.lbday = int(dt.strftime('%j'))
if hasattr(self, '_t1'):
delattr(self, '_t1')
t1 = property(_get_t1, _set_t1, None,
"A netcdftime.datetime object consisting of the lbyr, lbmon, lbdat, lbhr, and lbmin attributes.")
def _get_t2(self):
if not hasattr(self, '_t2'):
self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond, self.lbdatd, self.lbhrd, self.lbmind)
return self._t2
def _set_t2(self, dt):
self.lbyrd = dt.year
self.lbmond = dt.month
self.lbdatd = dt.day
self.lbhrd = dt.hour
self.lbmind = dt.minute
self.lbdayd = int(dt.strftime('%j'))
if hasattr(self, '_t2'):
delattr(self, '_t2')
t2 = property(_get_t2, _set_t2, None,
"A netcdftime.datetime object consisting of the lbyrd, lbmond, lbdatd, lbhrd, and lbmind attributes.")
class PPField3(PPField):
"""
A class to hold a single field from a PP file, with a header release number of 3.
"""
HEADER_DEFN = _header_defn(3)
__slots__ = _pp_attribute_names(HEADER_DEFN)
def _get_t1(self):
if not hasattr(self, '_t1'):
self._t1 = netcdftime.datetime(self.lbyr, self.lbmon, self.lbdat, self.lbhr, self.lbmin, self.lbsec)
return self._t1
def _set_t1(self, dt):
self.lbyr = dt.year
self.lbmon = dt.month
self.lbdat = dt.day
self.lbhr = dt.hour
self.lbmin = dt.minute
self.lbsec = dt.second
if hasattr(self, '_t1'):
delattr(self, '_t1')
t1 = property(_get_t1, _set_t1, None,
"A netcdftime.datetime object consisting of the lbyr, lbmon, lbdat, lbhr, lbmin, and lbsec attributes.")
def _get_t2(self):
if not hasattr(self, '_t2'):
self._t2 = netcdftime.datetime(self.lbyrd, self.lbmond, self.lbdatd, self.lbhrd, self.lbmind, self.lbsecd)
return self._t2
def _set_t2(self, dt):
self.lbyrd = dt.year
self.lbmond = dt.month
self.lbdatd = dt.day
self.lbhrd = dt.hour
self.lbmind = dt.minute
self.lbsecd = dt.second
if hasattr(self, '_t2'):
delattr(self, '_t2')
t2 = property(_get_t2, _set_t2, None,
"A netcdftime.datetime object consisting of the lbyrd, lbmond, lbdatd, lbhrd, lbmind, and lbsecd attributes.")
PP_CLASSES = {
2: PPField2,
3: PPField3
}
def make_pp_field(header_values):
# Choose a PP field class from the value of LBREL
lbrel = header_values[21]
if lbrel not in PP_CLASSES:
raise ValueError('Unsupported header release number: {}'.format(lbrel))
pp_field = PP_CLASSES[lbrel]()
for name, loc in pp_field.HEADER_DEFN:
if len(loc) == 1:
value = header_values[loc[0]]
else:
value = header_values[loc[0]:loc[-1]+1]
setattr(pp_field, name, value)
return pp_field
DeferredArrayBytes = collections.namedtuple('DeferredBytes',
'fname, position, n_bytes, dtype')
LoadedArrayBytes = collections.namedtuple('LoadedArrayBytes', 'bytes, dtype')
def load(filename, read_data=False):
"""
Return an iterator of PPFields given a filename.
Args:
* filename - string of the filename to load.
Kwargs:
* read_data - boolean
        Flag whether or not the data should be read; if False, an empty data manager
        will be provided which can subsequently load the data on demand. Default False.
To iterate through all of the fields in a pp file::
for field in iris.fileformats.pp.load(filename):
print field
"""
return _interpret_fields(_field_gen(filename, read_data_bytes=read_data))
def _interpret_fields(fields):
"""
    Turn the fields read with load and FF2PP._extract_field into usable
    fields. One of the primary purposes of this function is to either convert
    "deferred bytes" into "deferred arrays" or "loaded bytes" into actual
    numpy arrays (via the _create_field_data function).
"""
land_mask = None
landmask_compressed_fields = []
for field in fields:
# Store the first reference to a land mask, and use this as the
# definitive mask for future fields in this generator.
if land_mask is None and field.stash == 'm01s00i030':
land_mask = field
# Handle land compressed data payloads.
if field.lbpack.n2 == 2:
# If we don't have the land mask yet, we shouldn't yield the field.
if land_mask is None:
landmask_compressed_fields.append(field)
continue
# Land compressed fields don't have a lbrow and lbnpt.
field.lbrow, field.lbnpt = land_mask.lbrow, land_mask.lbnpt
data_shape = (field.lbrow, field.lbnpt)
_create_field_data(field, data_shape, land_mask)
yield field
if landmask_compressed_fields:
if land_mask is None:
warnings.warn('Landmask compressed fields existed without a '
'landmask to decompress with. The data will have '
'a shape of (0, 0) and will not read.')
mask_shape = (0, 0)
else:
mask_shape = (land_mask.lbrow, land_mask.lbnpt)
for field in landmask_compressed_fields:
field.lbrow, field.lbnpt = mask_shape
_create_field_data(field, (field.lbrow, field.lbnpt), land_mask)
yield field
def _create_field_data(field, data_shape, land_mask):
"""
Modifies a field's ``_data`` attribute either by:
* converting DeferredArrayBytes into a "deferred array".
* converting LoadedArrayBytes into an actual numpy array.
"""
if isinstance(field._data, LoadedArrayBytes):
loaded_bytes = field._data
field._data = _read_data_bytes(loaded_bytes.bytes, field.lbpack, data_shape,
loaded_bytes.dtype, field.bmdi, land_mask)
field._data_manager = None
else:
# Get hold of the DeferredArrayBytes instance.
deferred_bytes = field._data
# NB. This makes a 0-dimensional array
field._data = np.array(PPDataProxy(deferred_bytes.fname, deferred_bytes.position,
deferred_bytes.n_bytes, field.lbpack, land_mask))
field._data_manager = DataManager(data_shape, deferred_bytes.dtype, field.bmdi)
def _field_gen(filename, read_data_bytes):
pp_file = open(filename, 'rb')
# Get a reference to the seek method on the file
    # (this is accessed 3 times per header, so caching the bound methods can provide a small performance boost)
pp_file_seek = pp_file.seek
pp_file_read = pp_file.read
# Keep reading until we reach the end of file
while True:
# Move past the leading header length word
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
# Get the LONG header entries
header_longs = np.fromfile(pp_file, dtype='>i%d' % PP_WORD_DEPTH, count=NUM_LONG_HEADERS)
# Nothing returned => EOF
if len(header_longs) == 0:
break
# Get the FLOAT header entries
header_floats = np.fromfile(pp_file, dtype='>f%d' % PP_WORD_DEPTH, count=NUM_FLOAT_HEADERS)
header = tuple(header_longs) + tuple(header_floats)
# Make a PPField of the appropriate sub-class (depends on header release number)
pp_field = make_pp_field(header)
# Skip the trailing 4-byte word containing the header length
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
# Read the word telling me how long the data + extra data is
# This value is # of bytes
len_of_data_plus_extra = struct.unpack_from('>L', pp_file_read(PP_WORD_DEPTH))[0]
if len_of_data_plus_extra != pp_field.lblrec * PP_WORD_DEPTH:
raise ValueError('LBLREC has a different value to the integer recorded after the '
'header in the file (%s and %s).' % (pp_field.lblrec * PP_WORD_DEPTH,
len_of_data_plus_extra))
# calculate the extra length in bytes
extra_len = pp_field.lbext * PP_WORD_DEPTH
# Derive size and datatype of payload
data_len = len_of_data_plus_extra - extra_len
dtype = LBUSER_DTYPE_LOOKUP.get(pp_field.lbuser[0],
LBUSER_DTYPE_LOOKUP['default'])
if read_data_bytes:
# Read the actual bytes. This can then be converted to a numpy array
# at a higher level.
pp_field._data = LoadedArrayBytes(pp_file.read(data_len), dtype)
else:
# Provide enough context to read the data bytes later on.
pp_field._data = DeferredArrayBytes(filename, pp_file.tell(), data_len, dtype)
# Seek over the actual data payload.
pp_file_seek(data_len, os.SEEK_CUR)
# Do we have any extra data to deal with?
if extra_len:
pp_field._read_extra_data(pp_file, pp_file_read, extra_len)
# Skip that last 4 byte record telling me the length of the field I have already read
pp_file_seek(PP_WORD_DEPTH, os.SEEK_CUR)
yield pp_field
pp_file.close()
def _ensure_load_rules_loaded():
"""Makes sure the standard conversion and verification rules are loaded."""
# Uses these module-level variables
global _load_rules, _cross_reference_rules
rules = iris.fileformats.rules
if _load_rules is None:
basepath = iris.config.CONFIG_PATH
_load_rules = rules.RulesContainer(os.path.join(basepath, 'pp_rules.txt'))
if _cross_reference_rules is None:
basepath = iris.config.CONFIG_PATH
_cross_reference_rules = rules.RulesContainer(os.path.join(basepath, 'pp_cross_reference_rules.txt'),
rule_type=rules.ObjectReturningRule)
def add_load_rules(filename):
"""
Registers a rules file for use during the PP load process.
Registered files are processed after the standard conversion rules, and in
the order they were registered.
.. deprecated:: 1.5
"""
msg = "The 'add_load_rules' function is deprecated."
warnings.warn(msg, UserWarning, stacklevel=2)
# Uses this module-level variable
global _load_rules
if _load_rules is None:
_load_rules = iris.fileformats.rules.RulesContainer(filename)
else:
_load_rules.import_rules(filename)
def reset_load_rules():
"""Resets the PP load process to use only the standard conversion rules."""
# Uses this module-level variable
global _load_rules
_load_rules = None
def _ensure_save_rules_loaded():
"""Makes sure the standard save rules are loaded."""
# Uses these module-level variables
global _save_rules
if _save_rules is None:
# Load the pp save rules
rules_filename = os.path.join(iris.config.CONFIG_PATH, 'pp_save_rules.txt')
_save_rules = iris.fileformats.rules.RulesContainer(rules_filename, iris.fileformats.rules.ProcedureRule)
def add_save_rules(filename):
"""
Registers a rules file for use during the PP save process.
Registered files are processed after the standard conversion rules, and in
the order they were registered.
"""
_ensure_save_rules_loaded()
_save_rules.import_rules(filename)
def reset_save_rules():
"""Resets the PP save process to use only the standard conversion rules."""
# Uses this module-level variable
global _save_rules
_save_rules = None
def load_cubes(filenames, callback=None):
"""
Loads cubes from a list of pp filenames.
Args:
* filenames - list of pp filenames to load
Kwargs:
* callback - a function which can be passed on to :func:`iris.io.run_callback`
.. note::
The resultant cubes may not be in the order that they are in the file (order
is not preserved when there is a field with orography references)
"""
return _load_cubes_variable_loader(filenames, callback, load)
def _load_cubes_variable_loader(filenames, callback, loading_function,
loading_function_kwargs=None):
pp_loader = iris.fileformats.rules.Loader(
loading_function, loading_function_kwargs or {},
iris.fileformats.pp_rules.convert, _load_rules)
return iris.fileformats.rules.load_cubes(filenames, callback, pp_loader)
def save(cube, target, append=False, field_coords=None):
"""
Use the PP saving rules (and any user rules) to save a cube to a PP file.
Args:
* cube - A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of cubes.
* target - A filename or open file handle.
Kwargs:
* append - Whether to start a new file afresh or add the cube(s) to the end of the file.
Only applicable when target is a filename, not a file handle.
Default is False.
* field_coords - list of 2 coords or coord names which are to be used for
reducing the given cube into 2d slices, which will ultimately
determine the x and y coordinates of the resulting fields.
If None, the final two dimensions are chosen for slicing.
See also :func:`iris.io.save`.
"""
# Open issues
# Could use rules in "sections" ... e.g. to process the extensive dimensions; ...?
# Could pre-process the cube to add extra convenient terms?
# e.g. x-coord, y-coord ... but what about multiple coordinates on the dimension?
# How to perform the slicing?
# Do we always slice in the last two dimensions?
# Not all source data will contain lat-lon slices.
# What do we do about dimensions with multiple coordinates?
# Deal with:
# LBLREC - Length of data record in words (incl. extra data)
# Done on save(*)
# LBUSER[0] - Data type
# Done on save(*)
# LBUSER[1] - Start address in DATA (?! or just set to "null"?)
# BLEV - Level - the value of the coordinate for LBVC
# *) With the current on-save way of handling LBLREC and LBUSER[0] we can't
# check if they've been set correctly without *actually* saving as a binary
# PP file. That also means you can't use the same reference.txt file for
# loaded vs saved fields (unless you re-load the saved field!).
# Set to (or leave as) "null":
# LBEGIN - Address of start of field in direct access dataset
# LBEXP - Experiment identification
# LBPROJ - Fields file projection number
# LBTYP - Fields file field type code
# LBLEV - Fields file level code / hybrid height model level
# Build confidence by having a PP object that records which header items
# have been set, and only saves if they've all been set?
# Watch out for extra-data.
# On the flip side, record which Cube metadata has been "used" and flag up
# unused?
_ensure_save_rules_loaded()
# pp file
if isinstance(target, basestring):
pp_file = open(target, "ab" if append else "wb")
elif hasattr(target, "write"):
if hasattr(target, "mode") and "b" not in target.mode:
raise ValueError("Target not binary")
pp_file = target
else:
raise ValueError("Can only save pp to filename or writable")
n_dims = len(cube.shape)
if n_dims < 2:
raise ValueError('Unable to save a cube of fewer than 2 dimensions.')
if field_coords is not None:
# cast the given coord/coord names into cube coords
field_coords = cube._as_list_of_coords(field_coords)
if len(field_coords) != 2:
raise ValueError('Got %s coordinates in field_coords, expecting exactly 2.' % len(field_coords))
else:
# default to the last two dimensions (if result of coords is an empty list, will
# raise an IndexError)
# NB watch out for the ordering of the dimensions
field_coords = (cube.coords(dimensions=n_dims-2)[0], cube.coords(dimensions=n_dims-1)[0])
# Save each named or latlon slice2D in the cube
for slice2D in cube.slices(field_coords):
# Start with a blank PPField
pp_field = PPField3()
# Set all items to 0 because we need lbuser, lbtim
# and some others to be present before running the rules.
for name, positions in pp_field.HEADER_DEFN:
# Establish whether field name is integer or real
default = 0 if positions[0] <= NUM_LONG_HEADERS - UM_TO_PP_HEADER_OFFSET else 0.0
# Establish whether field position is scalar or composite
if len(positions) > 1:
default = [default] * len(positions)
setattr(pp_field, name, default)
# Some defaults should not be 0
pp_field.lbrel = 3 # Header release 3.
pp_field.lbcode = 1 # Grid code.
pp_field.bmks = 1.0 # Some scaley thing.
pp_field.lbproc = 0
# Set the data
pp_field.data = slice2D.data
# Run the PP save rules on the slice2D, to fill the PPField,
# recording the rules that were used
rules_result = _save_rules.verify(slice2D, pp_field)
verify_rules_ran = rules_result.matching_rules
# Log the rules used
iris.fileformats.rules.log('PP_SAVE', target if isinstance(target, basestring) else target.name, verify_rules_ran)
# Write to file
pp_field.save(pp_file)
if isinstance(target, basestring):
pp_file.close()
| gpl-3.0 | -8,502,837,758,948,939,000 | 35.101796 | 124 | 0.571103 | false | 3.811656 | false | false | false |
xgvargas/fsaipe | setup.py | 1 | 1090 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import pkg_resources
import codecs
import fsaipe
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
with open("requirements.txt", "r") as f:
install_requires = [str(req) for req in pkg_resources.parse_requirements(f)]
setup(
name='saipe',
version=fsaipe.__version__,
license="Apache",
description='Flask SqlAlchemy In Place Editor',
long_description=long_description,
author='Gustavo vargas',
author_email='[email protected]',
url='https://github.com/xgvargas/saipe',
# py_modules = ['saipe'],
packages = ['fsaipe'],
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| apache-2.0 | 4,929,373,236,578,159,000 | 28.459459 | 80 | 0.644954 | false | 3.745704 | false | false | false |
electionleaflets/electionleaflets | electionleaflets/apps/leaflets/management/commands/leaflets_import_legacy.py | 1 | 3863 | # -*- coding: utf-8 -*-
import os
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.files import File
from legacy.models import legacyLeaflet, legacyParty
from leaflets.models import Leaflet, LeafletImage
from constituencies.models import Constituency
from uk_political_parties.models import Party
class Command(BaseCommand):
def clean_legacy_leaflet(self, legacy_leaflet, party=None, constituency=None):
data = legacy_leaflet.__dict__.copy()
del data['publisher_party_id']
del data['_publisher_party_cache']
del data['_state']
data['publisher_party'] = party
data['constituency'] = constituency
if data.get('live'):
data['status'] = 'live'
else:
data['status'] = 'removed'
del data['live']
return data
def clean_legacy_leaflet_image(self, legacy_image):
data = {}
key = "%s.jpg" % legacy_image.image_key
if getattr(settings, 'IMAGE_LOCAL_CACHE'):
image_path = os.path.join(
settings.IMAGE_LOCAL_CACHE,
key
)
if os.path.exists(image_path):
print "Exists"
f = open(image_path, 'r')
data['image'] = File(f)
else:
image_path = os.path.join(
settings.IMAGE_LOCAL_CACHE,
'large',
key
)
if os.path.exists(image_path):
print "Exists"
f = open(image_path, 'r')
data['image'] = File(f)
else:
print "Doesn't exist"
return data
def clean_constituency(self, con):
con_name = con.constituency.name
if con_name == "Ynys Mon":
con_name = "Ynys Môn"
if con_name == "Cotswold":
con_name = "The Cotswolds"
if con_name == "Taunton":
con_name = "Taunton Deane"
try:
con = Constituency.objects.get(name__iexact=con_name)
except Constituency.DoesNotExist:
con_name = ", ".join(con_name.split(' ', 1))
con = Constituency.objects.get(name=con_name)
return con
def handle(self, **options):
for legacy_leaflet in legacyLeaflet.objects.all():
if not legacy_leaflet.date_uploaded:
if legacy_leaflet.date_delivered:
legacy_leaflet.date_uploaded = legacy_leaflet.date_delivered
if legacy_leaflet.date_uploaded:
if not bool(legacy_leaflet.publisher_party_id):
party = None
else:
party = Party.objects.find_party_by_name(
legacy_leaflet.publisher_party.name)
cons = legacy_leaflet.legacyleafletconstituency_set.all()
con = None
if cons:
con = cons[0]
con = self.clean_constituency(con)
# import ipdb
# ipdb.set_trace()
new_leaflet, created = Leaflet.objects.update_or_create(
pk=legacy_leaflet.pk,
defaults=self.clean_legacy_leaflet(
legacy_leaflet,
party,
constituency=con
))
for legacy_image in legacy_leaflet.images.all():
new_image, created = LeafletImage.objects.update_or_create(
leaflet=new_leaflet,
legacy_image_key=legacy_image.image_key,
defaults=self.clean_legacy_leaflet_image(legacy_image))
| mit | -6,455,352,814,939,578,000 | 35.433962 | 83 | 0.511134 | false | 4.184182 | false | false | false |
leiferikb/bitpop | build/scripts/slave/recipe_modules/v8/chromium_config.py | 1 | 2205 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from slave.recipe_config import BadConf
from slave.recipe_config_types import Path
from slave import recipe_config
from RECIPE_MODULES.chromium import CONFIG_CTX
@CONFIG_CTX()
def v8(c):
targ_arch = c.gyp_env.GYP_DEFINES.get('target_arch')
if not targ_arch: # pragma: no cover
raise recipe_config.BadConf('v8 must have a valid target_arch.')
c.gyp_env.GYP_DEFINES['v8_target_arch'] = targ_arch
del c.gyp_env.GYP_DEFINES['component']
c.build_dir = Path('[CHECKOUT]', 'out')
c.compile_py.build_tool = 'make'
if c.HOST_PLATFORM == 'mac':
c.compile_py.build_tool = 'xcode'
elif c.HOST_PLATFORM == 'win':
c.compile_py.build_tool = 'vs'
c.build_dir = Path('[CHECKOUT]', 'build')
if c.BUILD_CONFIG == 'Debug':
c.gyp_env.GYP_DEFINES['v8_optimized_debug'] = 1
# Chromium adds '_x64' to the output folder, which is neither needed nor
# understood when compiling v8 standalone.
if c.HOST_PLATFORM == 'win' and c.TARGET_BITS == 64:
c.build_config_fs = c.BUILD_CONFIG
c.compile_py.pass_arch_flag = True
@CONFIG_CTX(includes=['v8'])
def interpreted_regexp(c):
c.gyp_env.GYP_DEFINES['v8_interpreted_regexp'] = 1
@CONFIG_CTX(includes=['v8'])
def no_i18n(c):
c.gyp_env.GYP_DEFINES['v8_enable_i18n_support'] = 0
@CONFIG_CTX(includes=['v8'])
def no_lsan(c):
c.gyp_env.GYP_DEFINES['lsan'] = 0
@CONFIG_CTX(includes=['v8'])
def no_snapshot(c):
c.gyp_env.GYP_DEFINES['v8_use_snapshot'] = 'false'
@CONFIG_CTX(includes=['v8'])
def novfp3(c):
c.gyp_env.GYP_DEFINES['v8_can_use_vfp3_instructions'] = 'false'
@CONFIG_CTX(includes=['v8'])
def no_optimized_debug(c):
if c.BUILD_CONFIG == 'Debug':
c.gyp_env.GYP_DEFINES['v8_optimized_debug'] = 0
@CONFIG_CTX(includes=['v8'])
def optimized_debug(c):
if c.BUILD_CONFIG == 'Debug':
c.gyp_env.GYP_DEFINES['v8_optimized_debug'] = 2
@CONFIG_CTX(includes=['v8'])
def verify_heap(c):
c.gyp_env.GYP_DEFINES['v8_enable_verify_heap'] = 1
@CONFIG_CTX(includes=['v8'])
def vtunejit(c):
c.gyp_env.GYP_DEFINES['v8_enable_vtunejit'] = 1
| gpl-3.0 | -5,664,300,842,167,193,000 | 26.222222 | 74 | 0.678005 | false | 2.794677 | true | false | false |
DrDos0016/z2 | museum_site/migrations/0009_auto_20160816_2215.py | 1 | 1270 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-16 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0008_article_page'),
]
operations = [
migrations.AddField(
model_name='file',
name='article_count',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='article',
name='content',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='article',
name='css',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='article',
name='date',
field=models.DateField(default='1970-01-01'),
),
migrations.AlterField(
model_name='file',
name='company',
field=models.CharField(blank=True, default='', max_length=80),
),
migrations.AlterField(
model_name='file',
name='genre',
field=models.CharField(blank=True, default='', max_length=80),
),
]
| mit | -7,044,724,493,369,413,000 | 27.222222 | 74 | 0.534646 | false | 4.45614 | false | false | false |
IshitaTakeshi/pyleargist | src/leargist/__init__.py | 1 | 4832 | import os
from ctypes import POINTER
from ctypes import pointer
from ctypes import Structure
from ctypes import c_float
from ctypes import c_int
from ctypes import c_void_p
import numpy as np
leargist_folder = os.path.abspath(__file__).rsplit(os.path.sep, 1)[0]
leargist_name = "_gist"
libleargist = np.ctypeslib.load_library(leargist_name, leargist_folder)
class GistBwImage(Structure):
'''Matches image_t declared in standalone_image.h'''
_fields_ = [
("width", c_int),
("height", c_int),
("stride", c_int), # stride needs to be computed separately
("data", POINTER(c_float))
]
class GistColorImage(Structure):
'''Matches color_image_t declared in standalone_image.h'''
_fields_ = [
("width", c_int), # stride = width
("height", c_int),
("c1", POINTER(c_float)), # R
("c2", POINTER(c_float)), # G
("c3", POINTER(c_float)), # B
]
# Setup argument & return types for color gist
libleargist.color_gist_scaletab.argtypes = (
POINTER(GistColorImage), c_int, c_int, POINTER(c_int))
libleargist.color_gist_scaletab.restype = c_void_p
# Setup argument & return types
libleargist.bw_gist_scaletab.argtypes = (
POINTER(GistBwImage), c_int, c_int, POINTER(c_int))
libleargist.bw_gist_scaletab.restype = c_void_p
def bw_gist(im, nblocks=4, orientations=(8, 8, 4)):
scales = len(orientations)
orientations = np.array(orientations, dtype=np.int32)
if im.shape[0] < 8 or im.shape[1] < 8:
raise ValueError(
"image size must at least be (8, 8), got %s" % im.size)
im = np.ascontiguousarray(im, dtype=np.float32)
gbwi = GistBwImage(
im.shape[1], # Width is the SECOND element of the shape tuple
im.shape[0],
im.shape[1],
im.ctypes.data_as(POINTER(c_float)))
# We don't need a *3 because it's black & white. Note the useless
# looking brackets here are HIGHLY NECESSARY!! difference between
# ending up with c_float * 320 (which we want) and c_float * 4 * 4 * 20
descriptors = c_float * (nblocks * nblocks * orientations.sum())
addr = libleargist.bw_gist_scaletab(
pointer(gbwi), nblocks, scales,
orientations.ctypes.data_as(POINTER(c_int)))
    if addr is None:
# This can happen when the block we give it contains NaN, Inf, etc.
raise ValueError("Descriptor invalid")
return np.ctypeslib.as_array(descriptors.from_address(addr))
def color_gist_numpy(image, nblocks=4, orientations=(8, 8, 4)):
height, width = image.shape[:2]
if width < 8 or height < 8:
raise ValueError(
"image size should at least be (8, 8), got %r" % (width, height))
image = image.transpose(2, 0, 1)
image = np.ascontiguousarray(image, dtype=np.float32)
gci = GistColorImage(
width,
height,
image[0].ctypes.data_as(POINTER(c_float)),
image[1].ctypes.data_as(POINTER(c_float)),
image[2].ctypes.data_as(POINTER(c_float)))
scales = len(orientations)
orientations = np.array(orientations, dtype=np.int32)
addr = libleargist.color_gist_scaletab(
pointer(gci), nblocks, scales,
orientations.ctypes.data_as(POINTER(c_int)))
descriptors = c_float * (nblocks * nblocks * orientations.sum() * 3)
    if addr is None:
# This can happen when the block we give it contains NaN, Inf, etc.
raise ValueError("Descriptor invalid")
return np.ctypeslib.as_array(descriptors.from_address(addr))
def color_gist(im, nblocks=4, orientations=(8, 8, 4)):
"""Compute the GIST descriptor of an RGB image"""
scales = len(orientations)
orientations = np.array(orientations, dtype=np.int32)
# check minimum image size
if im.size[0] < 8 or im.size[1] < 8:
raise ValueError(
"image size should at least be (8, 8), got %r" % (im.size,))
# ensure the image is encoded in RGB
im = im.convert(mode='RGB')
# build the lear_gist color image C datastructure
arr = np.fromstring(im.tobytes(), np.uint8)
arr.shape = list(im.size) + [3]
arr = arr.transpose(2, 0, 1)
arr = np.ascontiguousarray(arr, dtype=np.float32)
gci = GistColorImage(
im.size[0],
im.size[1],
arr[0].ctypes.data_as(POINTER(c_float)),
arr[1].ctypes.data_as(POINTER(c_float)),
arr[2].ctypes.data_as(POINTER(c_float)))
descriptors = c_float * (nblocks * nblocks * orientations.sum() * 3)
addr = libleargist.color_gist_scaletab(
pointer(gci), nblocks, scales,
orientations.ctypes.data_as(POINTER(c_int)))
    if addr is None:
# This can happen when the block we give it contains NaN, Inf, etc.
raise ValueError("Descriptor invalid")
return np.ctypeslib.as_array(descriptors.from_address(addr))
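# Example usage sketch (added; not part of the original module), assuming PIL
# is installed:
#
#     from PIL import Image
#     import leargist
#     im = Image.open('example.jpg')
#     descriptor = leargist.color_gist(im)  # 4 * 4 * (8 + 8 + 4) * 3 == 960 floats
#
# The orientations tuple gives the number of orientations used at each scale
# (one entry per scale), and nblocks sets the spatial grid resolution of the
# descriptor.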
| gpl-3.0 | -730,454,892,452,331,400 | 32.324138 | 77 | 0.639487 | false | 3.249496 | false | false | false |
Magicked/crits | crits/targets/forms.py | 6 | 3689 | from django import forms
from django.forms.utils import ErrorList
from crits.campaigns.campaign import Campaign
from crits.core import form_consts
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names
from crits.vocabulary.relationships import RelationshipTypes
from crits.vocabulary.acls import Common, TargetACL
relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
class TargetInfoForm(forms.Form):
"""
Django form for adding/updating target information.
"""
error_css_class = 'error'
required_css_class = 'required'
firstname = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
lastname = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
division = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
department = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
email_address = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=True)
organization_id = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
title = forms.CharField(widget=forms.TextInput(attrs={'size': '50'}),
required=False)
note = forms.CharField(widget=forms.Textarea(attrs={'cols':'50', 'rows':'2'}),
required=False)
campaign = forms.ChoiceField(widget=forms.Select, required=False,
label=form_consts.Target.CAMPAIGN)
camp_conf = forms.ChoiceField(required=False,
label=form_consts.Target.CAMPAIGN_CONFIDENCE)
related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
relationship_type = forms.ChoiceField(required=False,
label=form_consts.Common.RELATIONSHIP_TYPE,
widget=forms.Select(attrs={'id':'relationship_type'}))
def __init__(self, username, *args, **kwargs):
super(TargetInfoForm, self).__init__( *args, **kwargs)
if username.has_access_to(Common.CAMPAIGN_READ):
self.fields['campaign'].choices = [('', '')] + [
(c.name, c.name) for c in get_item_names(Campaign, True)]
self.fields['camp_conf'].choices = [('',''),
('low', 'low'),
('medium', 'medium'),
('high', 'high')]
self.fields['relationship_type'].choices = relationship_choices
self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO
add_bucketlist_to_form(self)
add_ticket_to_form(self)
def clean(self):
cleaned_data = super(TargetInfoForm, self).clean()
campaign = cleaned_data.get('campaign')
if campaign:
confidence = cleaned_data.get('camp_conf')
if not confidence or confidence == '':
self._errors.setdefault('camp_conf', ErrorList())
self._errors['camp_conf'].append(u'This field is required if campaign is specified.')
return cleaned_data
| mit | -4,767,154,713,998,922,000 | 48.547945 | 117 | 0.578477 | false | 4.329812 | false | false | false |
log2timeline/dfvfs | dfvfs/file_io/ewf_file_io.py | 2 | 2685 | # -*- coding: utf-8 -*-
"""The EWF image file-like object."""
import pyewf
from dfvfs.file_io import file_object_io
from dfvfs.lib import errors
from dfvfs.lib import ewf
from dfvfs.resolver import resolver
class EWFFile(file_object_io.FileObjectIO):
"""File input/output (IO) object using pyewf."""
def __init__(self, resolver_context, path_spec):
"""Initializes a file input/output (IO) object.
Args:
resolver_context (Context): resolver context.
path_spec (PathSpec): a path specification.
"""
super(EWFFile, self).__init__(resolver_context, path_spec)
self._file_objects = []
def _Close(self):
"""Closes the file-like object."""
# pylint: disable=protected-access
super(EWFFile, self)._Close()
self._file_objects = []
def _OpenFileObject(self, path_spec):
"""Opens the file-like object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pyewf.handle: a file-like object or None.
Raises:
PathSpecError: if the path specification is invalid.
"""
if not path_spec.HasParent():
raise errors.PathSpecError(
'Unsupported path specification without parent.')
parent_path_spec = path_spec.parent
parent_location = getattr(parent_path_spec, 'location', None)
if parent_location and parent_path_spec.IsSystemLevel():
segment_file_paths = pyewf.glob(parent_location)
ewf_handle = pyewf.handle()
ewf_handle.open(segment_file_paths)
else:
# Note that we cannot use pyewf's glob function since it does not
# handle the file system abstraction dfvfs provides.
file_system = resolver.Resolver.OpenFileSystem(
parent_path_spec, resolver_context=self._resolver_context)
segment_file_path_specs = ewf.EWFGlobPathSpec(file_system, path_spec)
if not segment_file_path_specs:
return None
for segment_file_path_spec in segment_file_path_specs:
file_object = resolver.Resolver.OpenFileObject(
segment_file_path_spec, resolver_context=self._resolver_context)
self._file_objects.append(file_object)
ewf_handle = pyewf.handle()
ewf_handle.open_file_objects(self._file_objects)
return ewf_handle
def get_size(self):
"""Retrieves the size of the file-like object.
Returns:
int: size of the RAW storage media image inside the EWF container.
Raises:
IOError: if the file-like object has not been opened.
OSError: if the file-like object has not been opened.
"""
if not self._is_open:
raise IOError('Not opened.')
return self._file_object.get_media_size()
| apache-2.0 | 3,331,993,765,310,807,000 | 28.833333 | 76 | 0.674115 | false | 3.734353 | false | false | false |
HomeRad/TorCleaner | wc/webgui/PageTemplates/PathIterator.py | 1 | 1632 | # -*- coding: iso-8859-1 -*-
##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""
Path Iterator
A TALES Iterator with the ability to use first() and last() on
subpaths of elements.
"""
import TALES
from Expressions import restrictedTraverse, Undefs, getSecurityManager
class Iterator (TALES.Iterator):
def __bobo_traverse__ (self, REQUEST, name):
if name in ('first', 'last'):
path = REQUEST['TraversalRequestNameStack']
names = list(path)
names.reverse()
path[:] = [tuple(names)]
return getattr(self, name)
def same_part (self, name, ob1, ob2):
if name is None:
return ob1 == ob2
if isinstance(name, type('')):
name = name.split('/')
name = filter(None, name)
securityManager = getSecurityManager()
try:
ob1 = restrictedTraverse(ob1, name, securityManager)
ob2 = restrictedTraverse(ob2, name, securityManager)
except Undefs:
return 0
return ob1 == ob2
| gpl-2.0 | -6,210,051,324,867,083,000 | 33.723404 | 78 | 0.585172 | false | 4.387097 | false | false | false |
slesta/PyTcRRD | tc_graph.py | 1 | 4529 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import os
from tcvars import *
import datetime
from pyrrd.rrd import RRD, RRA, DS
from pyrrd.graph import DEF, CDEF, VDEF
from pyrrd.graph import LINE, AREA, GPRINT, PRINT, COMMENT
from pyrrd.graph import ColorAttributes, Graph
def Tcgraph():
exampleNum = 1
min = 60
hour = 60 * 60
day = 24 * 60 * 60
week = 7 * day
month = day * 30
quarter = month * 3
half = 365 * day / 2
year = 365 * day
now = int(time.time())
endTime = now
startTime = str(now - 600)
zacalo = datetime.datetime.today()
    #### index for the web pages
filenam = wwwpath + 'index.html'
soubor = file(filenam, 'w')
soubor.write(wwwhead)
adr_list = os.listdir('rrd/')
for adr in adr_list:
filename = 'rrd/%s' % adr
print filename
graphfile = wwwpath + '%s.png' % adr.replace('.rrd', '')
graphfile_lg = 'graphs/%s.png' % adr.replace('.rrd', '')
hgraphfile_lg = wwwpath + '%sh.png' % adr.replace('.rrd', '')
dgraphfile_lg = 'graphs/%sd.png' % adr.replace('.rrd', '')
wgraphfile_lg = 'graphs/%sw.png' % adr.replace('.rrd', '')
mgraphfile_lg = 'graphs/%sm.png' % adr.replace('.rrd', '')
ygraphfile_lg = 'graphs/%sy.png' % adr.replace('.rrd', '')
now = int(time.time())
endTime = now
startTime = str(now - 600)
myRRD = RRD(filename)
def1 = DEF(rrdfile=myRRD.filename, vname='dsrate', dsName='rate')
def2 = DEF(rrdfile=myRRD.filename, vname='dsceil', dsName='ceil')
def3 = DEF(rrdfile=myRRD.filename, vname='dssent', dsName='sent')
cdef1 = CDEF(vname='sdsrate', rpn='%s,1,*,FLOOR' % def1.vname)
cdef2 = CDEF(vname='sdsceil', rpn='%s,1,*,FLOOR' % def2.vname)
cdef3 = CDEF(vname='sdssent', rpn='%s,8,*,FLOOR' % def3.vname)
area1 = LINE(defObj=cdef1, color='#468A41', legend='rate', width='2')
area2 = LINE(defObj=cdef2, color='#d91161', legend='ceil', width='2')
area3 = AREA(defObj=cdef3, color='#8399f770', legend='sent', width='1')
# area4 = LINE(defObj=cdef2, color='#468A41', legend='', width='1')
# vdef1 = VDEF(vname='rate', rpn='%s,TOTAL' % def1.vname)
# vdef2 = VDEF(vname='ceil', rpn='%s,TOTAL' % def2.vname)
vdef1 = VDEF(vname='rate', rpn='%s,MAXIMUM' % def1.vname)
vdef2 = VDEF(vname='ceil', rpn='%s,MAXIMUM' % def2.vname)
# vdef1 = VDEF(vname='RATE_last', rpn='%s,LAST' % def1.vname)
# vdef2 = VDEF(vname='RSSI_last', rpn='%s,LAST' % def2.vname)
# vdef3 = VDEF(vname='CHANN_last', rpn='%s,LAST' % def3.vname)
# vdef2 = VDEF(vname='myavgtx', rpn='%s,TOTAL' % def1.vname)
# gprint1 = GPRINT(vdef1, 'rate %lg%SMbps')
# gprint2 = GPRINT(vdef2, 'rssi %lg%SdBm')
# gprint3 = GPRINT(vdef3, 'kanal %lg%S')
gprint1 = GPRINT(vdef1, 'rate %lg %Sbits')
gprint2 = GPRINT(vdef2, 'ceil %lg %Sbits')
#gprint3 = GPRINT('2588888', 'ceil %lg %Sbits')
comment1 = COMMENT('textik')
ca = ColorAttributes()
ca.back = '#333333'
ca.canvas = '#333333'
ca.shadea = '#000000'
ca.shadeb = '#111111'
ca.mgrid = '#CCCCCC'
ca.axis = '#FFFFFF'
ca.frame = '#AAAAAA'
ca.font = '#FFFFFF'
ca.arrow = '#FFFFFF'
nadpis = adr + ' - ' + str(datetime.datetime.today())
graphwidth = 800
graphheight = 400
print hgraphfile_lg
gh = Graph(hgraphfile_lg, start=int(time.time()) - min*20, end=endTime, vertical_label='bits/s', color=ca)
gh.width = graphwidth
gh.height = graphheight
text = nadpis
text = text.replace(' ', '_')
gh.title = text
gh.data.extend([
def1, def2, def3,
cdef1, cdef2, cdef3,
area1, area2, area3, #area4,
# area6, area10, area7, area8, area9,
vdef1, gprint1, vdef2, gprint2, comment1,
])
gh.write()
if 'lan' in hgraphfile_lg:
soubor.write('<td><img src="' + str(hgraphfile_lg).replace(wwwpath, '') + '"></td><td><img src="' + str(
hgraphfile_lg).replace(wwwpath, '').replace('lan', 'wan') + '"></td></tr>')
soubor.write(wwwfooter)
soubor.close()
dobabehu = datetime.datetime.today() - zacalo
dobabehu = dobabehu.seconds
    print 'Graph processing time: ' + str(dobabehu) + ' sec.'
Tcgraph() | mit | 4,879,050,164,547,318,000 | 31.804348 | 116 | 0.553469 | false | 2.857323 | false | false | false |
AaroC357/emr-sample-apps | similarity/convert_netflix.py | 3 | 3957 | # Copyright 2011-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#!/usr/bin/env python
# encoding: utf-8
"""
convert_netflix.py
place this file in the same directory as the Netflix 'training_data' folder
containing movie_titles.txt, then run:
$ python convert_netflix.py
To upload the resulting files to an S3 bucket, get s3cmd from:
http://s3tools.org/s3cmd
Then run:
$ s3cmd put --force netflix-data/ratings* s3://<yourbucket>/netflix-data/
convert_netflix.py reformats Netflix Prize data so that each line contains:
userid movieid rating
Output files are chunked for upload to S3 and placed
in a directory called 'netflix-data'. This takes about 20 minutes
on a 2 Ghz laptop and the resulting files are 428MB compressed.
Original format:
userid, rating, date
$ head mv_0000001.txt
1:
1488844,3,2005-09-06
822109,5,2005-05-13
885013,4,2005-10-19
30878,4,2005-12-26
823519,3,2004-05-03
893988,3,2005-11-17
124105,4,2004-08-05
convert_netflix.py converts these input files
to a set of files where each line contains: [userid movieid rating]
$ head user_movie_rating_1.txt
1488844 1 3
822109 1 5
885013 1 4
30878 1 4
823519 1 3
893988 1 3
124105 1 4
1248029 1 3
1842128 1 4
2238063 1 3
Created by Peter Skomoroch on 2009-03-09.
Copyright (c) 2009 Data Wrangling. All rights reserved.
"""
import sys
import os
import re
CHUNK_FILES = True
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
else:
pass
def main(args):
outfile = open('reformatted_movie_titles.txt', 'w')
movie_title_file = open('movie_titles.txt','r')
movie_title_exp=re.compile("([\w]+),([\w]+),(.*)")
movie_titles={}
for line in movie_title_file:
m = movie_title_exp.match(line.strip())
outfile.write('%s\t%s\n' % (m.group(1), m.group(3)))
outfile.close()
movie_title_file.close()
in_dir= args[1] #'training_set'
out_dir = args[2] #'netflix-data'
filenames = [in_dir +'/' + file for file in os.listdir(in_dir)]
rating_count = 0
L = 0
outfile_num = 0
mkdir(out_dir)
outfilename = out_dir+ '/' + 'ratings_'+ str(outfile_num) +'.txt'
output_file = open(outfilename, 'w')
for i, moviefile in enumerate(filenames):
# if i+1 in (10774, 175, 11064, 4472,
# 16265, 9628, 299, 16948, 9368, 8627, 10627): # for sample dataset
if i % 100 == 0: print "processing movie %s " % (i+1)
f = open(moviefile,'r')
for j, line in enumerate(f.readlines()):
if j == 0:
movieid = line.split(':')[0]
else:
(userid, rating, date) = line.split(',')
nextline = ' '.join([userid, movieid, rating+'\n'])
                L += len(nextline) # when this exceeds ~65536 KB, we start a new file
if L/1000 > 65536 and CHUNK_FILES:
output_file.close()
# os.system('gzip ' + outfilename)
outfile_num += 1
outfilename = out_dir+ '/' + \
'ratings_'+ str(outfile_num) +'.txt'
print "--- starting new file: %s" % outfilename
output_file = open(outfilename, 'w')
L = len(nextline)
output_file.write(nextline)
rating_count += 1
f.close()
output_file.close()
# os.system('gzip ' + outfilename)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | 8,507,274,734,254,621,000 | 28.75188 | 78 | 0.622189 | false | 3.243443 | false | false | false |
pywinauto/pywinauto | pywinauto/unittests/test_application_linux.py | 1 | 6365 | # TODO crossplatform join these tests with test_application.py
import sys
import os
import unittest
import subprocess
import time
sys.path.append(".")
from pywinauto.application import WindowSpecification # noqa: E402
if sys.platform.startswith('linux'):
from pywinauto.controls import atspiwrapper # register atspi backend
from pywinauto.linux.application import Application # noqa: E402
from pywinauto.linux.application import AppStartError # noqa: E402
from pywinauto.linux.application import AppNotConnected # noqa: E402
from pywinauto.linux.application import ProcessNotFoundError # noqa: E402
app_name = r"gtk_example.py"
def _test_app():
test_folder = os.path.join(os.path.dirname
(os.path.dirname
(os.path.dirname
(os.path.abspath(__file__)))),
r"apps/Gtk_samples")
sys.path.append(test_folder)
return os.path.join(test_folder, app_name)
sys.path.append(".")
if sys.platform.startswith('linux'):
class ApplicationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
self.subprocess_app = None
self.app = Application()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
if self.subprocess_app:
self.subprocess_app.communicate()
def test__init__(self):
"""Verify that Application instance is initialized or not"""
self.assertRaises(ValueError, Application, backend='unregistered')
def test_not_connected(self):
"""Verify that it raises when the app is not connected"""
self.assertRaises(AppNotConnected, Application().__getattribute__, 'Hiya')
self.assertRaises(AppNotConnected, Application().__getitem__, 'Hiya')
self.assertRaises(AppNotConnected, Application().window_, name='Hiya')
self.assertRaises(AppNotConnected, Application().top_window, )
def test_start_problem(self):
"""Verify start_ raises on unknown command"""
self.assertRaises(AppStartError, Application().start, 'Hiya')
def test_start(self):
"""test start() works correctly"""
self.assertEqual(self.app.process, None)
self.app.start(_test_app())
self.assertNotEqual(self.app.process, None)
def test_connect_by_pid(self):
"""Create an application via subprocess then connect it to Application"""
self.subprocess_app = subprocess.Popen(_test_app().split(), stdout=subprocess.PIPE, shell=False)
time.sleep(1)
self.app.connect(pid=self.subprocess_app.pid)
self.assertEqual(self.app.process, self.subprocess_app.pid)
def test_connect_by_path(self):
"""Create an application via subprocess then connect it to Application by application name"""
self.subprocess_app = subprocess.Popen(_test_app().split(), stdout=subprocess.PIPE, shell=False)
time.sleep(1)
self.app.connect(path=_test_app())
self.assertEqual(self.app.process, self.subprocess_app.pid)
def test_cpu_usage(self):
self.app.start(_test_app())
self.assertGreater(self.app.cpu_usage(0.1), 0)
self.app.wait_cpu_usage_lower(threshold=0.1, timeout=4.0, usage_interval=0.3)
# default timings
self.assertEqual(self.app.cpu_usage(), 0)
# non-existing process
self.app.kill()
self.assertRaises(ProcessNotFoundError, self.app.cpu_usage, 7.8)
# not connected or not started app
self.assertRaises(AppNotConnected, Application().cpu_usage, 12.3)
def test_is_process_running(self):
self.app.start(_test_app())
time.sleep(1)
self.assertTrue(self.app.is_process_running())
self.app.kill()
self.assertFalse(self.app.is_process_running())
def test_kill_killed_app(self):
self.app.start(_test_app())
time.sleep(1)
self.app.kill()
self.assertTrue(self.app.kill())
def test_kill_connected_app(self):
self.subprocess_app = subprocess.Popen(_test_app().split(), stdout=subprocess.PIPE, shell=False)
time.sleep(1)
self.app.connect(pid=self.subprocess_app.pid)
self.app.kill()
# Unlock the subprocess explicity, otherwise
# it's presented in /proc as a zombie waiting for
# the parent process to pickup the return code
self.subprocess_app.communicate()
self.subprocess_app = None
self.assertFalse(self.app.is_process_running())
class WindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.WindowSpecification class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
self.app = Application()
def tearDown(self):
"""Close the application after tests"""
self.app.kill()
def test_app_binding(self):
self.app.start(_test_app())
self.assertEqual(self.app.NonExistingDialog.app, self.app)
self.assertEqual(self.app.Application.Panel.exists(), True)
self.assertEqual(self.app.Application.Panel.app, self.app)
self.assertIsInstance(self.app.Application.find(), atspiwrapper.AtspiWrapper)
wspec = WindowSpecification(dict(name=u"blah", app=self.app))
self.assertEqual(wspec.app, self.app)
def test_app_binding_after_app_restart(self):
self.app.start(_test_app())
old_pid = self.app.process
wspec = self.app.Application.Panel
self.app.kill()
self.assertEqual(wspec.app, self.app)
self.app.start(_test_app())
new_pid = self.app.process
self.assertNotEqual(old_pid, new_pid)
self.assertEqual(wspec.app, self.app)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 5,091,827,292,493,186,000 | 38.78125 | 108 | 0.612726 | false | 4.209656 | true | false | false |
robcarver17/pysystemtrade | sysexecution/orders/base_orders.py | 1 | 14482 | import numpy as np
from copy import copy
import datetime
from syscore.genutils import none_to_object, object_to_none, list_of_ints_with_highest_common_factor_positive_first
from syscore.objects import no_order_id, no_children, no_parent
from sysexecution.trade_qty import tradeQuantity
from sysobjects.production.tradeable_object import tradeableObject
class overFilledOrder(Exception):
pass
class orderType(object):
def __repr__(self):
return self.as_string()
def allowed_types(self):
return ["market"]
def __init__(self, type_string: str):
if type_string is None:
type_string = ""
else:
assert type_string in self.allowed_types(), "Type %s not valid" % type_string
self._type = type_string
def as_string(self):
return self._type
def __eq__(self, other):
return self.as_string() == other.as_string()
class Order(object):
"""
    An order represents a desired or completed trade.
    This is a base class; specific order types are used for virtual and contract level orders.
    Orders need to be comparable with each other to enforce the 'no multiple orders with the same characteristics' rule.
"""
def __init__(
self,
tradeable_object: tradeableObject,
trade: tradeQuantity,
        fill: tradeQuantity = None,
        filled_price: float = None,
        fill_datetime: datetime.datetime = None,
        locked=False,
        order_id: int = no_order_id,
        parent: int = no_parent,
        children: list = no_children,
        active: bool = True,
order_type: orderType = orderType("market"),
**order_info
):
"""
        :param tradeable_object: the tradeableObject this order refers to
:param trade: trade we want to do, int or list
:param fill: fill done so far, int
:param fill_datetime: when fill done (if multiple, is last one)
        :param filled_price: price of fill (if multiple, is last one)
:param locked: if locked an order can't be modified, bool
:param order_id: ID given to orders once in the stack, do not use when creating order
:param parent: int, order ID of parent order in upward stack
:param children: list of int, order IDs of child orders in downward stack
:param active: bool, inactive orders have been filled or cancelled
:param kwargs: other interesting arguments
"""
self._tradeable_object = tradeable_object
(
resolved_trade,
resolved_fill,
) = resolve_inputs_to_order(trade, fill)
if children == []:
children = no_children
self._trade = resolved_trade
self._fill = resolved_fill
self._filled_price = filled_price
self._fill_datetime = fill_datetime
self._locked = locked
self._order_id = order_id
self._parent = parent
self._children = children
self._active = active
self._order_type = order_type
self._order_info = order_info
def __repr__(self):
terse_repr = self.terse_repr()
return terse_repr
def full_repr(self):
terse_repr = self.terse_repr()
full_repr = terse_repr + " %s" % str(self._order_info)
return full_repr
def terse_repr(self):
if self._locked:
lock_str = " LOCKED"
else:
lock_str = ""
if not self._active:
active_str = " INACTIVE"
else:
active_str = ""
return "(Order ID:%s) Type %s for %s, qty %s, fill %s@ price, %s Parent:%s Children:%s%s%s" % (
str(self.order_id),
str(self._order_type),
str(self.key),
str(self.trade),
str(self.fill),
str(self.filled_price),
str(self.parent),
str(self.children),
lock_str,
active_str,
)
@property
def order_info(self):
return self._order_info
@property
def tradeable_object(self):
return self._tradeable_object
@property
def trade(self):
return self._trade
def as_single_trade_qty_or_error(self) -> int:
return self.trade.as_single_trade_qty_or_error()
def replace_required_trade_size_only_use_for_unsubmitted_trades(self, new_trade: tradeQuantity):
# ensure refactoring works
assert type(new_trade) is tradeQuantity
try:
assert len(new_trade)==len(self.trade)
except:
raise Exception("Trying to replace trade of length %d with one of length %d" % (len(self.trade), len(new_trade)))
new_order = copy(self)
new_order._trade = new_trade
return new_order
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, order_type: orderType):
self._order_type = order_type
@property
def fill(self):
return tradeQuantity(self._fill)
@property
def filled_price(self):
return self._filled_price
@property
def fill_datetime(self):
return self._fill_datetime
def fill_order(self, fill_qty: tradeQuantity,
filled_price: float,
fill_datetime: datetime.datetime=None):
        # Fill qty is cumulative, i.e. this is the new total amount filled
try:
assert self.trade.fill_less_than_or_equal_to_desired_trade(
fill_qty)
except:
raise overFilledOrder("Can't fill order with fill %s more than trade quantity %s "
% (str(fill_qty), str(self.trade)))
self._fill = fill_qty
self._filled_price = filled_price
if fill_datetime is None:
fill_datetime = datetime.datetime.now()
self._fill_datetime = fill_datetime
def fill_equals_zero(self) -> bool:
return self.fill.equals_zero()
def fill_equals_desired_trade(self) -> bool:
return self.fill == self.trade
def is_zero_trade(self) -> bool:
return self.trade.equals_zero()
@property
def order_id(self) -> int:
order_id = resolve_orderid(self._order_id)
return order_id
@order_id.setter
def order_id(self, order_id: int):
assert isinstance(order_id, int)
current_id = getattr(self, "_order_id", no_order_id)
if current_id is no_order_id:
self._order_id = order_id
else:
raise Exception("Can't change order id once set")
@property
def children(self) -> list:
return self._children
@children.setter
def children(self, children):
if isinstance(children, int):
children = [children]
if not self.no_children():
raise Exception(
"Can't add children to order which already has them: use add another child"
)
self._children = children
def remove_all_children(self):
self._children = no_children
def no_children(self):
return self.children is no_children
def add_a_list_of_children(self, list_of_new_children: list):
_ = [self.add_another_child(new_child) for new_child in list_of_new_children]
def add_another_child(self, new_child: int):
if self.no_children():
new_children = [new_child]
else:
new_children = self.children + [new_child]
self._children = new_children
@property
def remaining(self) -> tradeQuantity:
return self.trade - self.fill
def create_order_with_unfilled_qty(self):
new_order = copy(self)
new_trade = self.remaining
new_order._trade = new_trade
new_order._fill = new_trade.zero_version()
new_order._filled_price = None
new_order._fill_datetime = None
return new_order
def change_trade_size_proportionally_to_meet_abs_qty_limit(self, max_abs_qty:int):
        # if this is a single leg trade, does a straight replacement;
        # otherwise, scales all legs proportionally so that no leg exceeds max_abs_qty
new_order = copy(self)
old_trade = new_order.trade
new_trade = old_trade.change_trade_size_proportionally_to_meet_abs_qty_limit(max_abs_qty)
new_order = new_order.replace_required_trade_size_only_use_for_unsubmitted_trades(new_trade)
return new_order
def reduce_trade_size_proportionally_so_smallest_leg_is_max_size(self, max_size: int):
new_order = copy(self)
old_trade = new_order.trade
new_trade = old_trade.reduce_trade_size_proportionally_so_smallest_leg_is_max_size(max_size)
new_order = new_order.replace_required_trade_size_only_use_for_unsubmitted_trades(new_trade)
return new_order
def trade_qty_with_lowest_abs_value_trade_from_order_list(self, list_of_orders: list) -> 'Order':
## only deals with single legs right now
new_order = self.single_leg_trade_qty_with_lowest_abs_value_trade_from_order_list(list_of_orders)
return new_order
def single_leg_trade_qty_with_lowest_abs_value_trade_from_order_list(self, list_of_orders: list) -> 'Order':
list_of_trade_qty = [order.trade for order in list_of_orders]
my_trade_qty = self.trade
new_trade = my_trade_qty.single_leg_trade_qty_with_lowest_abs_value_trade_from_list(list_of_trade_qty)
new_order= self.replace_required_trade_size_only_use_for_unsubmitted_trades(new_trade)
return new_order
def change_trade_qty_to_filled_qty(self):
self._trade = self._fill
@property
def parent(self):
parent = resolve_parent(self._parent)
return parent
@parent.setter
def parent(self, parent: int):
if self._parent == no_parent:
self._parent = int(parent)
else:
raise Exception("Can't add parent to order which already has them")
@property
def active(self):
return bool(self._active)
def deactivate(self):
# Once deactivated: filled or cancelled, we can never go back!
self._active = False
def zero_out(self):
zero_version_of_trades = self.trade.zero_version()
self._fill = zero_version_of_trades
self.deactivate()
def as_dict(self):
object_dict = dict(key=self.key)
object_dict["trade"] = list(self.trade)
object_dict["fill"] = list(self.fill)
object_dict["fill_datetime"] = self.fill_datetime
object_dict["filled_price"] = self.filled_price
object_dict["locked"] = self._locked
object_dict["order_id"] = object_to_none(self.order_id, no_order_id)
object_dict["parent"] = object_to_none(self.parent, no_parent)
object_dict["children"] = object_to_none(self.children, no_children)
object_dict["active"] = self.active
object_dict["order_type"] = self.order_type.as_string()
for info_key, info_value in self.order_info.items():
object_dict[info_key] = info_value
return object_dict
@classmethod
def from_dict(Order, order_as_dict):
# will need modifying in child classes
trade = order_as_dict.pop("trade")
object_name = order_as_dict.pop("key")
locked = order_as_dict.pop("locked")
fill = order_as_dict.pop("fill")
filled_price = order_as_dict.pop("filled_price")
fill_datetime = order_as_dict.pop("fill_datetime")
order_id = none_to_object(order_as_dict.pop("order_id"), no_order_id)
parent = none_to_object(order_as_dict.pop("parent"), no_parent)
children = none_to_object(order_as_dict.pop("children"), no_children)
active = order_as_dict.pop("active")
order_type = orderType(order_as_dict.pop("order_type", None))
order_info = order_as_dict
order = Order(
object_name,
trade,
fill=fill,
fill_datetime=fill_datetime,
filled_price=filled_price,
locked=locked,
order_id=order_id,
parent=parent,
children=children,
active=active,
order_type=order_type,
**order_info
)
return order
@property
def key(self):
return self.tradeable_object.key
def is_order_locked(self):
return bool(self._locked)
def lock_order(self):
self._locked = True
def unlock_order(self):
self._locked = False
def same_tradeable_object(self, other):
my_object = self.tradeable_object
other_object = other.tradeable_object
return my_object == other_object
def same_trade_size(self, other):
my_trade = self.trade
other_trade = other.trade
return my_trade == other_trade
def __eq__(self, other):
same_tradeable_object = self.same_tradeable_object(other)
same_trade = self.same_trade_size(other)
return same_tradeable_object and same_trade
def log_with_attributes(self, log):
"""
Returns a new log object with order attributes added
:param log: logger
:return: log
"""
return log
def resolve_inputs_to_order(trade, fill) -> (tradeQuantity, tradeQuantity):
resolved_trade = tradeQuantity(trade)
if fill is None:
resolved_fill = resolved_trade.zero_version()
else:
resolved_fill = tradeQuantity(fill)
return resolved_trade, resolved_fill
def resolve_orderid(order_id:int):
if order_id is no_order_id:
return no_order_id
if order_id is None:
return no_order_id
order_id= int(order_id)
return order_id
def resolve_parent(parent: int):
if parent is no_parent:
return no_parent
if parent is None:
return no_parent
parent= int(parent)
return parent
def resolve_multi_leg_price_to_single_price(trade_list: tradeQuantity, price_list: list) -> float:
if len(price_list)==0:
## This will be the case when an order is first created or has no fills
return None
if len(price_list)==1:
return price_list[0]
assert len(price_list) == len(trade_list)
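    # Illustrative example (made-up numbers): for a spread trade with
    # trade_list = [2, -2] and price_list = [101.0, 100.0], the trade list is
    # reduced to its smallest common form [1, -1], so the single fill price is
    # (1 * 101.0) + (-1 * 100.0) = 1.0, i.e. the spread price.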
trade_list_as_common_factor = list_of_ints_with_highest_common_factor_positive_first(trade_list)
fill_price = [x * y for x,y in zip(trade_list_as_common_factor, price_list)]
fill_price = sum(fill_price)
if np.isnan(fill_price):
return None
return fill_price | gpl-3.0 | -2,154,468,956,604,465,000 | 29.684322 | 125 | 0.609101 | false | 3.759605 | false | false | false |
lzkill/tor-info | tor-desc.py | 1 | 1497 | #!/usr/bin/python
#------------------------------------------
#
# A script to periodically download a
# Tor relay's descriptor and write it to
# the disk
#
# Author : Luiz Kill
# Date : 21/11/2014
#
# http://lzkill.com
#
#------------------------------------------
import os
import sys
import time
from stem.descriptor import DocumentHandler
from stem.descriptor.remote import DescriptorDownloader
DOWNLOAD_DELAY = 60.0
FINGERPRINT = [""]
PATHNAME="/var/lib/rpimonitor/stat/tor_desc"
def main():
try:
dump = open(PATHNAME,"wb")
downloader = DescriptorDownloader()
while True:
query = downloader.get_server_descriptors(fingerprints=FINGERPRINT)
for desc in query.run():
dump.seek(0)
dump.write("Nickname " + str(desc.nickname)+"\n")
dump.write("Fingerprint " + "".join(str(desc.fingerprint).split())+"\n")
dump.write("Published " + str(desc.published)+"\n")
dump.write("Address " + str(desc.address)+"\n")
dump.write("Version " + str(desc.tor_version)+"\n")
dump.write("Uptime " + str(desc.uptime)+"\n")
dump.write("Average_Bandwidth " + str(desc.average_bandwidth)+"\n")
dump.write("Burst_Bandwidth " + str(desc.burst_bandwidth)+"\n")
dump.write("Observed_Bandwidth " + str(desc.observed_bandwidth)+"\n")
dump.write("Hibernating " + str(desc.hibernating)+"\n")
time.sleep(DOWNLOAD_DELAY)
except Exception as exc:
print 'Unable to retrieve the server descriptors: %s' % exc
if __name__ == '__main__':
main()
| mit | 3,801,384,245,225,023,000 | 25.732143 | 76 | 0.629927 | false | 3.198718 | false | false | false |
ucb-sejits/opentuner | tests/test_api.py | 1 | 4002 | from __future__ import print_function
import unittest
import argparse
import opentuner
from opentuner.api import TuningRunManager
from opentuner.measurement.interface import DefaultMeasurementInterface
from opentuner.resultsdb.models import Result
from opentuner.search.manipulator import ConfigurationManipulator, IntegerParameter
__author__ = 'Chick Markley [email protected] U.C. Berkeley'
class TestApi(unittest.TestCase):
def test_api_start_and_stop(self):
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
args = parser.parse_args(args=[])
# we set up an api instance but only run it once
manipulator = ConfigurationManipulator()
manipulator.add_parameter(IntegerParameter('x', -10, 10))
interface = DefaultMeasurementInterface(args=args,
manipulator=manipulator,
project_name='examples',
program_name='api_test',
program_version='0.1')
api = TuningRunManager(interface, args)
desired_result = api.get_next_desired_result()
cfg = desired_result.configuration.data['x']
result = Result(time=float(cfg))
api.report_result(desired_result, result)
# something changes and now we want to shut down the api
# and start a new one, this used to raise an exception
api.finish()
manipulator = ConfigurationManipulator()
manipulator.add_parameter(IntegerParameter('x', -100, 100))
interface = DefaultMeasurementInterface(args=args,
manipulator=manipulator,
project_name='examples',
program_name='api_test',
program_version='0.1')
api = TuningRunManager(interface, args)
desired_result = api.get_next_desired_result()
cfg = desired_result.configuration.data['x']
result = Result(time=float(cfg))
api.report_result(desired_result, result)
self.assertIsNotNone(api.get_best_configuration())
api.finish()
def test_small_range(self):
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
args = parser.parse_args(args=[])
manipulator = ConfigurationManipulator()
manipulator.add_parameter(IntegerParameter('x', -10, 10))
interface = DefaultMeasurementInterface(args=args,
manipulator=manipulator,
project_name='examples',
program_name='api_test',
program_version='0.1')
api = TuningRunManager(interface, args)
configs_tried = set()
for x in xrange(40):
desired_result = api.get_next_desired_result()
if desired_result is None:
# The search space for this example is very small, so sometimes
# the techniques have trouble finding a config that hasn't already
# been tested. Change this to a continue to make it try again.
break
cfg = desired_result.configuration.data['x']
result = Result(time=float(cfg))
api.report_result(desired_result, result)
configs_tried.add(cfg)
best_cfg = api.get_best_configuration()
api.finish()
self.assertEqual(best_cfg['x'], -10.0)
# TODO: should this have tried everything in range?
# print(configs_tried)
# for x in range(-10, 11):
# print(x)
# self.assertTrue(
# x in configs_tried,
# "{} should have been in tried set {}".format(x, configs_tried)) | mit | -3,940,359,246,342,667,000 | 41.585106 | 83 | 0.565967 | false | 4.631944 | true | false | false |
hersche/MultiSms | contents/code/widget.py | 1 | 6282 | from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.QtGui import QGraphicsLinearLayout
from PyKDE4.plasma import Plasma
from PyKDE4 import plasmascript
from PyKDE4.kdeui import KMessageBox
from PyKDE4.kdecore import i18n, KStandardDirs
from pluginmanager import PluginManager
import providerplugins.Provider
import adressplugins.AdressPlugin
import ConfigParser, os, re
class Multimobilewidget(plasmascript.Applet):
def __init__(self, parent, args=None):
plasmascript.Applet.__init__(self, parent)
def init(self):
self.setHasConfigurationInterface(False)
self.setAspectRatioMode(Plasma.Square)
self.theme = Plasma.Svg(self)
self.setBackgroundHints(Plasma.Applet.DefaultBackground)
self.layout = QGraphicsLinearLayout(Qt.Vertical, self.applet)
self.getLogin()
self.setHasConfigurationInterface(True)
self.label = Plasma.Label(self.applet)
self.label.setText(i18n("Welcome to the Multimobilewidget"))
nrlabel = Plasma.Label(self.applet)
nrlabel.setText(i18n("Phonenr(s)"))
self.messagelabel = Plasma.Label(self.applet)
self.messagelabel.setText(i18n("Message - 0 signs used"))
self.nrfield = Plasma.LineEdit()
self.messageText = Plasma.TextEdit(self.applet)
self.messageText.nativeWidget()
sendButton = Plasma.PushButton(self.applet)
sendButton.setText(i18n("Send the SMS!"))
sendButton.resize(20, 40)
configButton = Plasma.PushButton(self.applet)
configButton.setText("Config")
configButton.resize(20, 40)
self.layout.addItem(self.label)
self.layout.addItem(nrlabel)
self.layout.addItem(self.nrfield)
self.layout.addItem(self.messagelabel)
self.layout.addItem(self.messageText)
self.layout.addItem(sendButton)
self.layout.addItem(configButton)
self.applet.setLayout(self.layout)
self.connect(sendButton, SIGNAL("clicked()"), self.onSendClick)
self.connect(configButton, SIGNAL("clicked()"), self.onConfigClick)
self.connect(self.messageText, SIGNAL("textChanged()"), self.onTextChanged)
fullPath = str(self.package().path())
self.providerPluginManager = PluginManager("multimobilewidget/contents/code/providerplugins/","", providerplugins.Provider.Provider)
self.providerpluginlist = self.providerPluginManager.getPluginClassList()
for provider in self.providerpluginlist:
self.ui.providerList.addItem(provider.getObjectname())
print provider.getObjectname()
self.ui.providerList.setCurrentRow(0)
self.adressplugins = PluginManager("multimobilewidget/contents/code/adressplugins/","", adressplugins.AdressPlugin.AdressPlugin)
self.adresspluginlist = self.adressplugins.getPluginClassList()
self.adressList = list()
def onConfigClick(self):
from config import config
self.startAssistant = config(self.providerPluginManager, self.adressplugins)
self.startAssistant.show()
self.connect(self.startAssistant, SIGNAL("finished(int)"), self.getLogin)
def connectToAkonadi(self):
self.akonadiEngine = Plasma.DataEngine()
self.akonadiEngine.setName("akonadi")
def onSendClick(self):
for provider in self.providerpluginlist:
if(provider.getObjectname() == self.ui.providerList.selectedItems()[0].text()):
sms = provider
if self.ui.smstext.toPlainText() != "":
if self.ui.phonenr.text() != "":
self.getLogin()
try:
sms.setConfig(self.config)
except Exception:
self.onConfigClick()
return
sms.clearNrs()
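                # Pull the international-format numbers out of the input field;
                # e.g. "+41791234567 +41781234567" (made-up numbers) yields
                # ["+41791234567", "+41781234567"]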
for nr in re.findall("(\+\d*)", self.ui.phonenr.text()):
sms.addNr(nr)
sms.setText(self.ui.smstext.toPlainText())
savenr = self.ui.phonenr.text()
try:
sms.execute()
# self.notification.setText(i18n("Wurde erfolgreich an <i>%1</i> geschickt!").arg(savenr ))
# self.notification.setTitle("Erfolg!")
# self.notification.sendEvent()
KMessageBox.information(None, i18n("SMS sendet successfully to " + savenr + ". Service: "+sms.getProvidername()), i18n("Success!"))
except Exception, error:
KMessageBox.error(None, i18n(error.message), i18n("Sendproblems"))
self.ui.phonenr.clear()
self.ui.smstext.clear()
else:
KMessageBox.error(None, i18n("Please fill in a phonenr"), i18n("Please fill in a phonenr"))
else:
KMessageBox.error(None, i18n("Please fill in a Text"), i18n("Please fill in a Text"))
def onTextChanged(self):
tmp = self.messageText.nativeWidget()
if(len(tmp.toPlainText()) < 160):
self.messagelabel.setText(i18n("Message - ") + str(len(tmp.toPlainText())) + i18n(" signs used"))
else:
# count how many sms are used and update the status
self.messagelabel.setText(i18n("Message - ") + str(len(tmp.toPlainText())) + i18n(" signs used"))
def getLogin(self):
if(os.path.isfile(os.getenv("HOME") + "/.multimobile.cfg")):
try:
self.config = ConfigParser.ConfigParser()
self.config.readfp(open(os.getenv("HOME") + "/.multimobile.cfg"))
except Exception, e:
print e
from config import config
self.startAssistant = config(self.providerPluginManager, self.adressplugins, False)
self.startAssistant.show()
self.connect(self.startAssistant, SIGNAL("finished(int)"), self.getLogin)
else:
from config import config
self.startAssistant = config(self.providerPluginManager, self.adressplugins, True)
self.startAssistant.show()
self.connect(self.startAssistant, SIGNAL("finished(int)"), self.getLogin)
def CreateApplet(parent):
return Multimobilewidget(parent)
| gpl-2.0 | -307,377,277,973,551,400 | 46.590909 | 151 | 0.632442 | false | 3.868227 | true | false | false |
archlinux/archweb | devel/management/commands/generate_keyring.py | 1 | 2864 | # -*- coding: utf-8 -*-
"""
generate_keyring command
Assemble a GPG keyring with all known developer keys.
Usage: ./manage.py generate_keyring <keyserver> <keyring_path>
"""
from django.core.management.base import BaseCommand, CommandError
import logging
import subprocess
import sys
from devel.models import MasterKey, UserProfile
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s -> %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stderr)
logger = logging.getLogger()
class Command(BaseCommand):
args = "<keyserver> <keyring_path> [ownertrust_path]"
help = "Assemble a GPG keyring with all known developer keys."
def add_arguments(self, parser):
        parser.add_argument('args', nargs='*', help='<keyserver> <keyring_path> [ownertrust_path]')
def handle(self, *args, **options):
v = int(options.get('verbosity', None))
if v == 0:
logger.level = logging.ERROR
elif v == 1:
logger.level = logging.INFO
elif v >= 2:
logger.level = logging.DEBUG
if len(args) < 2:
raise CommandError("keyserver and keyring_path must be provided")
generate_keyring(args[0], args[1])
if len(args) > 2:
generate_ownertrust(args[2])
def generate_keyring(keyserver, keyring):
logger.info("getting all known key IDs")
# Screw you Django, for not letting one natively do value != <empty string>
key_ids = UserProfile.objects.filter(
pgp_key__isnull=False).extra(where=["pgp_key != ''"]).values_list(
"pgp_key", flat=True)
logger.info("%d keys fetched from user profiles", len(key_ids))
master_key_ids = MasterKey.objects.values_list("pgp_key", flat=True)
logger.info("%d keys fetched from master keys", len(master_key_ids))
# GPG is stupid and interprets any filename without path portion as being
# in ~/.gnupg/. Fake it out if we just get a bare filename.
if '/' not in keyring:
keyring = './%s' % keyring
gpg_cmd = ["gpg", "--no-default-keyring", "--keyring", keyring,
"--keyserver", keyserver, "--recv-keys"]
logger.info("running command: %r", gpg_cmd)
gpg_cmd.extend(key_ids)
gpg_cmd.extend(master_key_ids)
subprocess.check_call(gpg_cmd)
logger.info("keyring at %s successfully updated", keyring)
TRUST_LEVELS = {
'unknown': 0,
'expired': 1,
'undefined': 2,
'never': 3,
'marginal': 4,
'fully': 5,
'ultimate': 6,
}
def generate_ownertrust(trust_path):
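    # Each emitted line has the form "<pgp key id>:<trust level>:", the line
    # format accepted by `gpg --import-ownertrust`; 4 corresponds to marginal
    # trust in the TRUST_LEVELS map above.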
master_key_ids = MasterKey.objects.values_list("pgp_key", flat=True)
with open(trust_path, "w") as trustfile:
for key_id in master_key_ids:
trustfile.write("%s:%d:\n" % (key_id, TRUST_LEVELS['marginal']))
logger.info("trust file at %s created or overwritten", trust_path)
# vim: set ts=4 sw=4 et:
| gpl-2.0 | 6,337,956,910,007,053,000 | 29.795699 | 79 | 0.632332 | false | 3.479951 | false | false | false |
lpryszcz/bin | sam2aligned.py | 1 | 1557 | #!/usr/bin/env python
"""
Parse SAM file and output only pairs with at least one read aligned.
Compatible with bowtie/bwa output - one entry per read.
SAM file has to be sorted by read name.
USAGE:
samtools view -St yeast_chromosomes.fa.fai 409.sam -f3 | sam2aligned.py > 409.aligned.sam
"""
from datetime import datetime
import os, sys
def int2bin( n, count=12 ):
"""returns the binary of integer n, using count number of digits
@ http://www.daniweb.com/software-development/python/code/216539
"""
return "".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def sam2unique( handle = sys.stdin ):
"""
"""
i = k = aligned = 0
pName = lines = ''
refs=0
for l in handle:
#write header info
if l.startswith('@'):
sys.stdout.write( l )
continue
#name,flag,contig,pos,mapq,cigar,paired,pairStart,isize,seq,qual
name,flag,ref = l.split('\t')[:3]
if name != pName:
i+=1
if lines and refs:
sys.stdout.write( lines )
aligned += 1
refs = 0
lines = l
if ref != "*":
refs += 1
pName = name
else:
#reads
if ref != "*":
refs += 1
lines += l
if lines and refs:
aligned += 1
sys.stdout.write( lines )
sys.stderr.write( 'Processed pairs:\t%s\nAligned pairs:\t%s [%.2f%s]\n' % ( i,aligned,aligned*100.0/i,'%' ) ) #,bothUnique,bothUnique*100.0/pairs,'%'
if __name__=='__main__':
T0=datetime.now()
sam2unique()
sys.stderr.write( "Elapsed time: %s\n" % ( datetime.now()-T0 ) )
| gpl-3.0 | 4,649,627,124,544,415,000 | 24.52459 | 151 | 0.589595 | false | 3.052941 | false | false | false |
aaxelb/SHARE | share/regulate/steps/deduplicate.py | 2 | 2376 | from share.regulate.steps import GraphStep
class Deduplicate(GraphStep):
"""Look for duplicate nodes and merge/discard them
Example config (YAML):
```yaml
- namespace: share.regulate.steps.graph
name: deduplicate
```
"""
MAX_MERGES = 100
# map from concrete type to set of fields used to dedupe
DEDUPLICATION_CRITERIA = {
# works and agents may be merged if duplicate identifiers are merged
# 'abstractcreativework': {},
# 'abstractagent': {},
'abstractagentworkrelation': {'creative_work', 'agent', 'type'},
'abstractagentrelation': {'subject', 'related', 'type'},
'abstractworkrelation': {'subject', 'related', 'type'},
'workidentifier': {'uri'},
'agentidentifier': {'uri'},
'subject': {'name', 'parent', 'central_synonym'},
'tag': {'name'},
'throughtags': {'tag', 'creative_work'},
# 'award': {},
'throughawards': {'funder', 'award'},
'throughsubjects': {'subject', 'creative_work'},
}
def regulate_graph(self, graph):
# naive algorithm, O(n*m) (n: number of nodes, m: number of merges)
# but merges shouldn't be common, so probably not worth optimizing
count = 0
while self._merge_first_dupe(graph):
count += 1
if count > self.MAX_MERGES:
self.error('Way too many deduplications')
return
def _merge_first_dupe(self, graph):
dupe_index = {}
for node in graph:
node_key = self._get_node_key(node)
if node_key:
other_node = dupe_index.get(node_key)
if other_node:
graph.merge_nodes(node, other_node)
return True
dupe_index[node_key] = node
return False
def _get_node_key(self, node):
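        # Illustrative example: a Tag node named "physics" would produce the
        # key ('tag', ('physics',)); criterion order follows the (unordered)
        # set in DEDUPLICATION_CRITERIA above.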
criteria = self.DEDUPLICATION_CRITERIA.get(node.concrete_type)
if not criteria:
return None
return (
node.concrete_type,
tuple(
self._get_criterion_value(node, criterion)
for criterion in criteria
)
)
def _get_criterion_value(self, node, criterion_name):
if criterion_name == 'type':
return node.type
return node[criterion_name]
| apache-2.0 | 6,816,239,275,741,650,000 | 32.942857 | 76 | 0.551768 | false | 4.110727 | false | false | false |
PaloAltoNetworks/minemeld-core | minemeld/ft/table.py | 1 | 17277 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Table implementation based on LevelDB (https://github.com/google/leveldb).
This is a sort of poor, lazy man's implementation of an IndexedDB-style schema.
**KEYS**
Numbers are 8-bit unsigned.
- Schema Version: (0)
- Index Last Global Id: (0,1, <indexnum>)
- Last Update Key: (0,2)
- Number of Indicators: (0,3)
- Table Last Global ID: (0,4)
- Custom Metadata: (0,5)
- Indicator Version: (1,0,<indicator>)
- Indicator: (1,1,<indicator>)
**INDICATORS**
Each indicator has 2 entries associated with it in the DB: a version and a value.
The version number is used to track indicator existence and versioning.
When an indicator value is updated, its version number is incremented.
The version number is a 64-bit big-endian unsigned int.
The value of an indicator is a 64-bit big-endian unsigned int (the version)
followed by a dump of a dictionary of attributes in JSON format.
To iterate over all the indicators versions iterate from key (1,0) to key
(1,1) excluded.
NULL indicators are not allowed.
**INDEXES**
Indicators are stored in alphabetical order. Indexes are secondary indexes
on indicator attributes.
Each index has an associated id in the range 0 - 255. The attribute associated
to the index is stored at (0,1,<index id>); if the key does not exist, the
index does not exist.
There is also a Last Global Id per index, used to index indicators with the
same attribute value. Each time a new indicator is added to the index, the
Last Global Id is incremented. The Last Global Id of an index is stored at
(2,<index id>,0) as a 64-bit big-endian unsigned int.
Each entry in the index is stored with a key
(2,<index id>,0xF0,<encoded value>,<last global id>) and value
(<version>,<indicator>). <encoded value> depends on the type of attribute.
When iterating over an index, the value of an index entry is loaded and if
the version does not match with current indicator version the index entry is
deleted. This permits a sort of lazy garbage collection.
To retrieve all the indicators with a specific attribute value, just iterate
over the keys between (2,<index id>,0xF0,<encoded value>) and
(2,<index id>,0xF0,<encoded value>,0xFF..FF)
"""
import os
import plyvel
import struct
import ujson
import time
import logging
import shutil
import gevent
SCHEMAVERSION_KEY = struct.pack("B", 0)
START_INDEX_KEY = struct.pack("BBB", 0, 1, 0)
END_INDEX_KEY = struct.pack("BBB", 0, 1, 0xFF)
LAST_UPDATE_KEY = struct.pack("BB", 0, 2)
NUM_INDICATORS_KEY = struct.pack("BB", 0, 3)
TABLE_LAST_GLOBAL_ID = struct.pack("BB", 0, 4)
CUSTOM_METADATA = struct.pack("BB", 0, 5)
LOG = logging.getLogger(__name__)
class InvalidTableException(Exception):
pass
class Table(object):
def __init__(self, name, truncate=False, bloom_filter_bits=0):
if truncate:
try:
shutil.rmtree(name)
except:
pass
self.db = None
self._compact_glet = None
self.db = plyvel.DB(
name,
create_if_missing=True,
bloom_filter_bits=bloom_filter_bits
)
self._read_metadata()
self.compact_interval = int(os.environ.get('MM_TABLE_COMPACT_INTERVAL', 3600 * 6))
self.compact_delay = int(os.environ.get('MM_TABLE_COMPACT_DELAY', 3600))
self._compact_glet = gevent.spawn(self._compact_loop)
def _init_db(self):
self.last_update = 0
self.indexes = {}
self.num_indicators = 0
self.last_global_id = 0
batch = self.db.write_batch()
batch.put(SCHEMAVERSION_KEY, struct.pack("B", 1))
batch.put(LAST_UPDATE_KEY, struct.pack(">Q", self.last_update))
batch.put(NUM_INDICATORS_KEY, struct.pack(">Q", self.num_indicators))
batch.put(TABLE_LAST_GLOBAL_ID, struct.pack(">Q", self.last_global_id))
batch.write()
def _read_metadata(self):
sv = self._get(SCHEMAVERSION_KEY)
if sv is None:
return self._init_db()
sv = struct.unpack("B", sv)[0]
if sv == 0:
# add table last global id
self._upgrade_from_s0()
elif sv == 1:
pass
else:
raise InvalidTableException("Schema version not supported")
self.indexes = {}
ri = self.db.iterator(
start=START_INDEX_KEY,
stop=END_INDEX_KEY
)
with ri:
for k, v in ri:
_, _, indexid = struct.unpack("BBB", k)
if v in self.indexes:
raise InvalidTableException("2 indexes with the same name")
self.indexes[v] = {
'id': indexid,
'last_global_id': 0
}
for i in self.indexes:
lgi = self._get(self._last_global_id_key(self.indexes[i]['id']))
if lgi is not None:
self.indexes[i]['last_global_id'] = struct.unpack(">Q", lgi)[0]
else:
self.indexes[i]['last_global_id'] = -1
t = self._get(LAST_UPDATE_KEY)
if t is None:
raise InvalidTableException("LAST_UPDATE_KEY not found")
self.last_update = struct.unpack(">Q", t)[0]
t = self._get(NUM_INDICATORS_KEY)
if t is None:
raise InvalidTableException("NUM_INDICATORS_KEY not found")
self.num_indicators = struct.unpack(">Q", t)[0]
t = self._get(TABLE_LAST_GLOBAL_ID)
if t is None:
raise InvalidTableException("TABLE_LAST_GLOBAL_ID not found")
self.last_global_id = struct.unpack(">Q", t)[0]
def _get(self, key):
try:
result = self.db.get(key)
except KeyError:
return None
return result
def __del__(self):
self.close()
def get_custom_metadata(self):
cmetadata = self._get(CUSTOM_METADATA)
if cmetadata is None:
return None
return ujson.loads(cmetadata)
def set_custom_metadata(self, metadata=None):
if metadata is None:
self.db.delete(CUSTOM_METADATA)
return
cmetadata = ujson.dumps(metadata)
self.db.put(CUSTOM_METADATA, cmetadata)
def close(self):
if self.db is not None:
self.db.close()
if self._compact_glet is not None:
self._compact_glet.kill()
self.db = None
self._compact_glet = None
def exists(self, key):
if type(key) == unicode:
key = key.encode('utf8')
ikeyv = self._indicator_key_version(key)
return (self._get(ikeyv) is not None)
def get(self, key):
if type(key) == unicode:
key = key.encode('utf8')
ikey = self._indicator_key(key)
value = self._get(ikey)
if value is None:
return None
# skip version
return ujson.loads(value[8:])
def delete(self, key):
if type(key) == unicode:
key = key.encode('utf8')
ikey = self._indicator_key(key)
ikeyv = self._indicator_key_version(key)
if self._get(ikeyv) is None:
return
batch = self.db.write_batch()
batch.delete(ikey)
batch.delete(ikeyv)
self.num_indicators -= 1
batch.put(NUM_INDICATORS_KEY, struct.pack(">Q", self.num_indicators))
batch.write()
def _indicator_key(self, key):
return struct.pack("BB", 1, 1) + key
def _indicator_key_version(self, key):
return struct.pack("BB", 1, 0) + key
def _index_key(self, idxid, value, lastidxid=None):
key = struct.pack("BBB", 2, idxid, 0xF0)
if type(value) == unicode:
value = value.encode('utf8')
if type(value) == str:
key += struct.pack(">BL", 0x0, len(value))+value
elif type(value) == int or type(value) == long:
key += struct.pack(">BQ", 0x1, value)
else:
raise ValueError("Unhandled value type: %s" % type(value))
if lastidxid is not None:
key += struct.pack(">Q", lastidxid)
return key
def _last_global_id_key(self, idxid):
return struct.pack("BBB", 2, idxid, 0)
def create_index(self, attribute):
if attribute in self.indexes:
return
if len(self.indexes) == 0:
idxid = 0
else:
idxid = max([i['id'] for i in self.indexes.values()])+1
self.indexes[attribute] = {
'id': idxid,
'last_global_id': -1
}
batch = self.db.write_batch()
batch.put(struct.pack("BBB", 0, 1, idxid), attribute)
batch.write()
def put(self, key, value):
if type(key) == unicode:
key = key.encode('utf8')
if type(value) != dict:
raise ValueError()
ikey = self._indicator_key(key)
ikeyv = self._indicator_key_version(key)
exists = self._get(ikeyv)
self.last_global_id += 1
cversion = self.last_global_id
now = time.time()
self.last_update = now
batch = self.db.write_batch()
batch.put(ikey, struct.pack(">Q", cversion)+ujson.dumps(value))
batch.put(ikeyv, struct.pack(">Q", cversion))
batch.put(LAST_UPDATE_KEY, struct.pack(">Q", self.last_update))
batch.put(TABLE_LAST_GLOBAL_ID, struct.pack(">Q", self.last_global_id))
if exists is None:
self.num_indicators += 1
batch.put(
NUM_INDICATORS_KEY,
struct.pack(">Q", self.num_indicators)
)
for iattr, index in self.indexes.iteritems():
v = value.get(iattr, None)
if v is None:
continue
index['last_global_id'] += 1
idxkey = self._index_key(index['id'], v, index['last_global_id'])
batch.put(idxkey, struct.pack(">Q", cversion) + key)
batch.put(
self._last_global_id_key(index['id']),
struct.pack(">Q", index['last_global_id'])
)
batch.write()
def query(self, index=None, from_key=None, to_key=None,
include_value=False, include_stop=True, include_start=True,
reverse=False):
if type(from_key) is unicode:
from_key = from_key.encode('ascii', 'replace')
if type(to_key) is unicode:
to_key = to_key.encode('ascii', 'replace')
if index is None:
return self._query_by_indicator(
from_key=from_key,
to_key=to_key,
include_value=include_value,
include_stop=include_stop,
include_start=include_start,
reverse=reverse
)
return self._query_by_index(
index,
from_key=from_key,
to_key=to_key,
include_value=include_value,
include_stop=include_stop,
include_start=include_start,
reverse=reverse
)
def _query_by_indicator(self, from_key=None, to_key=None,
include_value=False, include_stop=True,
include_start=True, reverse=False):
if from_key is None:
from_key = struct.pack("BB", 1, 1)
include_stop = False
else:
from_key = self._indicator_key(from_key)
if to_key is None:
to_key = struct.pack("BB", 1, 2)
include_start = False
else:
to_key = self._indicator_key(to_key)
ri = self.db.iterator(
start=from_key,
stop=to_key,
include_stop=include_stop,
include_start=include_start,
reverse=reverse,
include_value=False
)
with ri:
for ekey in ri:
ekey = ekey[2:]
if include_value:
yield ekey.decode('utf8', 'ignore'), self.get(ekey)
else:
yield ekey.decode('utf8', 'ignore')
def _query_by_index(self, index, from_key=None, to_key=None,
include_value=False, include_stop=True,
include_start=True, reverse=False):
if index not in self.indexes:
raise ValueError()
idxid = self.indexes[index]['id']
if from_key is None:
from_key = struct.pack("BBB", 2, idxid, 0xF0)
include_start = False
else:
from_key = self._index_key(idxid, from_key)
if to_key is None:
to_key = struct.pack("BBB", 2, idxid, 0xF1)
include_stop = False
else:
to_key = self._index_key(
idxid,
to_key,
lastidxid=0xFFFFFFFFFFFFFFFF
)
ldeleted = 0
ri = self.db.iterator(
start=from_key,
stop=to_key,
include_value=True,
include_start=include_start,
include_stop=include_stop,
reverse=reverse
)
with ri:
for ikey, ekey in ri:
iversion = struct.unpack(">Q", ekey[:8])[0]
ekey = ekey[8:]
evalue = self._get(self._indicator_key_version(ekey))
if evalue is None:
# LOG.debug("Key does not exist")
# key does not exist
self.db.delete(ikey)
ldeleted += 1
continue
cversion = struct.unpack(">Q", evalue)[0]
if iversion != cversion:
# index value is old
# LOG.debug("Version mismatch")
self.db.delete(ikey)
ldeleted += 1
continue
if include_value:
yield ekey.decode('utf8', 'ignore'), self.get(ekey)
else:
yield ekey.decode('utf8', 'ignore')
LOG.info('Deleted in scan of {}: {}'.format(index, ldeleted))
def _compact_loop(self):
gevent.sleep(self.compact_delay)
while True:
try:
gevent.idle()
counter = 0
for idx in self.indexes.keys():
for i in self.query(index=idx, include_value=False):
if counter % 512 == 0:
gevent.sleep(0.001) # yield to other greenlets
counter += 1
except gevent.GreenletExit:
break
except:
LOG.exception('Exception in _compact_loop')
try:
gevent.sleep(self.compact_interval)
except gevent.GreenletExit:
break
def _upgrade_from_s0(self):
LOG.info('Upgrading from schema version 0 to schema version 1')
LOG.info('Loading indexes...')
indexes = {}
ri = self.db.iterator(
start=START_INDEX_KEY,
stop=END_INDEX_KEY
)
with ri:
for k, v in ri:
_, _, indexid = struct.unpack("BBB", k)
if v in indexes:
raise InvalidTableException("2 indexes with the same name")
indexes[v] = {
'id': indexid,
'last_global_id': 0
}
for i in indexes:
lgi = self._get(self._last_global_id_key(indexes[i]['id']))
if lgi is not None:
indexes[i]['last_global_id'] = struct.unpack(">Q", lgi)[0]
else:
indexes[i]['last_global_id'] = -1
LOG.info('Scanning indexes...')
last_global_id = 0
for i, idata in indexes.iteritems():
from_key = struct.pack("BBB", 2, idata['id'], 0xF0)
include_start = False
to_key = struct.pack("BBB", 2, idata['id'], 0xF1)
include_stop = False
ri = self.db.iterator(
start=from_key,
stop=to_key,
include_value=True,
include_start=include_start,
include_stop=include_stop,
reverse=False
)
with ri:
for ikey, ekey in ri:
iversion = struct.unpack(">Q", ekey[:8])[0]
if iversion > last_global_id:
last_global_id = iversion+1
LOG.info('Last global id: {}'.format(last_global_id))
batch = self.db.write_batch()
batch.put(SCHEMAVERSION_KEY, struct.pack("B", 1))
batch.put(TABLE_LAST_GLOBAL_ID, struct.pack(">Q", last_global_id))
batch.write()
| apache-2.0 | -6,170,540,603,090,287,000 | 30.876384 | 90 | 0.545465 | false | 3.843604 | false | false | false |
tyrchen/vint | vint/vint.py | 1 | 5823 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import os
from cerf_api import Cerf
from file_util import Template, FileUtil
from misc import calc_time_spent
__author__ = 'tchen'
logger = logging.getLogger(__name__)
class InterviewManager(object):
def __init__(self, id=None):
self.id = id
if self.id:
self.exam_path = 'exam%s' % self.id
else:
self.exam_path = None
self.code = None
self.interview = None
self.exam_id = None
self.cerf_api = None
def generate_environment(self):
# create exam dir
os.mkdir(self.exam_path)
# write .interview.json for further use
Template.create_exam_config(os.path.join(os.getcwd(), self.exam_path), self.interview)
# retrieve exam and write general instruction file
exam = self.cerf_api.exam.retrieve(self.exam_id)
if len(exam) == 0:
print('Can not retrieve proper exam by id %s. Please contact your hiring manager.' % self.exam_id)
exit(-1)
Template.create_exam_instruction(self.exam_path, self.interview, exam)
# generate cases
for case in exam['cases']:
self.generate_case(case)
def generate_case(self, case):
os.mkdir('%s/case%s' % (self.exam_path, case['position']))
path = os.path.join(os.getcwd(), self.exam_path, 'case%s' % str(case['position']))
# write .case.json for further use
Template.create_case_config(path, case)
# write instruction
Template.create_case_instruction(path, case)
# write code
Template.create_case_code(path, case)
def start(self):
code = raw_input('Please provide your authentication code:')
self.code = code
self.cerf_api = Cerf(self.id, code)
data = self.cerf_api.interview.start()
if len(data) == 0:
print('Can not retrieve proper interview by id %s. Please contact your hiring manager.' % self.id)
exit(-1)
if calc_time_spent(data['started']) > 1 or os.path.exists(self.exam_path):
print('This interview has been started already!')
exit(-1)
self.interview = data
self.exam_id = self.interview['exam']
print('Nice to meet you, %s! Thanks for your interest in Juniper China R&D.' % data['applicant'])
print('Creating the exam environment...'),
self.generate_environment()
print('Done!\nYou can "cd %s" to start your exam now.' % self.exam_path)
def load_data(self, interview):
self.id = interview['id']
self.code = interview['authcode']
self.interview = interview
self.exam_id = interview['exam']
self.exam_path = 'exam%d' % self.exam_id
def submit_case(self, case):
path = os.path.join(os.getcwd(), 'case%s' % case['position'])
print('\tSubmit case%s...' % case['position']),
extentions = [ext.strip() for ext in case['extentions'].split(',')]
first_list, second_list = FileUtil.get_valid_files(path, extentions)
content = ''
for name in first_list + second_list:
s = '/* %s */\n\n%s' % (name, FileUtil.read_content(os.path.join(path, name)))
content += s
data = {
'interview': self.id,
'applicant': self.interview['applicant_id'],
'case': case['cid'],
'content': content
}
if not self.cerf_api.answer.create(data):
print('Cannot submit case%s, please contact your hiring manager.' % case['position'])
            # do not bail out so that we can try the later cases.
# exit(-1)
else:
print('Done!')
def submit_cases(self):
path = os.getcwd()
for root, dirs, files in os.walk('.'):
for d in dirs:
if d.startswith('case'):
config = FileUtil.read_case(os.path.join(path, d))
self.submit_case(config)
def finish_interview(self):
data = self.cerf_api.interview.finish()
if len(data) == 0:
print('Can not finish interview by id %s. Please contact your hiring manager.' % self.id)
exit(-1)
def finish(self):
if not FileUtil.interview_exists():
print('Please change to the root of the exam directory, then execute this command again.')
exit(-1)
# do not trust existing data, retrieve interview data from server again
interview = FileUtil.read_interview('.')
self.cerf_api = Cerf(interview['id'], interview['authcode'])
interview = self.cerf_api.interview.retrieve(interview['id'])
self.load_data(interview)
if interview['time_spent']:
print('Your exam is over. Please stay tuned.')
exit(-1)
spent = calc_time_spent(interview['started'])
print('Thank you! Your exam is done! Total time spent: %d minutes.' % spent)
print('Submitting your code to generate report...')
self.submit_cases()
print('Done!')
print('Notifying the hiring manager...'),
self.finish_interview()
print('Done!')
        print('Please wait for a short moment. If no one comes within 5 minutes, please inform the front desk.')
def main(arguments):
is_finish = arguments['finish']
is_start = arguments['start']
# sanity check
if is_finish:
InterviewManager().finish()
elif is_start:
try:
id = int(arguments['<id>'])
except:
print('Interview id is not valid. Please contact your hiring manager.')
exit(-1)
InterviewManager(id).start()
else:
print("Please specify a correct command.")
| mit | -4,617,329,500,744,596,000 | 33.455621 | 110 | 0.58372 | false | 3.838497 | false | false | false |
scott-maddox/simplepl | src/simplepl/main_window.py | 1 | 29283 | #
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# std lib imports
import os.path
# third party imports
from PySide import QtGui, QtCore
import pyqtgraph as pg
# local imports
from .scanners import Scanner, GoToer
from .simple_pl_parser import SimplePLParser
from .spectra_plot_item import SpectraPlotItem
from .measured_spectrum import MeasuredSpectrum
from .expanding_spectrum import ExpandingSpectrum
from .instruments.spectrometer import Spectrometer
from .instruments.lockin import Lockin
from .dialogs.start_scan_dialog import StartScanDialog
from .dialogs.diverters_config_dialog import DivertersConfigDialog
from .dialogs.lockin_config_dialog import LockinConfigDialog
from .dialogs.gratings_and_filters_config_dialog import (
GratingsAndFiltersConfigDialog)
from .dialogs.set_wavelength_dialog import SetWavelengthDialog
from .dialogs.config_instruments_dialog import ConfigInstrumentsDialog
from .dialogs.generate_veusz_file_dialog import GenerateVeuszFileDialog
from .dialogs.about_dialog import AboutDialog
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
# Initialize private variables
self.plot = None
self.spectrum = None
self._grating = None
self._filter = None
self._wavelength = None
self._signal = None
self._rawSignal = None
self._phase = None
self.spectrometer = None
self.lockin = None
self.scanner = None
# Internal flags
self._scanSaved = True
# Initialize QSettings object
self._settings = QtCore.QSettings()
# Initialize GUI stuff
self.initUI()
# Disable all actions except for configuring the ports,
# until the instruments are initialized
self._spectrometerInitilized = False
self._lockinInitilized = False
self.updateActions()
# Initialize the instruments
if bool(self._settings.value('autoConnect')):
self.initSpectrometer()
self.initLockin()
# Initialize the current instrument values
sysResPath = self._settings.value('sysResPath')
self._sysresParser = SimplePLParser(None, sysResPath)
def initSpectrometer(self):
self.spectrometer = Spectrometer()
self.spectrometer.sigException.connect(self.spectrometerException)
self.spectrometer.sigInitialized.connect(self.spectrometerInitialized)
self.spectrometer.sigChangingGrating.connect(self.changingGrating)
self.spectrometer.sigChangingFilter.connect(self.changingFilter)
self.spectrometer.sigChangingWavelength.connect(
self.changingWavelength)
self.spectrometer.sigGrating.connect(self.updateGrating)
self.spectrometer.sigFilter.connect(self.updateFilter)
self.spectrometer.sigWavelength.connect(self.updateWavelength)
self.spectrometer.thread.start()
def initLockin(self):
self.lockin = Lockin()
self.lockin.sigException.connect(self.lockinException)
self.lockin.sigInitialized.connect(self.lockinInitialized)
self.lockin.sigRawSignal.connect(self.updateRawSignal)
self.lockin.sigPhase.connect(self.updatePhase)
self.lockin.thread.start()
@QtCore.Slot(Exception)
def spectrometerException(self, e):
raise e
@QtCore.Slot(Exception)
def lockinException(self, e):
raise e
@QtCore.Slot(Exception)
def scannerException(self, e):
self.scanner.wait()
self.updateStatus('Scan failed.')
raise e
@QtCore.Slot()
def spectrometerInitialized(self):
self._spectrometerInitilized = True
if self._spectrometerInitilized and self._lockinInitilized:
self.updateStatus('Idle.')
self.updateActions()
@QtCore.Slot()
def lockinInitialized(self):
self._lockinInitilized = True
if self._spectrometerInitilized and self._lockinInitilized:
self.updateStatus('Idle.')
self.updateActions()
@QtCore.Slot()
def changingGrating(self):
self.gratingLabel.setText('Grating=?')
self.wavelengthLabel.setText('Wavelength=?')
@QtCore.Slot()
def changingFilter(self):
self.filterLabel.setText('Filter=?')
@QtCore.Slot()
def changingWavelength(self):
self.wavelengthLabel.setText('Wavelength=?')
@QtCore.Slot(str)
def updateStatus(self, status):
self.statusLabel.setText(status)
@QtCore.Slot(float)
def updateGrating(self, grating):
self._grating = grating
try:
s = 'Grating=%d' % grating
except:
s = 'Grating=?'
self.gratingLabel.setText(s)
@QtCore.Slot(float)
def updateFilter(self, filt):
self._filter = filt
try:
s = 'Filter=%d' % filt
except:
s = 'Filter=?'
self.filterLabel.setText(s)
@QtCore.Slot(float)
def updateWavelength(self, wavelength):
self._wavelength = wavelength
try:
s = 'Wavelength=%.1f' % wavelength
except:
s = 'Wavelength=?'
self.wavelengthLabel.setText(s)
@QtCore.Slot(float)
def updateRawSignal(self, rawSignal):
self._rawSignal = rawSignal
try:
s = 'Raw Signal=%.3E' % rawSignal
except:
s = 'Raw Signal=?'
self.rawSignalLabel.setText(s)
# Calculate the signal by dividing by the system response,
# and update that too
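        # e.g. a raw reading of 1.0e-6 with a system response of 0.5 at this
        # wavelength yields a reported signal of 2.0e-6 (illustrative numbers)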
sysres = self._sysresParser.getSysRes(self._wavelength)
self.updateSignal(rawSignal / sysres)
@QtCore.Slot(float)
def updateSignal(self, signal):
self._signal = signal
try:
s = 'Signal=%.3E' % signal
except:
s = 'Signal=?'
self.signalLabel.setText(s)
@QtCore.Slot(float)
def updatePhase(self, phase):
self._phase = phase
try:
s = 'Phase=%.1f' % phase
except:
s = 'Phase=?'
self.phaseLabel.setText(s)
def initUI(self):
self.setWindowTitle('SimplePL')
from .resources.icons import logoIcon
self.setWindowIcon(logoIcon)
self.aboutAction = QtGui.QAction('&About', self)
self.aboutAction.triggered.connect(self.about)
self.openAction = QtGui.QAction('&Open', self)
self.openAction.setStatusTip('Open a spectrum')
self.openAction.setToolTip('Open a spectrum')
self.openAction.setShortcut('Ctrl+O')
self.openAction.triggered.connect(self.openFile)
self.saveAction = QtGui.QAction('&Save', self)
self.saveAction.setStatusTip('Save the current spectrum')
self.saveAction.setToolTip('Save the current spectrum')
self.saveAction.setShortcut('Ctrl+S')
self.saveAction.triggered.connect(self.saveFile)
self.saveAsAction = QtGui.QAction('&Save As', self)
self.saveAsAction.setStatusTip('Save the current spectrum')
self.saveAsAction.setToolTip('Save the current spectrum')
self.saveAsAction.setShortcut('Ctrl+Shift+S')
self.saveAsAction.triggered.connect(self.saveAsFile)
self.closeAction = QtGui.QAction('Close &Window', self)
self.closeAction.setStatusTip('Close the Window')
self.closeAction.setToolTip('Close the Window')
self.closeAction.setShortcut('Ctrl+W')
self.closeAction.triggered.connect(self.close)
self.viewSignal = QtGui.QAction('&Signal', self)
self.viewSignal.setStatusTip('Plot the signal with system '
'response removed')
self.viewSignal.setToolTip('Plot the signal with system '
'response removed')
self.viewSignal.toggled.connect(self.viewSignalToggled)
self.viewSignal.setCheckable(True)
self.viewSignal.setChecked(True)
self.viewRawSignal = QtGui.QAction('&Raw Signal', self)
self.viewRawSignal.setStatusTip('Plot the raw signal')
self.viewRawSignal.setToolTip('Plot the raw signal')
self.viewRawSignal.toggled.connect(self.viewRawSignalToggled)
self.viewRawSignal.setCheckable(True)
self.viewRawSignal.setChecked(False)
self.viewPhase = QtGui.QAction('&Phase', self)
self.viewPhase.setStatusTip('Plot the phase')
self.viewPhase.setToolTip('Plot the phase')
self.viewPhase.toggled.connect(self.viewPhaseToggled)
self.viewPhase.setCheckable(True)
self.viewPhase.setChecked(False)
self.viewClearPlotAction = QtGui.QAction('&Clear Plot', self)
self.viewClearPlotAction.setStatusTip('Clear the plot')
self.viewClearPlotAction.setToolTip('Clear the plot')
self.viewClearPlotAction.triggered.connect(self.clearPlot)
self.axesWavelengthAction = QtGui.QAction('&Wavelength', self)
self.axesWavelengthAction.setStatusTip('Plot against Wavelength')
self.axesWavelengthAction.setToolTip('Plot against Wavelength')
self.axesWavelengthAction.setShortcut('Ctrl+Shift+W')
self.axesWavelengthAction.triggered.connect(self.axesWavelength)
self.axesWavelengthAction.setCheckable(True)
self.axesWavelengthAction.setChecked(True)
self.axesEnergyAction = QtGui.QAction('&Energy', self)
self.axesEnergyAction.setStatusTip('Plot against Energy')
self.axesEnergyAction.setToolTip('Plot against Energy')
self.axesEnergyAction.setShortcut('Ctrl+Shift+e')
self.axesEnergyAction.triggered.connect(self.axesEnergy)
self.axesEnergyAction.setCheckable(True)
self.axesSemilogAction = QtGui.QAction('Semi-&log', self)
self.axesSemilogAction.setStatusTip('Plot the log of the y-axis')
self.axesSemilogAction.setToolTip('Plot the log of the y-axis')
self.axesSemilogAction.setShortcut('Ctrl+Shift+L')
self.axesSemilogAction.changed.connect(self.axesSemilog)
self.axesSemilogAction.setCheckable(True)
self.axesSemilogAction.setChecked(False)
group = QtGui.QActionGroup(self)
group.addAction(self.axesWavelengthAction)
group.addAction(self.axesEnergyAction)
self.gotoWavelengthAction = QtGui.QAction('&Go to wavelength', self)
self.gotoWavelengthAction.setStatusTip('Go to a wavelength')
self.gotoWavelengthAction.setToolTip('Go to a wavelength')
self.gotoWavelengthAction.setShortcut('Ctrl+G')
self.gotoWavelengthAction.triggered.connect(self.setWavelength)
self.startScanAction = QtGui.QAction('S&tart Scan', self)
self.startScanAction.setStatusTip('Start a scan')
self.startScanAction.setToolTip('Start a scan')
self.startScanAction.setShortcut('Ctrl+T')
self.startScanAction.triggered.connect(self.startScan)
self.abortScanAction = QtGui.QAction('A&bort Scan', self)
self.abortScanAction.setStatusTip('Abort the current scan')
self.abortScanAction.setToolTip('Abort the current scan')
self.abortScanAction.setShortcut('Ctrl+B')
self.abortScanAction.triggered.connect(self.abortScan)
self.abortScanAction.setEnabled(False)
self.configInstrumentsAction = QtGui.QAction('&Instruments', self)
self.configInstrumentsAction.setStatusTip('Configure the instruments')
self.configInstrumentsAction.setToolTip('Configure the instruments')
self.configInstrumentsAction.triggered.connect(self.configInstruments)
self.configSysResAction = QtGui.QAction('System &Response', self)
self.configSysResAction.setStatusTip('Configure the system response')
self.configSysResAction.setToolTip('Configure the system response')
self.configSysResAction.triggered.connect(self.configSysRes)
self.configLockinAction = QtGui.QAction('&Lock-in', self)
self.configLockinAction.setStatusTip(
'Configure the lock-in amplifier')
self.configLockinAction.setToolTip(
'Configure the lock-in amplifier')
self.configLockinAction.triggered.connect(
self.configLockin)
self.configDivertersAction = QtGui.QAction('&Diverters', self)
self.configDivertersAction.setStatusTip(
'Configure the diverters')
self.configDivertersAction.setToolTip('Configure the diverters')
self.configDivertersAction.triggered.connect(
self.configDiverters)
self.configGratingsAndFiltersAction = QtGui.QAction(
'&Gratings and Filters',
self)
self.configGratingsAndFiltersAction.setStatusTip(
'Configure the gratings and filters')
self.configGratingsAndFiltersAction.setToolTip(
'Configure the gratings and filters')
self.configGratingsAndFiltersAction.triggered.connect(
self.configGratingsAndFilters)
self.generateVeuszFileAction = QtGui.QAction('Generate &Veusz File',
self)
self.generateVeuszFileAction.setStatusTip(
'Generate a Veusz file')
self.generateVeuszFileAction.setToolTip(
'Generate a Veusz file')
self.generateVeuszFileAction.triggered.connect(
self.generateVeuszFile)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(self.openAction)
fileMenu.addAction(self.saveAction)
fileMenu.addAction(self.saveAsAction)
fileMenu.addAction(self.closeAction)
viewMenu = menubar.addMenu('&View')
viewMenu.addAction(self.viewSignal)
viewMenu.addAction(self.viewRawSignal)
viewMenu.addAction(self.viewPhase)
viewMenu.addSeparator().setText("Spectra")
viewMenu.addAction(self.viewClearPlotAction)
axesMenu = menubar.addMenu('A&xes')
axesMenu.addSeparator().setText("X Axis")
axesMenu.addAction(self.axesWavelengthAction)
axesMenu.addAction(self.axesEnergyAction)
axesMenu.addSeparator().setText("Y Axis")
axesMenu.addAction(self.axesSemilogAction)
scanMenu = menubar.addMenu('&Scan')
scanMenu.addAction(self.gotoWavelengthAction)
scanMenu.addAction(self.startScanAction)
scanMenu.addAction(self.abortScanAction)
configMenu = menubar.addMenu('&Config')
configMenu.addAction(self.configInstrumentsAction)
configMenu.addAction(self.configSysResAction)
configMenu.addAction(self.configLockinAction)
configMenu.addAction(self.configDivertersAction)
configMenu.addAction(self.configGratingsAndFiltersAction)
toolsMenu = menubar.addMenu('&Tools')
toolsMenu.addAction(self.generateVeuszFileAction)
aboutMenu = menubar.addMenu('&About')
aboutMenu.addAction(self.aboutAction)
statusBar = self.statusBar()
self.statusLabel = QtGui.QLabel('Initializing...')
self.gratingLabel = QtGui.QLabel('Grating=?')
self.filterLabel = QtGui.QLabel('Filter=?')
self.wavelengthLabel = QtGui.QLabel('Wavelength=?')
self.signalLabel = QtGui.QLabel('Signal=?')
self.rawSignalLabel = QtGui.QLabel('Raw Signal=?')
self.phaseLabel = QtGui.QLabel('Phase=?')
statusBar.addWidget(self.statusLabel, stretch=1)
statusBar.addWidget(self.gratingLabel, stretch=1)
statusBar.addWidget(self.filterLabel, stretch=1)
statusBar.addWidget(self.wavelengthLabel, stretch=1)
statusBar.addWidget(self.signalLabel, stretch=1)
statusBar.addWidget(self.rawSignalLabel, stretch=1)
statusBar.addWidget(self.phaseLabel, stretch=1)
view = pg.GraphicsLayoutWidget()
self.setCentralWidget(view)
self.plot = SpectraPlotItem(xaxis='wavelength')
self.plot.setSignalEnabled(True)
self.plot.setRawSignalEnabled(False)
self.plot.setPhaseEnabled(False)
view.addItem(self.plot, 0, 0)
self.setCentralWidget(view)
self.setWindowTitle('SimplePL')
self.setMinimumSize(576, 432)
self.readWindowSettings()
@QtCore.Slot(bool)
def viewSignalToggled(self, b):
if self.plot:
self.plot.setSignalEnabled(b)
@QtCore.Slot(bool)
def viewRawSignalToggled(self, b):
if self.plot:
self.plot.setRawSignalEnabled(b)
@QtCore.Slot(bool)
def viewPhaseToggled(self, b):
if self.plot:
self.plot.setPhaseEnabled(b)
def clearPlot(self):
self.plot.clear()
def axesWavelength(self):
self.plot.setXAxisView('wavelength')
def axesEnergy(self):
self.plot.setXAxisView('energy')
def setWavelength(self):
wavelength = SetWavelengthDialog.getWavelength(
spectrometer=self.spectrometer,
wavelength=self._wavelength,
parent=self)
if wavelength is None:
return
self.scanner = GoToer(self.spectrometer, wavelength)
self.scanner.statusChanged.connect(self.updateStatus)
self.scanner.started.connect(self.updateActions)
self.scanner.finished.connect(self.updateActions)
self.scanner.sigException.connect(self.scannerException)
self.scanner.start()
def axesSemilog(self):
logMode = self.axesSemilogAction.isChecked()
if self.plot:
self.plot.setLogMode(None, logMode)
def updateActions(self):
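        # Enable/disable menu actions based on instrument readiness and whether a scan is running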
spec = self._spectrometerInitilized
lockin = self._lockinInitilized
both = spec and lockin
scanning = bool(self.scanner) and self.scanner.isScanning()
notScanning = not scanning
all = both and notScanning
self.openAction.setEnabled(notScanning)
self.saveAction.setEnabled(not self._scanSaved and notScanning)
self.saveAsAction.setEnabled(notScanning and self.spectrum is not None)
self.gotoWavelengthAction.setEnabled(spec and notScanning)
self.startScanAction.setEnabled(all)
self.abortScanAction.setEnabled(scanning)
self.configInstrumentsAction.setEnabled(not both or notScanning)
self.configSysResAction.setEnabled(notScanning)
self.configLockinAction.setEnabled(lockin and notScanning)
self.configDivertersAction.setEnabled(spec and notScanning)
self.configGratingsAndFiltersAction.setEnabled(spec and notScanning)
def startScan(self):
if self.scanner and self.scanner.isScanning():
return # a scan is already running
if not self._scanSaved:
self.savePrompt() # Prompt the user to save the scan
self._scanSaved = False
# Get the scan parameters from the user
params = StartScanDialog.getScanParameters(
spectrometer=self.spectrometer,
parent=self)
if params is None:
return # cancel
start, stop, step, delay = params
# Remove the old spectrum from the plot, and add a new one
if self.spectrum:
result = QtGui.QMessageBox.question(self,
'Clear plot?',
'Do you want to clear the '
'plot?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if result == QtGui.QMessageBox.Yes:
self.clearPlot()
self.spectrum = ExpandingSpectrum(self._sysresParser)
self.plot.addSpectrum(self.spectrum)
self.scanner = Scanner(self.spectrometer, self.lockin, self.spectrum,
start, stop, step, delay)
self.scanner.statusChanged.connect(self.updateStatus)
self.scanner.started.connect(self.updateActions)
self.scanner.finished.connect(self.updateActions)
self.scanner.sigException.connect(self.scannerException)
self.scanner.start()
def abortScan(self):
if not self.scanner.isScanning():
self.updateActions()
return
self.updateStatus('Aborting scan...')
self.scanner.abort()
def configDiverters(self):
# Get the config parameters
entranceMirror, exitMirror, accepted = (
DivertersConfigDialog.getDivertersConfig(parent=self))
if not accepted:
return
self.spectrometer.setEntranceMirror(entranceMirror)
self.spectrometer.setExitMirror(exitMirror)
def configInstruments(self):
# Get the ports
ports = ConfigInstrumentsDialog.getConfig(parent=self)
if ports is None:
return
# Reset the status
self.updateStatus('Reinitializing...')
self._lockinInitilized = False
self._spectrometerInitilized = False
self.updateActions()
# Restart the lockin and spectrometer
if self.lockin:
self.lockin.thread.quit()
if self.spectrometer:
self.spectrometer.thread.quit()
if self.lockin:
self.lockin.thread.wait()
if self.spectrometer:
self.spectrometer.thread.wait()
self.initSpectrometer()
self.initLockin()
def configSysRes(self):
sysResPath = self._settings.value('sysResPath', None)
sysResPath, _filter = QtGui.QFileDialog.getOpenFileName(parent=self,
caption='Open a system response file',
dir=sysResPath)
if not sysResPath:
return
self._settings.setValue('sysResPath', sysResPath)
self._sysresParser = SimplePLParser(None, sysResPath)
def configLockin(self):
# Get the config parameters
timeConstantIndex, reserveModeIndex, inputLineFilterIndex, accepted = (
LockinConfigDialog.getLockinConfig(self.lockin, parent=self))
if not accepted:
return
self.lockin.setTimeConstantIndex(timeConstantIndex)
self.lockin.setReserveModeIndex(reserveModeIndex)
self.lockin.setInputLineFilterIndex(inputLineFilterIndex)
def configGratingsAndFilters(self):
GratingsAndFiltersConfigDialog.getAdvancedConfig(self.spectrometer,
parent=self)
def generateVeuszFile(self):
GenerateVeuszFileDialog(self).exec_()
def getSystemResponseFilePath(self):
sysResPath = self._settings.value('sysResPath', None)
sysResPath, _filter = QtGui.QFileDialog.getOpenFileName(parent=self,
caption='Open a system response file',
dir=sysResPath)
return sysResPath
def openFile(self):
dirpath = self._settings.value('last_directory', '')
filepath, _filter = QtGui.QFileDialog.getOpenFileName(parent=self,
caption='Open a PL spectrum file',
dir=dirpath)
if not filepath:
return
dirpath, filename = os.path.split(filepath)
self._settings.setValue('last_directory', dirpath)
# self.setWindowTitle(u'SimplePL - {}'.format(filename))
spectrum = MeasuredSpectrum.open(filepath)
        # Check if the system-response-removed column is included.
# If not, ask user to select a system response file.
if not len(spectrum.getSignal()):
result = QtGui.QMessageBox.question(self,
'Provide system response?',
'The selected file does not '
'appear to have a system-'
'response-removed column. '
'Would you like to provide a '
'system response?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if result == QtGui.QMessageBox.Yes:
sysres_filepath = self.getSystemResponseFilePath()
if sysres_filepath:
spectrum = MeasuredSpectrum.open(filepath, sysres_filepath)
# remove the previous measured spectrum
if self.spectrum:
result = QtGui.QMessageBox.question(self,
'Clear plot?',
'Do you want to clear the '
'plot?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if result == QtGui.QMessageBox.Yes:
self.clearPlot()
# plot the measured spectrum
self.plot.addSpectrum(spectrum)
self.spectrum = spectrum
self.updateActions()
def savePrompt(self):
reply = QtGui.QMessageBox.question(self, 'Save?',
'Do you want to save the current scan?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.saveFile()
def saveFile(self):
dirpath = self._settings.value('last_directory', '')
filepath, _filter = QtGui.QFileDialog.getSaveFileName(parent=self,
caption='Save the current spectrum',
dir=dirpath,
filter='Tab Delimited Text (*.txt)')
if not filepath:
return
dirpath, _filename = os.path.split(filepath)
self._settings.setValue('last_directory', dirpath)
self.spectrum.save(filepath)
self._scanSaved = True
def saveAsFile(self):
self.saveFile()
def about(self):
AboutDialog().exec_()
def moveCenter(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def moveTopLeft(self):
p = QtGui.QDesktopWidget().availableGeometry().topLeft()
self.move(p)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Quit?',
'Are you sure you want to quit?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
if not self._scanSaved:
self.abortScan()
self.savePrompt() # Prompt the user to save the scan
if self.spectrometer:
self.spectrometer.thread.quit()
if self.lockin:
self.lockin.thread.quit()
if self.spectrometer:
self.spectrometer.thread.wait()
if self.lockin:
self.lockin.thread.wait()
self.writeWindowSettings()
event.accept()
else:
event.ignore()
def writeWindowSettings(self):
self._settings.setValue("MainWindow/size", self.size())
self._settings.setValue("MainWindow/pos", self.pos())
def readWindowSettings(self):
self.resize(self._settings.value("MainWindow/size",
QtCore.QSize(1280, 800)))
pos = self._settings.value("MainWindow/pos")
if pos is None:
self.moveCenter() # default to centered
else:
self.move(pos)
| agpl-3.0 | -198,206,169,742,506,020 | 39.558172 | 79 | 0.620428 | false | 4.308224 | true | false | false |
RPGOne/Skynet | pytorch-master/torch/nn/_functions/thnn/activation.py | 1 | 3547 | import torch
from torch.autograd.function import Function, InplaceFunction
from torch._thnn import type2backend
from . import _all_functions
class PReLU(Function):
def forward(self, input, weight):
self._backend = type2backend[type(input)]
output = input.new()
self.num_parameters = weight.numel()
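        # THNN uses num_parameters == 0 to mean a single weight shared across all channels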
if self.num_parameters == 1:
self.num_parameters = 0
self._backend.PReLU_updateOutput(
self._backend.library_state,
input,
output,
weight,
self.num_parameters
)
self.save_for_backward(input, weight)
return output
def backward(self, grad_output):
input, weight = self.saved_tensors
# TODO: check if requires grad
grad_input = input.new()
self._backend.PReLU_updateGradInput(
self._backend.library_state,
input,
grad_output,
grad_input,
weight,
self.num_parameters
)
buf = weight.new()
buf2 = weight.new()
# TODO: this won't have to be zeroed in the future
grad_weight = weight.new().resize_as_(weight).zero_()
self._backend.PReLU_accGradParameters(
self._backend.library_state,
input,
grad_output,
grad_input,
weight,
grad_weight,
buf,
buf2,
self.num_parameters,
1
)
return grad_input, grad_weight
class RReLU(InplaceFunction):
def __init__(self, lower, upper, train, inplace=False):
super(RReLU, self).__init__(inplace)
self.lower = lower
self.upper = upper
self.train = train
def forward(self, input):
self._backend = type2backend[type(input)]
output = input.new()
self.noise = input.new()
self._backend.RReLU_updateOutput(
self._backend.library_state,
input,
output,
self.noise,
self.lower,
self.upper,
self.train,
self.inplace,
torch.default_generator if not input.is_cuda else 0
)
self.save_for_backward(input)
return output
def backward(self, grad_output):
input, = self.saved_tensors
# TODO: check if requires grad
grad_input = input.new()
self._backend.RReLU_updateGradInput(
self._backend.library_state,
input,
grad_output,
grad_input,
self.noise,
self.lower,
self.upper,
self.train,
self.inplace
)
return grad_input
class Softmin(Function):
def forward(self, input):
self._backend = type2backend[type(input)]
self.mininput = input.clone().mul(-1)
output = input.new()
self._backend.SoftMax_updateOutput(
self._backend.library_state,
self.mininput,
output
)
self.save_for_backward(output)
return output
def backward(self, grad_output):
output, = self.saved_tensors
grad_input = grad_output.new()
self._backend.SoftMax_updateGradInput(
self._backend.library_state,
self.mininput,
grad_output,
grad_input,
output
)
return grad_input.mul(-1)
_all_functions.append(PReLU)
_all_functions.append(RReLU)
_all_functions.append(Softmin)
| bsd-3-clause | 6,557,805,048,038,104,000 | 26.076336 | 63 | 0.54525 | false | 4.168038 | false | false | false |
abar193/LearningApp | Backend/rest/notes.py | 1 | 1576 | from flask import Flask, request, send_from_directory, jsonify
import rest.database as database
import os
app = Flask(__name__)
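# Pick the static asset directory depending on whether the app is launched from the repo root or from this package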
if os.path.isdir('./src'):
ROOT_DIR = './src/'
else:
ROOT_DIR = '../../src/'
@app.route("/")
def hello():
return send_from_directory(ROOT_DIR, 'index.html')
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory(ROOT_DIR + 'js', path)
@app.route('/css/<path:path>')
def send_css(path):
return send_from_directory(ROOT_DIR + 'css', path)
@app.route('/index')
def send_main():
return send_from_directory(ROOT_DIR, 'index.html')
@app.route('/save')
def send_save():
return send_from_directory(ROOT_DIR, 'save.html')
@app.route('/selftest')
def send_test():
return send_from_directory(ROOT_DIR, 'selftest.html')
@app.route('/api/courses')
def get_courses():
return jsonify(database.get_courses())
@app.route('/api/courses', methods=['POST'])
def post_course():
data = request.json
database.create_course(data)
return jsonify(database.get_courses())
@app.route('/api/notes/<lectureid>')
def get_notes(lectureid):
return jsonify(database.get_notes(lectureid))
#
# @app.route('/api/notes/<lectureid>', methods=['POST'])
# def post_note(lectureid):
# data = request.json
# database.create_note(lectureid, data)
# return jsonify(database.get_notes())
@app.route('/api/debug')
def debug():
print(database.__courses)
print(database.__notes)
return jsonify(database.__notes)
if __name__ == "__main__":
print(os.listdir(ROOT_DIR))
app.run()
| mit | -5,742,563,153,957,349,000 | 22.878788 | 62 | 0.657995 | false | 3.102362 | false | false | false |
dnxbjyj/python-basic | gui/wxpython/wxPython-demo-4.0.1/samples/floatcanvas/ClickableBoxes.py | 1 | 3070 | #!/usr/bin/env python
"""
This is a little demo of how to make clickable (and changeable) objects with
FloatCanvas
Also an example of constant size, rather than the usual zooming and panning
Developed as an answer to a question on the wxPython mailing list:
"get panel id while dragging across several panels"
April 5, 2012
"""
import random
import wx
## import the installed version
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
## import a local version
#import sys
#sys.path.append("../")
#from floatcanvas import FloatCanvas as FC
colors = [ (255, 0 , 0 ),
(0 , 255, 0 ),
(0 , 0, 255),
(255, 255, 0 ),
(255, 0, 255),
(0 , 255, 255),
]
class DrawFrame(wx.Frame):
"""
A frame used for the Demo
"""
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
# Add the Canvas
Canvas = FloatCanvas.FloatCanvas(self,
size = (500,500),
ProjectionFun = None,
Debug = 0,
BackgroundColor = "Black",
)
self.Canvas = Canvas
self.Canvas.Bind(wx.EVT_SIZE, self.OnSize)
# build the squares:
w = 10
dx = 14
for i in range(9):
for j in range(9):
Rect = Canvas.AddRectangle((i*dx, j*dx), (w, w), FillColor="White", LineStyle = None)
Outline = Canvas.AddRectangle((i*dx, j*dx), (w, w),
FillColor=None,
LineWidth=4,
LineColor='Red',
LineStyle=None)
Rect.indexes = (i,j)
Rect.outline = Outline
Rect.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.SquareHitLeft)
Rect.Bind(FloatCanvas.EVT_FC_ENTER_OBJECT, self.SquareEnter)
Rect.Bind(FloatCanvas.EVT_FC_LEAVE_OBJECT, self.SquareLeave)
self.Show()
Canvas.ZoomToBB()
def SquareHitLeft(self, square):
print("square hit:", square.indexes)
# set a random color
c = random.sample(colors, 1)[0]
square.SetFillColor( c )
self.Canvas.Draw(True)
def SquareEnter(self, square):
print("entering square:", square.indexes)
square.outline.SetLineStyle("Solid")
self.Canvas.Draw(True)
def SquareLeave(self, square):
print("leaving square:", square.indexes)
square.outline.SetLineStyle(None)
self.Canvas.Draw(True)
def OnSize(self, event):
"""
re-zooms the canvas to fit the window
"""
print("in OnSize")
self.Canvas.ZoomToBB()
event.Skip()
app = wx.App(False)
F = DrawFrame(None, title="FloatCanvas Demo App", size=(700,700) )
app.MainLoop()
| mit | 1,658,421,705,508,089,900 | 24.798319 | 101 | 0.513355 | false | 4.023591 | false | false | false |
petrblahos/modellerkit | step06/production.py | 1 | 5481 | from collections import namedtuple
import datetime
import random
import hotmodel
ProcessOperation = namedtuple("ProcessOperation", [
"operation",
"act",
])
ProductOperation = namedtuple("ProductOperation", [
"operation",
"tm",
"workplace",
])
class Server(object):
"""
A mock server. Can answer questions about a process for an
(article, serial_number) and about operations done on the
same.
"""
ACTS = [
"Laser",
"Automatic SMT placement", "Manual SMT placement",
"AOI",
"THT placement", "Optical inspection",
"Selective soldering", "Wave", "Manual soldering",
]
def __init__(self, op_done_rate=90):
self.op_done_rate = op_done_rate
pass
def get_product_ops(self, article, serial_num,):
"""
Returns a list of operations done from the product process.
Randomly skips some operations, and add random dates and
workplaces.
"""
ret = []
dt0 = datetime.datetime.now() - datetime.timedelta(
random.randint(3, 5),
random.randint(0, 60*60*24),
)
proc = self.get_process(article, serial_num)
for operation in proc:
if random.randint(0, 100) > self.op_done_rate:
continue
ret.append(ProductOperation(
operation.operation,
dt0,
random.randint(1, 5),
))
dt0 += datetime.timedelta(0, random.randint(10, 14400))
return ret
def get_process(self, article, dummy_sn):
"""
        Returns a list of operations (operation number, act) for this
        article/sn. The operation set depends on the last digit of the
        article number: 0-3 get SMT on both sides, 4-6 get SMT plus THT,
        and the rest get SMT, THT and selective soldering.
"""
if article[-1] in ("0", "1", "2", "3", ):
return [ # SMT both sides
ProcessOperation(op*10, self.ACTS[act])
for (op, act) in enumerate((0, 1, 2, 3, 1, 3,))
]
if article[-1] in ("4", "5", "6", ):
return [ # SMT one side and THT
ProcessOperation(op*10, self.ACTS[act])
for (op, act) in enumerate((0, 1, 3, 4, 7, 5, 8, 5,))
]
return [ # SMT one side, THT, selective soldering
ProcessOperation(op*10, self.ACTS[act])
for (op, act) in enumerate((0, 1, 3, 4, 6, 5,))
]
class ProductModel(hotmodel.HotObject):
"""
Holds information about a product (article/serial number). When told
a new (article, serial_number), fetches the information about it's
process and performed operations from the server. Manages the selected
operation in the list of performed operations and the selected
operation in the process.
"""
def __init__(self, server):
"""
Set-up the read-only properties.
"""
super(ProductModel, self).__init__("", None)
self.server = server
self.make_hot_property("article", str, True, None)
self.make_hot_property("sn", int, True, None)
self.make_hot_property(
"process",
hotmodel.TypedHotList,
False,
hotmodel.TypedHotList(ProcessOperation),
)
self.make_hot_property(
"operations",
hotmodel.TypedHotList,
False,
hotmodel.TypedHotList(ProductOperation),
)
self.make_hot_property("process_selection", int, True, None)
self.make_hot_property("operation_selection", int, True, None)
def set_product(self, article, sn):
"""
Set the current product.
"""
self.article = article
self.sn = sn
self.process = self.server.get_process(article, sn)
self.operations = self.server.get_product_ops(article, sn)
self.process_selection = None
self.operation_selection = None
def select_operation(self, index):
"""
Set the selected operation in the list of performed operations.
"""
if index == self.operation_selection:
return
self.operation_selection = index
self._fire("select", index)
def select_process_operation(self, index):
"""
Set the selected operation in the process.
"""
if index == self.process_selection:
return
self.process_selection = index
self._fire("select", index)
def sample_handler(handler_name, model, fqname, event_name, key):
print handler_name, "-->", fqname, event_name, key
if "__main__" == __name__:
MODEL = ProductModel(Server())
MAPPER = hotmodel.Mapper()
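    # Register handlers: one for the /process container, one for reset events, and a catch-all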
MAPPER.add_route("/process", "", lambda a,b,c,d: sample_handler("/process-HANDLER-1", a,b,c,d),)
MAPPER.add_route("", "reset", lambda a,b,c,d: sample_handler("!!RESET-handler-1", a,b,c,d),)
MAPPER.add_route("", "", lambda a,b,c,d: sample_handler("*-handler-1", a,b,c,d),)
MODEL.add_listener(MAPPER)
MODEL.set_product("AAAQA1", 1)
MODEL.set_product("AAAQA2", 2)
MODEL.select_operation(3)
MODEL.select_process_operation(1)
MODEL.select_process_operation(2)
MODEL.select_process_operation(2)
| mit | 3,351,073,700,658,054,700 | 33.134615 | 100 | 0.555191 | false | 4.018328 | false | false | false |
demisto/content | Packs/FeedIntel471/Integrations/Intel471MalwareIndicator/Intel471MalwareIndicator.py | 1 | 6628 | import jmespath
import demistomock as demisto
from CommonServerPython import *
from JSONFeedApiModule import * # noqa: E402
from typing import Dict
DEFAULT_COUNT = 100
SEARCH_PARAMS = {
'indicator': 'indicator',
'from': 'from',
'until': 'until',
'threat_type': 'threatType',
'malware_family': 'malwareFamily',
'confidence': 'confidence',
'count': 'count',
}
FEED_INDICATOR_TYPES = {
FeedIndicatorType.URL: FeedIndicatorType.URL,
FeedIndicatorType.File: FeedIndicatorType.File,
"ipv4": FeedIndicatorType.IP
}
FEED_URL = 'https://api.intel471.com/v1/indicators/stream?'
MAPPING = {
FeedIndicatorType.File: {
'threat_type': 'threattypes.threatcategory',
'threat_data_family': 'malwarefamily',
'indicator_data_file_md5': 'md5',
'indicator_data_file_sha1': 'sha1',
'indicator_data_file_sha256': 'sha256',
'context_description': 'description',
'indicator_data_file_download_url': 'downloadurl',
'mitre_tactics': 'mitretactics',
'relation_entity_b': 'threat_data_family'
},
FeedIndicatorType.URL: {'threat_type': 'threattypes.threatcategory',
'threat_data_family': 'malwarefamily',
'indicator_data_url': 'url',
'context_description': 'description',
'mitre_tactics': 'mitretactics',
'relation_entity_b': 'threat_data_family'
},
"ipv4": {'threat_type': 'threattypes.threatcategory',
'threat_data_family': 'malwarefamily',
'indicator_data_address': 'ipaddress',
'context_description': 'description',
'mitre_tactics': 'mitretactics',
'relation_entity_b': 'threat_data_family'
}
}
INDICATOR_VALUE_FIELD = {FeedIndicatorType.File: 'indicator_data_file_sha256',
FeedIndicatorType.URL: 'indicator_data_url',
"ipv4": 'indicator_data_address'}
DEMISTO_VERSION = demisto.demistoVersion()
CONTENT_PACK = 'Intel471 Feed/2.0.1'
INTEGRATION = 'Intel471 Malware Indicator Feed'
USER_AGENT = f'XSOAR/{DEMISTO_VERSION["version"]}.{DEMISTO_VERSION["buildNumber"]} - {CONTENT_PACK} - {INTEGRATION}'
def _create_url(**kwargs):
url_suffix = ""
for param in kwargs:
url_suffix += f"&{param}={kwargs.get(param)}"
return FEED_URL + url_suffix.strip('&')
def _build_url_parameter_dict(**kwargs):
"""
    Given a set of parameters, creates a dictionary with only the searchable items that can be used in the API.
"""
params_dict = {}
for param in kwargs:
if param in SEARCH_PARAMS:
params_dict[SEARCH_PARAMS.get(param)] = kwargs.get(param)
return params_dict
def get_params_by_indicator_type(**kwargs):
indicators_url = {}
params = _build_url_parameter_dict(**kwargs)
params['count'] = int(params.get('count', DEFAULT_COUNT))
indicator_types = argToList(kwargs.get('indicator_type'))
# allows user to choose multiple indicator types at once.
if 'All' in indicator_types:
indicator_types = FEED_INDICATOR_TYPES
for current_type in indicator_types:
params['indicatorType'] = current_type
indicators_url[current_type] = _create_url(**params)
return indicators_url
def custom_build_iterator(client: Client, feed: Dict, limit: int = 0, **kwargs) -> List:
url = feed.get('url', client.url)
fetch_time = feed.get('fetch_time')
start_date, end_date = parse_date_range(fetch_time, utc=True, to_timestamp=True)
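    # Resume from the last stored fetch time when available; otherwise use the configured fetch window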
integration_context = get_integration_context()
last_fetch = integration_context.get(f"{feed.get('indicator_type')}_fetch_time")
params = {'lastUpdatedFrom': last_fetch if last_fetch else start_date}
result: List[Dict] = []
should_continue = True
while should_continue:
r = requests.get(
url=url,
verify=client.verify,
auth=client.auth,
cert=client.cert,
headers=client.headers,
params=params,
**kwargs
)
try:
r.raise_for_status()
data = r.json()
current_result = jmespath.search(expression=feed.get('extractor'), data=data)
if current_result:
result = result + current_result
# gets next page reference and handles paging.
should_continue = len(result) < limit if result else True
should_continue = should_continue or data.get('cursorNext') != params.get('cursor')
params['cursor'] = data.get('cursorNext') if should_continue else ''
except ValueError as VE:
raise ValueError(f'Could not parse returned data to Json. \n\nError massage: {VE}')
set_integration_context({f"{feed.get('indicator_type')}_fetch_time": str(end_date)})
return result
def custom_build_relationships(feed_config: dict, mapping: dict, indicator_data: dict):
if indicator_data.get(mapping.get('relation_entity_b')):
relationships_lst = EntityRelationship(
name=feed_config.get('relation_name'),
entity_a=indicator_data.get('value'),
entity_a_type=indicator_data.get('type'),
entity_b=indicator_data.get(mapping.get('relation_entity_b')),
entity_b_type=feed_config.get('relation_entity_b_type'),
)
return [relationships_lst.to_indicator()]
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
params['headers'] = {'user-agent': USER_AGENT}
urls = get_params_by_indicator_type(**params)
params['feed_name_to_config'] = {}
for indicator_type in urls:
params['feed_name_to_config'][indicator_type] = {
'url': urls.get(indicator_type),
'extractor': 'indicators[*].data',
'indicator_type': FEED_INDICATOR_TYPES.get(indicator_type),
'indicator': INDICATOR_VALUE_FIELD.get(indicator_type),
'flat_json_with_prefix': True,
'mapping': MAPPING.get(indicator_type),
'custom_build_iterator': custom_build_iterator,
'fetch_time': params.get('fetch_time', '7 days'),
'relation_entity_b_type': 'Malware',
'relation_name': EntityRelationship.Relationships.COMMUNICATES_WITH,
'create_relations_function': custom_build_relationships,
}
feed_main(params, 'Intel471 Malware Indicators Feed', 'intel471-indicators')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | 7,709,804,647,605,894,000 | 38.218935 | 116 | 0.61557 | false | 3.713165 | false | false | false |
m4h7/juriscraper | juriscraper/oral_args/united_states/federal_appellate/cadc.py | 2 | 2046 | """Scraper for D.C. Circuit of Appeals
CourtID: cadc
Court Short Name: cadc
Author: Andrei Chelaru
Reviewer: mlr
Date created: 18 July 2014
"""
from datetime import datetime, date
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
d = date.today()
#d = date(month=5, day=1, year=2014)
self.url = 'http://www.cadc.uscourts.gov/recordings/recordings.nsf/DocsByRDate?OpenView&count=100&SKey={yearmo}'.format(
yearmo=d.strftime('%Y%m')
)
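        # Build YYYYMM keys for every month from 2007 through the current year, used for back-scraping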
self.back_scrape_iterable = ["%s%02d" % (year, month) for year in
range(2007, d.year + 1) for month in
range(1, 13)]
def _get_download_urls(self):
path = "id('ViewBody')//div[contains(concat(' ',@class,' '),' row-entry')]//@href"
return list(self.html.xpath(path))
def _get_case_names(self):
path = "id('ViewBody')//*[contains(concat(' ',@class,' '),' column-two')]/div[1]/text()"
return list(self.html.xpath(path))
def _get_case_dates(self):
path = "id('ViewBody')//date/text()"
return map(self._return_case_date, self.html.xpath(path))
@staticmethod
def _return_case_date(e):
e = ''.join(e.split())
return datetime.strptime(e, '%m/%d/%Y').date()
def _get_docket_numbers(self):
path = "id('ViewBody')//*[contains(concat(' ',@class,' '),' row-entry')]//a//text()"
return list(self.html.xpath(path))
def _get_judges(self):
path = '//div[span[contains(., "Judges")]]/text()'
return [' '.join(s.split()) for s in self.html.xpath(path)]
def _download_backwards(self, yearmo):
self.url = 'http://www.cadc.uscourts.gov/recordings/recordings.nsf/DocsByRDate?OpenView&count=100&SKey={yearmo}'.format(
yearmo=yearmo,
)
self.html = self._download()
| bsd-2-clause | 3,309,089,422,167,154,000 | 34.275862 | 128 | 0.583089 | false | 3.177019 | false | false | false |
ldoktor/autotest | client/virt/scripts/virtio_console_guest.py | 4 | 27772 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Auxiliary script used to send data between ports on guests.
@copyright: 2010 Red Hat, Inc.
@author: Jiri Zupka ([email protected])
@author: Lukas Doktor ([email protected])
"""
import threading
from threading import Thread
import os, select, re, random, sys, array, stat
import fcntl, traceback, signal, time
DEBUGPATH = "/sys/kernel/debug"
SYSFSPATH = "/sys/class/virtio-ports/"
DEVPATH = "/dev/virtio-ports/"
exiting = False
class VirtioGuest:
"""
Test tools of virtio_ports.
"""
LOOP_NONE = 0
LOOP_POLL = 1
LOOP_SELECT = 2
def __init__(self):
self.files = {}
self.exit_thread = threading.Event()
self.threads = []
self.ports = {}
self.poll_fds = {}
self.catch_signal = None
self.use_config = threading.Event()
def _readfile(self, name):
"""
Read file and return content as string
@param name: Name of file
@return: Content of file as string
"""
out = ""
try:
f = open(name, "r")
out = f.read()
f.close()
except Exception:
print "FAIL: Cannot open file %s" % (name)
return out
def _get_port_status(self, in_files=None):
"""
Get info about ports from kernel debugfs.
@param in_files: Array of input files.
@return: Ports dictionary of port properties
"""
ports = {}
not_present_msg = "FAIL: There's no virtio-ports dir in debugfs"
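        # debugfs has to be mounted before the virtio-ports state can be read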
if not os.path.ismount(DEBUGPATH):
os.system('mount -t debugfs none %s' % (DEBUGPATH))
try:
if not os.path.isdir('%s/virtio-ports' % (DEBUGPATH)):
print not_present_msg
except Exception:
print not_present_msg
else:
viop_names = os.listdir('%s/virtio-ports' % (DEBUGPATH))
if in_files is not None:
dev_names = os.listdir('/dev')
rep = re.compile(r"vport[0-9]p[0-9]+")
dev_names = filter(lambda x: rep.match(x) is not None, dev_names)
if len(dev_names) != len(in_files):
print ("FAIL: Not all ports were successfully initialized "
"in /dev, only %d from %d." % (len(dev_names),
len(in_files)))
return
if len(viop_names) != len(in_files):
print ("FAIL: Not all ports were successfuly initialized "
"in debugfs, only %d from %d." % (len(viop_names),
len(in_files)))
return
for name in viop_names:
open_db_file = "%s/virtio-ports/%s" % (DEBUGPATH, name)
f = open(open_db_file, 'r')
port = {}
file = []
for line in iter(f):
file.append(line)
try:
for line in file:
m = re.match("(\S+): (\S+)", line)
port[m.group(1)] = m.group(2)
if port['is_console'] == "yes":
port["path"] = "/dev/hvc%s" % (port["console_vtermno"])
# Console works like a serialport
else:
port["path"] = "/dev/%s" % name
if not os.path.exists(port['path']):
print "FAIL: %s not exist" % port['path']
sysfspath = SYSFSPATH + name
if not os.path.isdir(sysfspath):
print "FAIL: %s not exist" % (sysfspath)
info_name = sysfspath + "/name"
port_name = self._readfile(info_name).strip()
if port_name != port["name"]:
print ("FAIL: Port info does not match "
"\n%s - %s\n%s - %s" %
(info_name , port_name,
"%s/virtio-ports/%s" % (DEBUGPATH, name),
port["name"]))
dev_ppath = DEVPATH + port_name
if not os.path.exists(dev_ppath):
print "FAIL: Symlink %s does not exist." % dev_ppath
if not os.path.realpath(dev_ppath) != "/dev/name":
print "FAIL: Symlink %s is not correct." % dev_ppath
except AttributeError:
print ("Bad data on file %s:\n%s. " %
(open_db_file, "".join(file).strip()))
print "FAIL: Bad data on file %s." % open_db_file
return
ports[port['name']] = port
f.close()
return ports
def check_zero_sym(self):
"""
Check if port /dev/vport0p0 was created.
"""
symlink = "/dev/vport0p0"
if os.path.exists(symlink):
print "PASS: Symlink %s exists." % symlink
else:
print "FAIL: Symlink %s does not exist." % symlink
def init(self, in_files):
"""
Init and check port properties.
"""
self.ports = self._get_port_status(in_files)
if self.ports is None:
return
for item in in_files:
if (item[1] != self.ports[item[0]]["is_console"]):
print self.ports
print "FAIL: Host console is not like console on guest side\n"
return
print "PASS: Init and check virtioconsole files in system."
class Switch(Thread):
"""
Thread that sends data between ports.
"""
def __init__ (self, in_files, out_files, event,
cachesize=1024, method=0):
"""
@param in_files: Array of input files.
@param out_files: Array of output files.
@param method: Method of read/write access.
@param cachesize: Block to receive and send.
"""
Thread.__init__(self, name="Switch")
self.in_files = in_files
self.out_files = out_files
self.exit_thread = event
self.method = method
self.cachesize = cachesize
def _none_mode(self):
"""
Read and write to device in blocking mode
"""
data = ""
while not self.exit_thread.isSet():
data = ""
for desc in self.in_files:
data += os.read(desc, self.cachesize)
if data != "":
for desc in self.out_files:
os.write(desc, data)
def _poll_mode(self):
"""
Read and write to device in polling mode.
"""
pi = select.poll()
po = select.poll()
for fd in self.in_files:
pi.register(fd, select.POLLIN)
for fd in self.out_files:
po.register(fd, select.POLLOUT)
while not self.exit_thread.isSet():
data = ""
t_out = self.out_files
readyf = pi.poll(1.0)
for i in readyf:
data += os.read(i[0], self.cachesize)
if data != "":
while ((len(t_out) != len(readyf)) and not
self.exit_thread.isSet()):
readyf = po.poll(1.0)
for desc in t_out:
os.write(desc, data)
def _select_mode(self):
"""
Read and write to device in selecting mode.
"""
while not self.exit_thread.isSet():
ret = select.select(self.in_files, [], [], 1.0)
data = ""
if ret[0] != []:
for desc in ret[0]:
data += os.read(desc, self.cachesize)
if data != "":
ret = select.select([], self.out_files, [], 1.0)
while ((len(self.out_files) != len(ret[1])) and not
self.exit_thread.isSet()):
ret = select.select([], self.out_files, [], 1.0)
for desc in ret[1]:
os.write(desc, data)
def run(self):
if (self.method == VirtioGuest.LOOP_POLL):
self._poll_mode()
elif (self.method == VirtioGuest.LOOP_SELECT):
self._select_mode()
else:
self._none_mode()
class Sender(Thread):
"""
Creates a thread which sends random blocks of data to dst port.
"""
def __init__(self, port, event, length):
"""
@param port: Destination port
@param length: Length of the random data block
"""
Thread.__init__(self, name="Sender")
self.port = port
self.exit_thread = event
self.data = array.array('L')
for i in range(max(length / self.data.itemsize, 1)):
self.data.append(random.randrange(sys.maxint))
def run(self):
while not self.exit_thread.isSet():
os.write(self.port, self.data)
def _open(self, in_files):
"""
Open devices and return array of descriptors
@param in_files: Files array
@return: Array of descriptor
"""
f = []
for item in in_files:
name = self.ports[item]["path"]
if (name in self.files):
f.append(self.files[name])
else:
try:
self.files[name] = os.open(name, os.O_RDWR)
if (self.ports[item]["is_console"] == "yes"):
print os.system("stty -F %s raw -echo" % (name))
print os.system("stty -F %s -a" % (name))
f.append(self.files[name])
except Exception, inst:
print "FAIL: Failed to open file %s" % (name)
raise inst
return f
@staticmethod
def pollmask_to_str(mask):
"""
        Convert a poll mask to its string representation.
@param mask: poll return mask
"""
str = ""
if (mask & select.POLLIN):
str += "IN "
if (mask & select.POLLPRI):
str += "PRI IN "
if (mask & select.POLLOUT):
str += "OUT "
if (mask & select.POLLERR):
str += "ERR "
if (mask & select.POLLHUP):
str += "HUP "
if (mask & select.POLLMSG):
str += "MSG "
return str
def poll(self, port, expected, timeout=500):
"""
        Poll events from the device and print them as text.
        @param port: Port to poll.
"""
in_f = self._open([port])
p = select.poll()
p.register(in_f[0])
mask = p.poll(timeout)
maskstr = VirtioGuest.pollmask_to_str(mask[0][1])
if (mask[0][1] & expected) == expected:
print "PASS: Events: " + maskstr
else:
emaskstr = VirtioGuest.pollmask_to_str(expected)
print "FAIL: Events: " + maskstr + " Expected: " + emaskstr
def lseek(self, port, pos, how):
"""
Use lseek on the device. The device is unseekable so PASS is returned
when lseek command fails and vice versa.
@param port: Name of the port
@param pos: Offset
        @param how: Relative offset os.SEEK_{SET,CUR,END}
"""
fd = self._open([port])[0]
try:
os.lseek(fd, pos, how)
except Exception, inst:
if inst.errno == 29:
print "PASS: the lseek failed as expected"
else:
print inst
print "FAIL: unknown error"
else:
print "FAIL: the lseek unexpectedly passed"
def blocking(self, port, mode=False):
"""
Set port function mode blocking/nonblocking
@param port: port to set mode
@param mode: False to set nonblock mode, True for block mode
"""
fd = self._open([port])[0]
try:
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
if not mode:
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
else:
fcntl.fcntl(fd, fcntl.F_SETFL, fl & ~os.O_NONBLOCK)
except Exception, inst:
print "FAIL: Setting (non)blocking mode: " + str(inst)
return
if mode:
print "PASS: set to blocking mode"
else:
print "PASS: set to nonblocking mode"
def __call__(self, sig, frame):
"""
        Call operator. Used as a signal handler for SIGIO.
"""
if (sig == signal.SIGIO):
self.sigio_handler(sig, frame)
def sigio_handler(self, sig, frame):
"""
Handler for sigio operation.
@param sig: signal which call handler.
@param frame: frame of caller
"""
if self.poll_fds:
p = select.poll()
map(p.register, self.poll_fds.keys())
masks = p.poll(1)
print masks
for mask in masks:
self.poll_fds[mask[0]][1] |= mask[1]
def get_sigio_poll_return(self, port):
"""
        Print PASS or FAIL and the poll value in string format.
@param port: Port to check poll information.
"""
fd = self._open([port])[0]
maskstr = VirtioGuest.pollmask_to_str(self.poll_fds[fd][1])
if (self.poll_fds[fd][0] ^ self.poll_fds[fd][1]):
emaskstr = VirtioGuest.pollmask_to_str(self.poll_fds[fd][0])
print "FAIL: Events: " + maskstr + " Expected: " + emaskstr
else:
print "PASS: Events: " + maskstr
self.poll_fds[fd][1] = 0
def set_pool_want_return(self, port, poll_value):
"""
        Set the expected poll mask for a port.
        @param port: Port for which the expected mask should be set.
        @param poll_value: Expected value used when checking the SIGIO signal.
"""
fd = self._open([port])[0]
self.poll_fds[fd] = [poll_value, 0]
print "PASS: Events: " + VirtioGuest.pollmask_to_str(poll_value)
def catching_signal(self):
"""
        @return: True if the SIGIO signal should be caught, False if it should
        be ignored, and None when the configuration has not changed.
"""
ret = self.catch_signal
self.catch_signal = None
return ret
def async(self, port, mode=True, exp_val=0):
"""
Set port function mode async/sync.
        @param port: Port which should be polled.
        @param mode: False to set sync mode, True to set async mode.
        @param exp_val: Expected poll value.
"""
fd = self._open([port])[0]
try:
fcntl.fcntl(fd, fcntl.F_SETOWN, os.getpid())
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
self.use_config.clear()
if mode:
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_ASYNC)
self.poll_fds[fd] = [exp_val, 0]
self.catch_signal = True
else:
del self.poll_fds[fd]
fcntl.fcntl(fd, fcntl.F_SETFL, fl & ~os.O_ASYNC)
self.catch_signal = False
os.kill(os.getpid(), signal.SIGUSR1)
self.use_config.wait()
except Exception, inst:
print "FAIL: Setting (a)sync mode: " + str(inst)
return
if mode:
print "PASS: Set to async mode"
else:
print "PASS: Set to sync mode"
def close(self, file):
"""
Close open port.
@param file: File to close.
"""
descriptor = None
path = self.ports[file]["path"]
if path is not None:
if path in self.files.keys():
descriptor = self.files[path]
del self.files[path]
if descriptor is not None:
try:
os.close(descriptor)
except Exception, inst:
print "FAIL: Closing the file: " + str(inst)
return
print "PASS: Close"
def open(self, in_file):
"""
        Directly open a single port device.
        @param in_file: Name of the port to open.
"""
name = self.ports[in_file]["path"]
try:
self.files[name] = os.open(name, os.O_RDWR)
if (self.ports[in_file]["is_console"] == "yes"):
print os.system("stty -F %s raw -echo" % (name))
print "PASS: Open all filles correctly."
except Exception, inst:
print "%s\nFAIL: Failed open file %s" % (str(inst), name)
def loopback(self, in_files, out_files, cachesize=1024, mode=LOOP_NONE):
"""
Start a switch thread.
(There is a problem with multiple opens of a single file).
@param in_files: Array of input files.
@param out_files: Array of output files.
@param cachesize: Cachesize.
@param mode: Mode of switch.
"""
self.ports = self._get_port_status()
in_f = self._open(in_files)
out_f = self._open(out_files)
s = self.Switch(in_f, out_f, self.exit_thread, cachesize, mode)
s.start()
self.threads.append(s)
print "PASS: Start switch"
def exit_threads(self):
"""
        End all running data switch threads.
"""
self.exit_thread.set()
for th in self.threads:
print "join"
th.join()
self.exit_thread.clear()
del self.threads[:]
for desc in self.files.itervalues():
os.close(desc)
self.files.clear()
print "PASS: All threads finished"
def die(self):
"""
Quit consoleswitch.
"""
self.exit_threads()
exit()
def send_loop_init(self, port, length):
"""
Prepares the sender thread. Requires clean thread structure.
"""
self.ports = self._get_port_status()
in_f = self._open([port])
self.threads.append(self.Sender(in_f[0], self.exit_thread, length))
print "PASS: Sender prepare"
def send_loop(self):
"""
        Start sender data transfer. Requires send_loop_init to be run first.
"""
self.threads[0].start()
print "PASS: Sender start"
def send(self, port, length=1, mode=True, is_static=False):
"""
        Send data of a given length.
        @param port: Port to write data to.
        @param length: Length of data.
        @param mode: True = loop mode, False = one-shot mode.
        @param is_static: True to repeatedly send a fixed 4 KB block instead of random data of the given length.
"""
in_f = self._open([port])
data = ""
writes = 0
if not is_static:
while len(data) < length:
data += "%c" % random.randrange(255)
try:
writes = os.write(in_f[0], data)
except Exception, inst:
print inst
else:
while len(data) < 4096:
data += "%c" % random.randrange(255)
if mode:
while (writes < length):
try:
writes += os.write(in_f[0], data)
except Exception, inst:
print inst
if writes >= length:
print "PASS: Send data length %d" % writes
else:
print ("FAIL: Partial send: desired %d, transfered %d" %
(length, writes))
def recv(self, port, length=1, buffer=1024, mode=True):
"""
        Receive data of a given length.
        @param port: Port to read data from.
        @param length: Length of data.
        @param buffer: Size of each read.
        @param mode: True = loop mode, False = one-shot mode.
"""
in_f = self._open([port])
recvs = ""
try:
recvs = os.read(in_f[0], buffer)
except Exception, inst:
print inst
if mode:
while (len(recvs) < length):
try:
recvs += os.read(in_f[0], buffer)
except Exception, inst:
print inst
if len(recvs) >= length:
print "PASS: Recv data length %d" % len(recvs)
else:
print ("FAIL: Partial recv: desired %d, transfered %d" %
(length, len(recvs)))
def clean_port(self, port, buffer=1024):
in_f = self._open([port])
ret = select.select([in_f[0]], [], [], 1.0)
buf = ""
if ret[0]:
buf = os.read(in_f[0], buffer)
print ("PASS: Rest in socket: ") + str(buf[:10])
def is_alive():
"""
    Check whether only the main thread is alive and whether the guest reacts.
"""
if threading.activeCount() == 2:
print ("PASS: Guest is ok no thread alive")
else:
threads = ""
for thread in threading.enumerate():
threads += thread.name + ", "
print ("FAIL: On guest run thread. Active thread:" + threads)
def compile():
"""
Compile virtio_console_guest.py to speed up.
"""
import py_compile
py_compile.compile(sys.path[0] + "/virtio_console_guest.py")
print "PASS: compile"
sys.exit()
def guest_exit():
global exiting
exiting = True
def worker(virt):
"""
Worker thread (infinite) loop of virtio_guest.
"""
global exiting
print "PASS: Daemon start."
p = select.poll()
p.register(sys.stdin.fileno())
while not exiting:
d = p.poll()
if (d[0][1] == select.POLLIN):
str = raw_input()
try:
exec str
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "On Guest exception from: \n" + "".join(
traceback.format_exception(exc_type,
exc_value,
exc_traceback))
print "FAIL: Guest command exception."
elif (d[0][1] & select.POLLHUP):
time.sleep(0.5)
def sigusr_handler(sig, frame):
pass
class Daemon:
"""
Daemonize guest
"""
def __init__(self, stdin, stdout, stderr):
"""
Init daemon.
@param stdin: path to stdin file.
@param stdout: path to stdout file.
@param stderr: path to stderr file.
"""
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
@staticmethod
def is_file_open(path):
"""
        Determine which processes have the file open.
        @param path: Path to file.
        @return: [[pid, mode], ...]
"""
opens = []
pids = os.listdir('/proc')
for pid in sorted(pids):
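            # Skip /proc entries that are not numeric process IDs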
try:
int(pid)
except ValueError:
continue
fd_dir = os.path.join('/proc', pid, 'fd')
try:
for file in os.listdir(fd_dir):
try:
p = os.path.join(fd_dir, file)
link = os.readlink(os.path.join(fd_dir, file))
if link == path:
mode = os.lstat(p).st_mode
opens.append([pid, mode])
except OSError:
continue
except OSError, e:
if e.errno == 2:
continue
raise
return opens
def daemonize(self):
"""
Run guest as a daemon.
"""
try:
pid = os.fork()
if pid > 0:
return False
except OSError, e:
sys.stderr.write("Daemonize failed: %s\n" % (e))
sys.exit(1)
os.chdir("/")
os.setsid()
os.umask(0)
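        # Fork a second time so the daemon can never reacquire a controlling terminal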
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
sys.stderr.write("Daemonize failed: %s\n" % (e))
sys.exit(1)
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin,'r')
so = file(self.stdout,'w')
se = file(self.stderr,'w')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)
return True
def start(self):
"""
        Start the daemon unless one is already running.
"""
# Check for a pidfile to see if the daemon already runs
openers = self.is_file_open(self.stdout)
rundaemon = False
if len(openers) > 0:
for i in openers:
if i[1] & stat.S_IWUSR:
rundaemon = True
openers.remove(i)
if len(openers) > 0:
for i in openers:
os.kill(int(i[0]), 9)
time.sleep(0.3)
# Start the daemon
if not rundaemon:
if self.daemonize():
self.run()
def run(self):
"""
Run guest main thread
"""
global exiting
virt = VirtioGuest()
slave = Thread(target=worker, args=(virt, ))
slave.start()
signal.signal(signal.SIGUSR1, sigusr_handler)
signal.signal(signal.SIGALRM, sigusr_handler)
while not exiting:
signal.alarm(1)
signal.pause()
catch = virt.catching_signal()
if catch:
signal.signal(signal.SIGIO, virt)
elif catch is False:
signal.signal(signal.SIGIO, signal.SIG_DFL)
if catch is not None:
virt.use_config.set()
print "PASS: guest_exit"
sys.exit(0)
def main():
"""
Main function with infinite loop to catch signal from system.
"""
if (len(sys.argv) > 1) and (sys.argv[1] == "-c"):
compile()
stdin = "/tmp/guest_daemon_pi"
stdout = "/tmp/guest_daemon_po"
stderr = "/tmp/guest_daemon_pe"
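    # Create the named pipes used to talk to the daemonized worker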
for f in [stdin, stdout, stderr]:
try:
os.mkfifo(f)
except OSError, e:
if e.errno == 17:
pass
daemon = Daemon(stdin,
stdout,
stderr)
daemon.start()
d_stdin = os.open(stdin, os.O_WRONLY)
d_stdout = os.open(stdout, os.O_RDONLY)
d_stderr = os.open(stderr, os.O_RDONLY)
s_stdin = sys.stdin.fileno()
s_stdout = sys.stdout.fileno()
s_stderr = sys.stderr.fileno()
pid = filter(lambda x: x[0] != str(os.getpid()),
daemon.is_file_open(stdout))[0][0]
print "PASS: Start"
while 1:
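        # Relay data between this process's stdio and the daemon's pipes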
ret = select.select([d_stderr,
d_stdout,
s_stdin],
[], [], 1.0)
if s_stdin in ret[0]:
os.write(d_stdin,os.read(s_stdin, 1))
if d_stdout in ret[0]:
os.write(s_stdout,os.read(d_stdout, 1024))
if d_stderr in ret[0]:
os.write(s_stderr,os.read(d_stderr, 1024))
if not os.path.exists("/proc/" + pid):
sys.exit(0)
os.close(d_stdin)
os.close(d_stdout)
os.close(d_stderr)
if __name__ == "__main__":
main()
| gpl-2.0 | 3,521,360,449,427,522,600 | 28.89451 | 81 | 0.478359 | false | 4.0296 | false | false | false |
garretstuber/lightning-python | lightning/main.py | 1 | 6976 | import requests
from .session import Session
from .visualization import Visualization, VisualizationLocal
class Lightning(object):
def __init__(self, host="http://localhost:3000", local=False, ipython=False, auth=None, size='medium'):
if ipython:
self.startup_message_ipython()
else:
self.startup_message()
if local:
self.enable_local()
else:
self.local_enabled = False
self.set_host(host)
self.auth = auth
if auth is not None:
if isinstance(auth, tuple):
self.set_basic_auth(auth[0], auth[1])
status = self.check_status()
if not status:
raise ValueError("Could not access server")
if ipython:
self.enable_ipython()
self.set_size(size)
else:
self.ipython_enabled = False
self.set_size('full')
def __repr__(self):
s = 'Lightning\n'
if hasattr(self, 'host') and self.host is not None and not self.local_enabled:
s += 'host: %s\n' % self.host
if self.local_enabled:
s += 'host: local\n'
if hasattr(self, 'session') and self.session is not None:
s += 'session: %s\n' % self.session.id
return s
def get_ipython_markup_link(self):
return '%s/js/ipython-comm.js' % self.host
def enable_ipython(self, **kwargs):
"""
Enable plotting in the iPython notebook.
Once enabled, all lightning plots will be automatically produced
within the iPython notebook. They will also be available on
your lightning server within the current session.
"""
# inspired by code powering similar functionality in mpld3
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L357
from IPython.core.getipython import get_ipython
from IPython.display import display, Javascript, HTML
self.ipython_enabled = True
self.set_size('medium')
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
if self.local_enabled:
from lightning.visualization import VisualizationLocal
js = VisualizationLocal.load_embed()
display(HTML("<script>" + js + "</script>"))
print('Running local mode, some functionality limited.\n')
formatter.for_type(VisualizationLocal, lambda viz, kwds=kwargs: viz.get_html())
else:
formatter.for_type(Visualization, lambda viz, kwds=kwargs: viz.get_html())
r = requests.get(self.get_ipython_markup_link(), auth=self.auth)
display(Javascript(r.text))
def disable_ipython(self):
"""
Disable plotting in the iPython notebook.
After disabling, lightning plots will be produced in your lightning server,
but will not appear in the notebook.
"""
from IPython.core.getipython import get_ipython
self.ipython_enabled = False
ip = get_ipython()
formatter = ip.display_formatter.formatters['text/html']
formatter.type_printers.pop(Visualization, None)
formatter.type_printers.pop(VisualizationLocal, None)
def create_session(self, name=None):
"""
Create a lightning session.
Can create a session with the provided name, otherwise session name
will be "Session No." with the number automatically generated.
"""
self.session = Session.create(self, name=name)
return self.session
def use_session(self, session_id):
"""
Use the specified lightning session.
Specify a lightning session by id number. Check the number of an existing
session in the attribute lightning.session.id.
"""
self.session = Session(lgn=self, id=session_id)
return self.session
def enable_local(self):
"""
Enable a local mode.
Data is handled locally and embedded via templates.
Does not require a running Lightning server.
Useful for notebooks, and can be used offline.
"""
self.local_enabled = True
def disable_local(self):
"""
Disable local mode.
"""
self.local_enabled = False
def set_basic_auth(self, username, password):
"""
        Set basic authentication credentials.
"""
from requests.auth import HTTPBasicAuth
self.auth = HTTPBasicAuth(username, password)
return self
def set_host(self, host):
"""
Set the host for a lightning server.
Host can be local (e.g. http://localhost:3000), a heroku
instance (e.g. http://lightning-test.herokuapp.com), or
a independently hosted lightning server.
"""
if host[-1] == '/':
host = host[:-1]
self.host = host
return self
def set_size(self, size='medium'):
"""
Set a figure size using one of four options.
Convention is 'small': 400px, 'medium': 600px, 'large': 800px,
and 'full' will use the entire width
"""
if size not in ['small', 'medium', 'large', 'full']:
raise ValueError("Size must be one of 'small', 'medium', 'large', 'full'")
self.size = size
def check_status(self):
"""
Check the server for status
"""
try:
r = requests.get(self.host + '/status', auth=self.auth,
timeout=(10.0, 10.0))
if not r.status_code == requests.codes.ok:
print("Problem connecting to server at %s" % self.host)
print("status code: %s" % r.status_code)
return False
else:
print("Connected to server at %s" % self.host)
return True
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema,
requests.exceptions.InvalidSchema) as e:
print("Problem connecting to server at %s" % self.host)
print("error: %s" % e)
return False
def startup_message_ipython(self):
import os
import base64
try:
from IPython.display import display, HTML
icon = os.path.join(os.path.dirname(__file__), 'lib/icon.png')
with open(icon, "rb") as imfile:
im = b"".join([b'data:image/png;base64,', base64.b64encode(imfile.read())]).decode("utf-8")
t = "<div style='margin-top:8px'><img src='%s' width='30px' height='35px' " \
"style='display: inline-block; padding-right: 10px'>" \
"</img><span>Lightning initialized</span></div>" % im
display(HTML(t))
except:
print("Lightning initialized")
def startup_message(self):
print("Lightning initialized") | mit | -3,404,102,391,427,864,600 | 33.369458 | 107 | 0.578698 | false | 4.23301 | false | false | false |
trong-nguyen/udacity-fullstack | aws_lambda/trongn/proxy.py | 1 | 4789 | import os
import json
import urllib
from base64 import b64encode
# external libs
import requests
class Proxy(object):
URL = None
ACCESS_TOKEN = None
def __init__(self):
super(Proxy, self).__init__()
def build_query(self, params):
return urllib.urlencode(params)
def build_headers(self):
return {
'Content-Type': 'application/json',
}
def get_url(self):
return self.URL
def fetch(self, params):
response = requests.get(
url = '?'.join([self.get_url(), self.build_query(params)]),
headers = self.build_headers()
)
return response
class YelpProxy(Proxy):
URL = 'https://api.yelp.com/v3/businesses/search'
def get_access_token(self):
'''
Use environment credentials to obtain access token
Tokens are cached so it takes only one request
'''
if not self.ACCESS_TOKEN:
url = 'https://api.yelp.com/oauth2/token'
params = {
'grant_type' : 'client_credentials',
'client_id' : os.environ['YELP_CLIENT_ID'],
'client_secret' : os.environ['YELP_CLIENT_SECRET']
}
response = requests.post(url, params=params)
message = response.json()
self.ACCESS_TOKEN = message['access_token']
return self.ACCESS_TOKEN
def build_headers(self):
headers = super(YelpProxy, self).build_headers()
access_token = self.get_access_token()
headers.update({
'Authorization': 'Bearer {}'.format(access_token)
})
return headers
class TwitterProxy(Proxy):
URL = 'https://api.twitter.com/1.1/search/tweets.json'
def get_access_token(self):
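        # Obtain an app-only bearer token from Twitter's OAuth2 token endpoint; cached after the first request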
if not self.ACCESS_TOKEN:
url = 'https://api.twitter.com/oauth2/token'
credentials = b64encode(':'.join([
os.environ['TWITTER_CONSUMER_KEY'],
os.environ['TWITTER_CONSUMER_SECRET']
]))
headers = {
'Authorization': 'Basic ' + credentials,
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'
}
params = {
'grant_type': 'client_credentials'
}
response = requests.post(url, params=params, headers=headers)
message = response.json()
self.ACCESS_TOKEN = message['access_token']
return self.ACCESS_TOKEN
def build_headers(self):
headers = super(TwitterProxy, self).build_headers()
access_token = self.get_access_token()
headers.update({
'Authorization': 'Bearer {}'.format(access_token)
})
return headers
class FoursquareProxy(Proxy):
URL = 'https://api.foursquare.com/v2/venues/search'
def build_query(self, params):
params = dict(params)
auth = {
'client_id' : os.environ['FOURSQUARE_CLIENT_ID'],
'client_secret' : os.environ['FOURSQUARE_CLIENT_SECRET'],
'v' : '20170801'
}
params.update(auth)
return super(FoursquareProxy, self).build_query(params)
def base_handler(proxy):
'''
    A simple catch / add-auth / request-and-return kind of proxy server.
    The handler forwards the entire query string to the 3rd-party API,
    only adding authentication credentials if necessary.
'''
fetcher = proxy()
    # depending on whether proxy integration is used,
    # data should be in serialized or plain dict form;
    # status code, headers and body must be present in the response
def handler(event, context):
response_headers = {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
}
try:
params = event['queryStringParameters']
r = fetcher.fetch(params)
return {
'statusCode' : r.status_code,
'body' : r.content,
'headers' : response_headers
}
except Exception as e:
print(e) # print the cause
return {
'statusCode' : '400',
'body' : 'Exception thrown while fetching data',
'headers' : response_headers
}
return handler
def make_proxy_handler(name):
proxy = None
available_proxies = {
'Twitter' : TwitterProxy,
'Foursquare' : FoursquareProxy,
'Yelp' : YelpProxy
}
try:
proxy = available_proxies[name]
except KeyError as e:
raise ValueError('Invalid proxy: {}'.format(name))
return base_handler(proxy)
| gpl-3.0 | -5,885,370,068,204,638,000 | 27.005848 | 81 | 0.554604 | false | 4.19352 | false | false | false |