Dataset columns: repo_name (string, length 5 to 92), path (string, length 4 to 232), copies (string, 19 classes), size (string, length 4 to 7), content (string, length 721 to 1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51 to 99.9), line_max (int64, 15 to 997), alpha_frac (float64, 0.25 to 0.97), autogenerated (bool, 1 class).

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
bkbilly/AlarmPI | alarmcode/logs.py | 1 | 8304 | #!/usr/bin/env python
import re
import threading
import time
from datetime import datetime
import pytz
import logging
logging = logging.getLogger('alarmpi')
class Logs():
def __init__(self, wd, logfile, timezone):
self.wd = wd
self.logfile = logfile
try:
self.mytimezone = pytz.timezone(timezone)
except Exception:
logging.exception("Can't find the correct timezone")
self.mytimezone = pytz.utc
self.updateUI = lambda *args, **kwargs: 0
self.limit = 10
self.logtypes = 'all'
def setCallbackUpdateUI(self, callback):
self.updateUI = callback
def setLogFilters(self, limit, logtypes):
""" Sets the global filters for the getSensorsLog method """
self.limit = limit
self.logtypes = logtypes
def writeLog(self, logType, message):
""" Write log events into a file and send the last to UI.
It also uses the timezone from json file to get the local time.
"""
myTimeLog = datetime.now(tz=self.mytimezone)
myTimeLog = myTimeLog.strftime("%Y-%m-%d %H:%M:%S")
logmsg = '({0}) [{1}] {2}\n'.format(logType, myTimeLog, message)
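# Example of a line produced by the format above (illustrative); the type
# field may itself be a comma separated tuple such as "sensor,on,<uuid>",
# which getSensorsLog later splits on commas:
#   (system) [2019-01-01 12:00:00] AlarmPI started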
with open(self.logfile, "a") as myfile:
myfile.write(logmsg)
self.updateUI('sensorsLog', self.getSensorsLog(
self.limit, selectTypes=self.logtypes))
def startTrimThread(self, lines=1000):
threadTrimLogFile = threading.Thread(
target=self.trimLogFile,
args=[lines]
)
threadTrimLogFile.daemon = True
threadTrimLogFile.start()
def _convert_timedelta(self, duration):
""" Converts a time difference into human readable format """
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
diffTxt = ""
if days > 0:
diffTxt = "{days} days, {hours} hour, {minutes} min, {seconds} sec"
elif hours > 0:
diffTxt = "{hours} hour, {minutes} min, {seconds} sec"
elif minutes > 0:
diffTxt = "{minutes} min, {seconds} sec"
else:
diffTxt = "{seconds} sec"
diffTxt = diffTxt.format(
days=days, hours=hours, minutes=minutes, seconds=seconds)
return diffTxt
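# Example (illustrative): with the remainder-based hours above, a duration of
# 1 day, 1 hour, 2 minutes and 3 seconds is formatted as
# "1 days, 1 hour, 2 min, 3 sec".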
def trimLogFile(self, lines):
""" Trims the log file in an interval of 24 hours to 1000 lines """
# lines = 1000 # Number of lines of logs to keep
repeat_every_n_sec = 86400 # 24 Hours
while lines is not None and lines > 0:
with open(self.logfile, 'r') as f:
data = f.readlines()
with open(self.logfile, 'w') as f:
f.writelines(data[-lines:])
time.sleep(repeat_every_n_sec)
def getSensorsLog(self, limit=100, fromText=None,
selectTypes='all', filterText=None,
getFormat='text', combineSensors=True):
""" Returns the last n lines if the log file.
If selectTypes is specified, then it returns only this type of logs.
Available types: user_action, sensor,
system, alarm
If the getFormat is specified as json, then it returns it in a
json format (programmer friendly)
"""
# Fix inputs
if (type(limit) != int and limit is not None):
if (limit.isdigit()):
limit = int(limit)
else:
limit = 100
if (type(selectTypes) == str):
selectTypes = selectTypes.split(',')
elif selectTypes is None:
selectTypes = 'all'.split(',')
if (type(combineSensors) != bool and combineSensors is not None):
if (combineSensors.lower() == 'true'):
combineSensors = True
elif (combineSensors.lower() == 'false'):
combineSensors = False
else:
combineSensors = True
if getFormat is None:
getFormat = 'text'
# Read from File the Logs
logs = []
with open(self.logfile, "r") as f:
lines = f.readlines()
startedSensors = {}
for line in lines:
logType = None
logTime = None
logText = None
# Analyze log line for each category
try:
mymatch = re.match(r'^\((.*)\) \[(.*)\] (.*)', line)
if mymatch:
logType = mymatch.group(1).split(',')
logTime = mymatch.group(2)
logText = mymatch.group(3)
except Exception:
logging.exception("Can't find the correct log group:")
mymatch = re.match(r'^\[(.*)\] (.*)', line)
if mymatch:
logType = ["unknown", "unknown"]
logTime = mymatch.group(1)
logText = mymatch.group(2)
# append them to a list
if logType is not None and logTime is not None and logText is not None:
logs.append({
'type': logType,
'event': logText,
'time': logTime
})
# Add endtime to the sensors
if (combineSensors):
tmplogs = []
index = 0
startedSensors = {}
for log in logs:
if 'sensor' in log['type'][0].lower():
status, uuid = log['type'][1], log['type'][2]
if status == 'on' and uuid not in startedSensors:
startedSensors[uuid] = {
'start': log['time'],
'ind': index
}
index += 1
tmplogs.append(log)
elif status == 'off':
try:
info = startedSensors.pop(uuid, None)
if info is not None:
starttime = datetime.strptime(
info['start'], "%Y-%m-%d %H:%M:%S")
endtime = datetime.strptime(
log['time'], "%Y-%m-%d %H:%M:%S")
timediff = self._convert_timedelta(endtime - starttime)
tmplogs[info['ind']]['timediff'] = timediff
tmplogs[info['ind']]['timeend'] = log['time']
except Exception:
logging.exception("Error combining logs")
logging.error(info)
else:
index += 1
tmplogs.append(log)
logs = tmplogs
# Filter from last found text till the end (e.g. Alarm activated)
if (fromText not in (None, 'all')):
tmplogs = []
index = 0
for log in reversed(logs):
index += 1
if (fromText.lower() in log['event'].lower()):
break
logs = logs[-index:]
# Filter by Types (e.g. sensor, user_action, ...)
if (selectTypes is not None):
if ('all' not in selectTypes):
tmplogs = []
for log in logs:
if (log['type'][0].lower() in selectTypes):
tmplogs.append(log)
logs = tmplogs
# Filter by text (e.g. pir, ...)
if (filterText not in (None, 'all')):
tmplogs = []
for log in logs:
if (filterText.lower() in log['event'].lower()):
tmplogs.append(log)
logs = tmplogs
# Convert to Human format
if (getFormat == 'text'):
tmplogs = []
for log in logs:
if ('timediff' in log):
tmplogs.append('[{0}] ({1}) {2}'.format(log['timeend'], log['timediff'], log['event']))
else:
tmplogs.append('[{0}] {1}'.format(log['time'], log['event']))
logs = tmplogs
return {"log": logs[-limit:]}
| mit | 9,113,431,894,522,070,000 | 36.071429 | 107 | 0.486031 | false |
weichweich/Pi-Timeswitch | Flask-Server/timeswitch/switch/schema.py | 1 | 2441 | import logging
import time
from flask import request
from flask_restful import Resource
from marshmallow import ValidationError, post_load, validates_schema
from marshmallow_jsonapi import Schema, fields
from timeswitch.switch.model import (Pin, Sequence, is_absolute_time,
is_relative_time)
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
LOGGER = logging.getLogger(__name__)
class AppError(Exception):
pass
def dasherize(text):
return text.replace('_', '-')
class SequenceSchema(Schema):
id = fields.String(dump_only=True)
start_time = fields.String(required=True)
start_range = fields.String(required=True)
end_time = fields.String(required=True)
end_range = fields.String(required=True)
pin = fields.Relationship(
related_url='/api/pins/{pin_id}',
related_url_kwargs={'pin_id': '<pin>'},
# Include resource linkage
many=False, include_data=True,
type_='pins'
)
@post_load
def make_sequence(self, data):
return Sequence(**data)
def handle_error(self, exc, data):
raise ValidationError(
'An error occurred with input: {0} \n {1}'.format(data, exc.messages))
def __str__(self):
if self.pin is None:
return "<Sequence: Start " + self.start_time + " End " +\
self.end_time + " Pin none>"
else:
return "<Sequence: Start " + self.start_time + " End " +\
self.end_time + " Pin " + str(self.pin) + ">"
class Meta:
type_ = 'sequences'
strict = True
class PinSchema(Schema):
id = fields.Str(dump_only=True)
number = fields.Integer(required=True)
name = fields.String(attribute='name')
state = fields.Integer()
sequences = fields.Relationship(
related_url='/api/pins/{pin_id}/sequences',
related_url_kwargs={'pin_id': '<id>'},
# Include resource linkage
many=True,
include_data=True,
type_='sequences',
schema='SequenceSchema'
)
@post_load
def make_pin(self, data):
return Pin(**data)
def handle_error(self, exc, data):
raise ValidationError(
'An error occurred with input: {0} \n {1}'.format(data, exc.messages))
class Meta:
type_ = 'pins'
strict = True
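# Usage sketch (illustrative; assumes Pin from timeswitch.switch.model accepts
# these keyword arguments and a marshmallow 2.x style dump() that returns a
# (data, errors) result):
#
#   pin = Pin(number=17, name="Pump relay", state=0)
#   document = PinSchema().dump(pin).data
#   # document is a JSON:API resource object with type "pins"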
| mit | 4,909,760,856,457,402,000 | 25.824176 | 82 | 0.60508 | false |
GNOME/orca | test/keystrokes/firefox/line_nav_roledescriptions.py | 1 | 2374 | #!/usr/bin/python
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Line Down",
["BRAILLE LINE: 'Focus me 1'",
" VISIBLE: 'Focus me 1', cursor=1",
"SPEECH OUTPUT: 'Focus me 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: 'Focus me 2'",
" VISIBLE: 'Focus me 2', cursor=1",
"SPEECH OUTPUT: 'Focus me 2 kill switch'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'Focus me 3 push button'",
" VISIBLE: 'Focus me 3 push button', cursor=1",
"SPEECH OUTPUT: 'Focus me 3 push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Line Down",
["BRAILLE LINE: 'Focus me 4 kill switch'",
" VISIBLE: 'Focus me 4 kill switch', cursor=1",
"SPEECH OUTPUT: 'Focus me 4 kill switch'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"5. Line Down",
["BRAILLE LINE: 'Focus me 5 push button Focus me 6 kill switch'",
" VISIBLE: 'Focus me 5 push button Focus me ', cursor=1",
"SPEECH OUTPUT: 'Focus me 5 push button'",
"SPEECH OUTPUT: 'Focus me 6 kill switch'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"6. Line Down",
["BRAILLE LINE: 'Here are some slides'",
" VISIBLE: 'Here are some slides', cursor=1",
"SPEECH OUTPUT: 'Presentation slide set'",
"SPEECH OUTPUT: 'Here are some slides'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 | 4,970,380,769,082,865,000 | 34.432836 | 70 | 0.70219 | false |
cmcqueen/cobs-python | python3/cobs/cobs/_cobs_py.py | 1 | 2890 | """
Consistent Overhead Byte Stuffing (COBS)
This version is for Python 3.x.
"""
class DecodeError(Exception):
pass
def _get_buffer_view(in_bytes):
mv = memoryview(in_bytes)
if mv.ndim > 1 or mv.itemsize > 1:
raise BufferError('object must be a single-dimension buffer of bytes.')
try:
mv = mv.cast('c')
except AttributeError:
pass
return mv
def encode(in_bytes):
"""Encode a string using Consistent Overhead Byte Stuffing (COBS).
Input is any byte string. Output is also a byte string.
Encoding guarantees no zero bytes in the output. The output
string will be expanded slightly, by a predictable amount.
An empty string is encoded to '\\x01'"""
if isinstance(in_bytes, str):
raise TypeError('Unicode-objects must be encoded as bytes first')
in_bytes_mv = _get_buffer_view(in_bytes)
final_zero = True
out_bytes = bytearray()
idx = 0
search_start_idx = 0
for in_char in in_bytes_mv:
if in_char == b'\x00':
final_zero = True
out_bytes.append(idx - search_start_idx + 1)
out_bytes += in_bytes_mv[search_start_idx:idx]
search_start_idx = idx + 1
else:
if idx - search_start_idx == 0xFD:
final_zero = False
out_bytes.append(0xFF)
out_bytes += in_bytes_mv[search_start_idx:idx+1]
search_start_idx = idx + 1
idx += 1
if idx != search_start_idx or final_zero:
out_bytes.append(idx - search_start_idx + 1)
out_bytes += in_bytes_mv[search_start_idx:idx]
return bytes(out_bytes)
def decode(in_bytes):
"""Decode a string using Consistent Overhead Byte Stuffing (COBS).
Input should be a byte string that has been COBS encoded. Output
is also a byte string.
A cobs.DecodeError exception will be raised if the encoded data
is invalid."""
if isinstance(in_bytes, str):
raise TypeError('Unicode-objects are not supported; byte buffer objects only')
in_bytes_mv = _get_buffer_view(in_bytes)
out_bytes = bytearray()
idx = 0
if len(in_bytes_mv) > 0:
while True:
length = ord(in_bytes_mv[idx])
if length == 0:
raise DecodeError("zero byte found in input")
idx += 1
end = idx + length - 1
copy_mv = in_bytes_mv[idx:end]
if b'\x00' in copy_mv:
raise DecodeError("zero byte found in input")
out_bytes += copy_mv
idx = end
if idx > len(in_bytes_mv):
raise DecodeError("not enough input bytes for length code")
if idx < len(in_bytes_mv):
if length < 0xFF:
out_bytes.append(0)
else:
break
return bytes(out_bytes)
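# Round-trip usage sketch (illustrative, not part of the module's public API):
# encoding never emits zero bytes and decode() restores the original input.
if __name__ == '__main__':
    for raw in (b'', b'\x00', b'Hello\x00world', bytes(range(1, 10))):
        enc = encode(raw)
        assert b'\x00' not in enc   # COBS output is free of zero bytes
        assert decode(enc) == raw   # decoding restores the exact input
        print(raw, '->', enc)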
| mit | 147,667,922,598,717,340 | 31.111111 | 86 | 0.577855 | false |
kiddinn/plaso | plaso/analysis/viper.py | 1 | 6045 | # -*- coding: utf-8 -*-
"""Analysis plugin to look up files in Viper and tag events."""
from plaso.analysis import hash_tagging
from plaso.analysis import logger
from plaso.analysis import manager
from plaso.containers import events
from plaso.lib import errors
class ViperAnalyzer(hash_tagging.HTTPHashAnalyzer):
"""Class that analyzes file hashes by consulting Viper.
REST API reference:
https://viper-framework.readthedocs.io/en/latest/usage/web.html#api
"""
SUPPORTED_HASHES = ['md5', 'sha256']
SUPPORTED_PROTOCOLS = ['http', 'https']
def __init__(self, hash_queue, hash_analysis_queue, **kwargs):
"""Initializes a Viper hash analyzer.
Args:
hash_queue (Queue.queue): contains hashes to be analyzed.
hash_analysis_queue (Queue.queue): that the analyzer will append
HashAnalysis objects this queue.
"""
super(ViperAnalyzer, self).__init__(
hash_queue, hash_analysis_queue, **kwargs)
self._checked_for_old_python_version = False
self._host = None
self._port = None
self._protocol = None
self._url = None
def _QueryHash(self, digest):
"""Queries the Viper Server for a specific hash.
Args:
digest (str): hash to look up.
Returns:
dict[str, object]: JSON response or None on error.
"""
if not self._url:
self._url = '{0:s}://{1:s}:{2:d}/file/find'.format(
self._protocol, self._host, self._port)
request_data = {self.lookup_hash: digest}
try:
json_response = self.MakeRequestAndDecodeJSON(
self._url, 'POST', data=request_data)
except errors.ConnectionError as exception:
json_response = None
logger.error('Unable to query Viper with error: {0!s}.'.format(
exception))
return json_response
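# Illustrative request/response shape (project names and tags are placeholders;
# only the overall structure is implied by GenerateLabels below):
#   POST {protocol}://{host}:{port}/file/find  with  {'sha256': '<digest>'}
#   -> {'<project>': [{'tags': ['<tag>', ...], ...}, ...]}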
def Analyze(self, hashes):
"""Looks up hashes in Viper using the Viper HTTP API.
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: hash analysis.
Raises:
RuntimeError: If no host has been set for Viper.
"""
hash_analyses = []
for digest in hashes:
json_response = self._QueryHash(digest)
hash_analysis = hash_tagging.HashAnalysis(digest, json_response)
hash_analyses.append(hash_analysis)
return hash_analyses
def SetHost(self, host):
"""Sets the address or hostname of the server running Viper server.
Args:
host (str): IP address or hostname to query.
"""
self._host = host
def SetPort(self, port):
"""Sets the port where Viper server is listening.
Args:
port (int): port to query.
"""
self._port = port
def SetProtocol(self, protocol):
"""Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: if the protocol is not supported.
"""
if protocol not in self.SUPPORTED_PROTOCOLS:
raise ValueError('Unsupported protocol: {0!s}'.format(protocol))
self._protocol = protocol
def TestConnection(self):
"""Tests the connection to the Viper server.
Returns:
bool: True if the Viper server instance is reachable.
"""
url = '{0:s}://{1:s}:{2:d}/test'.format(
self._protocol, self._host, self._port)
try:
json_response = self.MakeRequestAndDecodeJSON(url, 'GET')
except errors.ConnectionError:
json_response = None
return json_response is not None
class ViperAnalysisPlugin(hash_tagging.HashTaggingAnalysisPlugin):
"""An analysis plugin for looking up SHA256 hashes in Viper."""
# TODO: Check if there are other file types worth checking Viper for.
DATA_TYPES = ['pe:compilation:compilation_time']
NAME = 'viper'
def __init__(self):
"""Initializes a Viper analysis plugin."""
super(ViperAnalysisPlugin, self).__init__(ViperAnalyzer)
def GenerateLabels(self, hash_information):
"""Generates a list of strings that will be used in the event tag.
Args:
hash_information (dict[str, object]): JSON decoded contents of the result
of a Viper lookup, as produced by the ViperAnalyzer.
Returns:
list[str]: list of labels to apply to events.
"""
if not hash_information:
return ['viper_not_present']
projects = []
tags = []
for project, entries in hash_information.items():
if not entries:
continue
projects.append(project)
for entry in entries:
if entry['tags']:
tags.extend(entry['tags'])
if not projects:
return ['viper_not_present']
strings = ['viper_present']
for project_name in projects:
label = events.EventTag.CopyTextToLabel(
project_name, prefix='viper_project_')
strings.append(label)
for tag_name in tags:
label = events.EventTag.CopyTextToLabel(tag_name, prefix='viper_tag_')
strings.append(label)
return strings
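# Example (illustrative): for hash_information such as
#   {'malware': [{'tags': ['ransomware']}]}
# the labels would be roughly
#   ['viper_present', 'viper_project_malware', 'viper_tag_ransomware']
# assuming EventTag.CopyTextToLabel only prepends the prefix to the text.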
def SetHost(self, host):
"""Sets the address or hostname of the server running Viper server.
Args:
host (str): IP address or hostname to query.
"""
self._analyzer.SetHost(host)
def SetPort(self, port):
"""Sets the port where Viper server is listening.
Args:
port (int): port to query.
"""
self._analyzer.SetPort(port)
def SetProtocol(self, protocol):
"""Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: If an invalid protocol is selected.
"""
protocol = protocol.lower().strip()
if protocol not in ['http', 'https']:
raise ValueError('Invalid protocol specified for Viper lookup')
self._analyzer.SetProtocol(protocol)
def TestConnection(self):
"""Tests the connection to the Viper server.
Returns:
bool: True if the Viper server instance is reachable.
"""
return self._analyzer.TestConnection()
manager.AnalysisPluginManager.RegisterPlugin(ViperAnalysisPlugin)
| apache-2.0 | -6,213,781,568,674,037,000 | 26.352941 | 79 | 0.656245 | false |
mabotech/mabo.io | py/opcda/backup/monitor.py | 1 | 10535 |
"""
import logging
import logging.handlers
import logging.config
logging.config.fileConfig('logging.ini')
log = logging.getLogger(__file__)
"""
import os, sys
import subprocess
import traceback
from singleton import Singleton
import simplejson
from apscheduler import events
from apscheduler.scheduler import Scheduler
import time
from flask.config import Config
from config import CENTRAL_CONFIG
from mabolab.core.global_obj import Global
settings = Config("")
settings.from_pyfile(CENTRAL_CONFIG)
settings['APP_NAME'] = "monitor_bli"
g = Global(settings)
db = g.get_db('postgresql')
ora = g.get_db('oracle')
log = g.get_logger()
from mabolab.equipment.opc.opc_proxy import OPCProxy
"""
bil_tags = ['Flag', 'PalletNo', 'SerialNo', 'TestCell', 'TestCount',
'TestStatus', 'TestTypeRun1', 'TestTypeRun2', 'TestTypeRun3']
"""
class Cache(object):
__metaclass__ = Singleton
def __init__(self):
self.serial_no = None
self.flag = 0
#self.worker = worker
def set(self, data):
self.serial_no = data[2][1]
self.flag = data[0][1]
def get(self):
return {"esn" : self.serial_no, "flag" : self.flag}
def reset(self):
pass
#def update(self):
#
# pass
class SpotCache(object):
__metaclass__ = Singleton
def __init__(self):
self.serial_no = None
self.spot = 0
#self.worker = worker
def set(self, data):
self.serial_no = data[0][1]
self.spot = data[1][1]
def get(self):
return {"esn" : self.serial_no, "spot":self.spot}
def reset(self):
pass
def get_obj(data):
obj = {}
for val in data:
#print val
if val[2] != "Good":
raise(Exception("Not good"))
#print val[0], val[1], val[2]
key = val[0].split(".")[-1]
obj[key] = val[1]
return obj
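# Example (illustrative): OPC read tuples such as
#   [('Channel1.Device1.SerialNo', '90000641', 'Good'),
#    ('Channel1.Device1.Flag', 1, 'Good')]
# become {'SerialNo': '90000641', 'Flag': 1}; any quality other than "Good"
# raises an exception.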
class SpotWorker(object):
def __init__(self):
pass
def save(self, data):
#print ">>"*25
esn = data[0][1]
spotflat = data[1][1]
if spotflat == True:
spottest = 2
else:
spottest = 0
sql = "select mt_f_serialno_test_spot('%s','%s','spot','42701')" % (esn, spottest)
#print sql
rtn = db.execute(sql)
sql="""select cwbvr.status AS status ,cwbvr.lastupdateon FROM cob_t_build_verification cbv inner join cob_t_wo_build_verifi_req cwbvr on cbv.id=cwbvr.buildverificationid
and upper(cbv.type)='AUDIT' and cwbvr.esn='%s' and cbv.workstation='42700'"""%(esn)
"""sql = "select status, lastupdateon from cob_t_serial_no_workstation
where serialno = '%s' and workstation = '42700'
order by id desc" % (esn)"""
#print ">>"*20
#print sql
rtn = ora.execute(sql)
row = rtn.fetchone()
if row != None:
if row[0] ==1:
auditstatus = 'P'
else:
auditstatus = 'F'
sql = "select mt_f_serialno_test_audit('%s','%s','audit','42701')" %(esn, auditstatus)
#print ">>"*30
log.debug( sql )
rtn = db.execute(sql)
else:
log.debug("can't find esn:%s in ng db" %(esn))
class Worker(object):
def __init__(self):
pass
def save(self, data):
text = ""
#db.call_sp(text)
try:
obj = get_obj(data)
except:
return
#print "=="*20
#raise(Exception("save exception"))
#print "save"
if obj['TestTypeRun1'] == '01':
obj['TestType'] = '9M'
elif obj['TestTypeRun2'] == '01':
obj['TestType'] = '30M'
elif obj['TestTypeRun3'] == '01':
obj['TestType'] = '3'
else:
raise(Exception("type wrong"))
obj['TestCount'] = int(obj['TestCount'] )
if obj['TestStatus'] not in ['N','P','F','I']:
raise(Exception("status wrong"))
#mt_f_serialno_test_data
#(i_serialno character varying, i_container character varying, i_testcell character varying, i_testtype character varying, i_testcount integer, i_teststatus character varying, i_resource character varying, i_workcenter character varying)
#print "data: %s" % (data)
sql = "select mt_f_serialno_test_data('%(SerialNo)s', '%(PalletNo)s', '%(TestCell)s', '%(TestType)s','%(TestCount)s', '%(TestStatus)s', 'rs','42700' )" % obj
#print sql
#sql = "select now() as dt"
rtn = db.execute(sql)
#print rtn.fetchone()
#print "saved"
class SpotMonitor(object):
def __init__(self):
self.worker = SpotWorker()
self.cache = SpotCache()
self.opcc = OPCClient()
self.group = "BLI.42701S"
pass
def check(self):
#raise( Exception("err"))
data = self.opcc.read(self.group)
#print data
if data != None:
esn = data[0][1]
spot = data[1][1]
else:
return 0
log.debug( "esn:%s" % (esn ) )
prev_esn = self.cache.get()["esn"]
if esn != prev_esn :
log.debug("pervious esn: %s" % (prev_esn) )
self.cache.set(data)
try:
self.worker.save(data)
except Exception, e:
log.debug(e.message)
return 1
else:
return 0
pass
class BLIMonitor(object):
def __init__(self):
self.worker = Worker()
self.cache = Cache()
self.opcc = OPCClient()
self.group = "Channel1.Device1"
pass
def check(self):
#raise( Exception("err"))
data = self.opcc.read(self.group)
#print data
if data != None:
print data
esn = data[2][1]
flag = data[0][1]
else:
return 0
log.debug( "esn:%s" % (esn ) )
prev_esn = self.cache.get()["esn"]
if flag == 1 and esn != prev_esn :
log.debug("pervious esn: %s" % (prev_esn) )
self.cache.set(data)
try:
self.worker.save(data)
#print data
except Exception, e:
log.debug(e.message)
return 1
else:
return 0
pass
class OPCClient(object):
def __init__(self):
group_names =["Channel1.Device1"]# ["BLI.42700M","BLI.42701S"]
fh = open("config.json", "r")
data = fh.read()
json = simplejson.loads(data)
host = 'mabo01'
port = 7766
provider = "SWToolbox.TOPServer.V5"
opc_server = '192.168.100.107'#
timeout = 20000
self.opc_proxy = OPCProxy(host, port, provider, opc_server, timeout)
self.opc_proxy.connect()
for group_name in group_names:
points = json[group_name]["points"]
print points
print group_name
print self.opc_proxy.read2(points, group_name)
def read(self, group_name):
print group_name
v = self.opc_proxy.read3(group_name)
return v
def err_listener(ev):
#print dir(ev)
if ev.exception:
log.debug ( traceback.format_exc() )
if ( type( ev.exception .message) == unicode ):
log.debug( ev.exception .message.encode('utf8')) #.encode("gb2312")
else:
log.debug("exception:"+ev.exception .message)
log.debug ( sys.exc_info() )
log.debug ('%s error.' % str(ev.job))
else:
log.debug( "%s:[%s]" % ( ev.code, ev.retval ) )
pass
#
def kill(fn):
pidf = open(fn, 'r')
pid = pidf.read()
cmd ="tskill %s"%(pid)
log.debug("kill process")
try:
subprocess.Popen(cmd, shell=True)
except:
log.error("kill failed")
pidf.close()
def update_pid(fn):
pidf = open(fn, 'w')
pid = str(os.getpid())
pidf.write(pid)
pidf.close()
def check_pid():
fn = "monitor_pli.pid"
if os.path.exists(fn):
kill(fn)
update_pid(fn)
def run():
log.info( "BLI Monitor starting..." )
#check_pid()
bli = BLIMonitor()
spot = SpotMonitor()
while True:
bli.check()
time.sleep(3)
#sched = Scheduler(daemonic = False)
#sched.add_listener(err_listener, events.EVENT_ALL)
#sched.add_interval_job(lambda:bli.check(), seconds=3)
#sched.add_interval_job(lambda:spot.check(), seconds=3)
#sched.add_listener(err_listener, events.EVENT_JOB_ERROR | events.EVENT_JOB_EXECUTED| events.EVENT_JOB_MISSED)
#sched.start()
log.info( "started" )
"""
while 1:
time.sleep(2)
monitor.check()
"""
pass
def stop():
pass
def dbtest(serialno):
sql = """select status, lastupdateon from cob_t_serial_no_workstation
where serialno = '%s' and workstation = '42700'
order by id desc""" % (serialno)
rtn = ora.execute(sql)
row = rtn.fetchone()
#print row
sql = "select now()"
rtn = db.execute(sql)
row = rtn.fetchone()
#print row
if __name__ == "__main__":
run()
#esn = '90000641'
#dbtest(esn)
| mit | -5,451,429,152,634,972,000 | 19.337838 | 245 | 0.470242 | false |
tengqm/senlin | senlin/tests/engine/test_policy_types.py | 1 | 2469 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_messaging.rpc import dispatcher as rpc
from senlin.common import exception
from senlin.engine import environment
from senlin.engine import service
from senlin.tests.common import base
from senlin.tests.common import utils
from senlin.tests import fakes
class PolicyTypeTest(base.SenlinTestCase):
def setUp(self):
super(PolicyTypeTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='policy_type_test_tenant')
self.eng = service.EngineService('host-a', 'topic-a')
self.eng.init_tgm()
environment.global_env().register_policy('TestPolicy',
fakes.TestPolicy)
def test_policy_type_list(self):
types = self.eng.policy_type_list(self.ctx)
self.assertIsInstance(types, list)
self.assertIn({'name': 'TestPolicy'}, types)
self.assertNotIn({'name': 'some-weird-stuff'}, types)
def test_policy_type_schema(self):
type_name = 'TestPolicy'
expected = {
'spec': {
'KEY1': {
'type': 'String',
'required': False,
'description': 'key1',
'default': 'default1',
},
'KEY2': {
'type': 'Integer',
'required': False,
'description': 'key2',
'default': 1,
},
}
}
schema = self.eng.policy_type_schema(self.ctx, type_name=type_name)
self.assertEqual(expected, schema)
def test_policy_type_schema_nonexist(self):
ex = self.assertRaises(rpc.ExpectedException,
self.eng.policy_type_schema,
self.ctx, type_name='Bogus')
self.assertEqual(exception.PolicyTypeNotFound, ex.exc_info[0])
| apache-2.0 | 4,855,372,240,069,778,000 | 36.409091 | 75 | 0.592143 | false |
gregdetre/abracadjabra | abracadjabra/views.py | 1 | 3115 | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.models import User
from django.db.models import Sum, Count
from django.http import Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from exceptions import SlugAttributeError
from models import Experiment, ExperimentUser
from utils.dt import dt_ranges, recent_day, recent_week
@staff_member_required
def experiments_vw(request):
active_experiments = Experiment.active.all()
inactive_experiments = Experiment.inactive.all()
analyses = [] # Analysis.get_all_analyses()
nAnalyses = len(analyses)
return render_to_response('abracadjabra/experiments.html',
{'active_experiments': active_experiments,
'inactive_experiments': inactive_experiments,
'analyses': analyses,
'nExperiments': Experiment.objects.count(),
'nExperimentsActive': active_experiments.count(),
'nExperimentsInactive': inactive_experiments.count(),
'nAnalyses': nAnalyses,},
context_instance=RequestContext(request))
@staff_member_required
def experiment_detail_vw(request, experiment_id):
dt_joined_str = request.GET.get('dt_joined', 'recent_week')
dt_joined = dt_ranges[dt_joined_str][0] # e.g. recent_week()
# use .objects to allow inactive Experiments to still be viewable
expt = get_object_or_404(Experiment, id=experiment_id)
buckets, dt_joined = expt.compute_buckets(dt_joined=dt_joined)
last_exptuser = ExperimentUser.get_latest(expt)
return render_to_response('abracadjabra/experiment_detail.html',
{'expt': expt,
'buckets': buckets,
'dt_joined': dt_joined,
'last_ran': last_exptuser.cre,},
context_instance=RequestContext(request))
@staff_member_required
def analysis_detail_vw(request, analysis_slug):
dt_joined_str = request.GET.get('dt_joined', 'recent_week')
dt_joined = dt_ranges[dt_joined_str][0] # e.g. recent_week()
try:
analysis = Analysis(analysis_slug, dt_joined)
# and send it by email 60s later, in case this times out
# send_analysis_mail.apply_async(args=[analysis_slug, analysis.dt_joined],
# countdown=60)
analysis.run()
except SlugAttributeError:
raise Http404
# for some reason, some of these variables are outside the EXPT scope in experiment_detail.html
context = {'expt': analysis.as_dict(),
'dt_joined': analysis.dt_joined,
'last_ran': None,
'buckets': analysis.buckets,}
return render_to_response('abracadjabra/analysis_detail.html',
context,
context_instance=RequestContext(request))
| mit | -9,047,564,593,219,679,000 | 44.808824 | 99 | 0.614446 | false |
jgravois/ArcREST | src/arcrest/agol/layer.py | 1 | 54466 | """
.. module:: layer
:platform: Windows, Linux
:synopsis: Class that contians feature service layer information.
.. moduleauthor:: Esri
"""
from .._abstract import abstract
from ..security import security
import types
from ..common import filters
from ..common.geometry import SpatialReference
from ..common.general import _date_handler, _unicode_convert, Feature
from ..common.spatial import scratchFolder, scratchGDB, json_to_featureclass
from ..common.spatial import get_OID_field, get_records_with_attachments
from ..common.spatial import create_feature_layer, merge_feature_class
from ..common.spatial import featureclass_to_json, create_feature_class
from ..common.spatial import get_attachment_data
from ..common.general import FeatureSet
from ..hostedservice import AdminFeatureServiceLayer
import featureservice
import os
import json
import math
import urlparse
import mimetypes
import uuid
from re import search
from urlparse import urlparse
########################################################################
class FeatureLayer(abstract.BaseAGOLClass):
"""
This contains information about a feature service's layer.
"""
_objectIdField = None
_allowGeometryUpdates = None
_globalIdField = None
_token_url = None
_currentVersion = None
_id = None
_name = None
_type = None
_description = None
_definitionExpression = None
_geometryType = None
_hasZ = None
_hasM = None
_copyrightText = None
_parentLayer = None
_subLayers = None
_minScale = None
_maxScale = None
_effectiveMinScale = None
_effectiveMaxScale = None
_defaultVisibility = None
_extent = None
_timeInfo = None
_drawingInfo = None
_hasAttachments = None
_htmlPopupType = None
_displayField = None
_typeIdField = None
_fields = None
_types = None # sub-types
_relationships = None
_maxRecordCount = None
_canModifyLayer = None
_supportsValidateSql = None
_supportsCoordinatesQuantization = None
_supportsStatistics = None
_supportsAdvancedQueries = None
_hasLabels = None
_canScaleSymbols = None
_capabilities = None
_supportedQueryFormats = None
_isDataVersioned = None
_ownershipBasedAccessControlForFeatures = None
_useStandardizedQueries = None
_templates = None
_indexes = None
_hasStaticData = None
_supportsRollbackOnFailureParameter = None
_advancedQueryCapabilities = None
_editingInfo = None
_proxy_url = None
_proxy_port = None
_securityHandler = None
_supportsCalculate = None
_supportsAttachmentsByUploadId = None
_editFieldsInfo = None
_serverURL = None
_supportsValidateSql = None
_supportsCoordinatesQuantization = None
#----------------------------------------------------------------------
def __init__(self, url,
securityHandler=None,
initialize=False,
proxy_url=None,
proxy_port=None):
"""Constructor"""
self._url = url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if securityHandler is not None and \
isinstance(securityHandler, abstract.BaseSecurityHandler):
self._securityHandler = securityHandler
if not securityHandler.referer_url is None:
self._referer_url = securityHandler.referer_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" initializes the service """
params = {
"f" : "json",
}
json_dict = self._do_get(self._url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
else:
print k, " - attribute not implemented in Feature Layer."
self._parentLayer = featureservice.FeatureService(
url=os.path.dirname(self._url),
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def refresh(self):
"""refreshes all the properties of the service"""
self.__init()
#----------------------------------------------------------------------
def __str__(self):
""" returns object as string """
return json.dumps(dict(self), default=_date_handler)
#----------------------------------------------------------------------
def __iter__(self):
""" iterator generator for public values/properties
It only returns the properties that are public.
"""
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_') and \
not isinstance(getattr(self, attr), (types.MethodType,
types.BuiltinFunctionType,
types.BuiltinMethodType))
]
for att in attributes:
yield (att, getattr(self, att))
#----------------------------------------------------------------------
@property
def url(self):
""" returns the url for the feature layer"""
return self._url
#----------------------------------------------------------------------
@property
def administration(self):
"""returns the hostservice object to manage the back-end functions"""
url = self._url
res = search("/rest/", url).span()
addText = "admin/"
part1 = url[:res[1]]
part2 = url[res[1]:]
adminURL = "%s%s%s" % (part1, addText, part2)
res = AdminFeatureServiceLayer(url=adminURL,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
return res
#----------------------------------------------------------------------
@property
def supportsValidateSql(self):
""" returns the supports calculate values """
if self._supportsValidateSql is None:
self.__init()
return self._supportsValidateSql
#----------------------------------------------------------------------
@property
def supportsCoordinatesQuantization(self):
""" returns the supports calculate values """
if self._supportsCoordinatesQuantization is None:
self.__init()
return self._supportsCoordinatesQuantization
#----------------------------------------------------------------------
@property
def supportsCalculate(self):
""" returns the supports calculate values """
if self._supportsCalculate is None:
self.__init()
return self._supportsCalculate
#----------------------------------------------------------------------
@property
def editFieldsInfo(self):
""" returns edit field info """
if self._editFieldsInfo is None:
self.__init()
return self._editFieldsInfo
#----------------------------------------------------------------------
@property
def supportsAttachmentsByUploadId(self):
""" returns is supports attachments by uploads id """
if self._supportsAttachmentsByUploadId is None:
self.__init()
return self._supportsAttachmentsByUploadId
#----------------------------------------------------------------------
@property
def editingInfo(self):
""" returns the edit information """
if self._editingInfo is None:
self.__init()
return self._editingInfo
#----------------------------------------------------------------------
@property
def advancedQueryCapabilities(self):
""" returns the advanced query capabilities """
if self._advancedQueryCapabilities is None:
self.__init()
return self._advancedQueryCapabilities
#----------------------------------------------------------------------
@property
def supportsRollbackOnFailureParameter(self):
""" returns if rollback on failure supported """
if self._supportsRollbackOnFailureParameter is None:
self.__init()
return self._supportsRollbackOnFailureParameter
#----------------------------------------------------------------------
@property
def hasStaticData(self):
"""boolean T/F if static data is present """
if self._hasStaticData is None:
self.__init()
return self._hasStaticData
#----------------------------------------------------------------------
@property
def indexes(self):
"""gets the indexes"""
if self._indexes is None:
self.__init()
return self._indexes
#----------------------------------------------------------------------
@property
def templates(self):
""" gets the template """
if self._templates is None:
self.__init()
return self._templates
#----------------------------------------------------------------------
@property
def allowGeometryUpdates(self):
""" returns boolean if geometry updates are allowed """
if self._allowGeometryUpdates is None:
self.__init()
return self._allowGeometryUpdates
#----------------------------------------------------------------------
@property
def globalIdField(self):
""" returns the global id field """
if self._globalIdField is None:
self.__init()
return self._globalIdField
#----------------------------------------------------------------------
@property
def objectIdField(self):
if self._objectIdField is None:
self.__init()
return self._objectIdField
#----------------------------------------------------------------------
@property
def currentVersion(self):
""" returns the current version """
if self._currentVersion is None:
self.__init()
return self._currentVersion
#----------------------------------------------------------------------
@property
def id(self):
""" returns the id """
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def name(self):
""" returns the name """
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def type(self):
""" returns the type """
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def description(self):
""" returns the layer's description """
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def definitionExpression(self):
"""returns the definitionExpression"""
if self._definitionExpression is None:
self.__init()
return self._definitionExpression
#----------------------------------------------------------------------
@property
def geometryType(self):
"""returns the geometry type"""
if self._geometryType is None:
self.__init()
return self._geometryType
#----------------------------------------------------------------------
@property
def hasZ(self):
""" returns if it has a Z value or not """
if self._hasZ is None:
self.__init()
return self._hasZ
#----------------------------------------------------------------------
@property
def hasM(self):
""" returns if it has a m value or not """
if self._hasM is None:
self.__init()
return self._hasM
#----------------------------------------------------------------------
@property
def copyrightText(self):
""" returns the copyright text """
if self._copyrightText is None:
self.__init()
return self._copyrightText
#----------------------------------------------------------------------
@property
def parentLayer(self):
""" returns information about the parent """
if self._parentLayer is None:
self.__init()
return self._parentLayer
#----------------------------------------------------------------------
@property
def subLayers(self):
""" returns sublayers for layer """
if self._subLayers is None:
self.__init()
return self._subLayers
#----------------------------------------------------------------------
@property
def minScale(self):
""" minimum scale layer will show """
if self._minScale is None:
self.__init()
return self._minScale
@property
def maxScale(self):
""" sets the max scale """
if self._maxScale is None:
self.__init()
return self._maxScale
@property
def effectiveMinScale(self):
""" returns the effective minimum scale value """
if self._effectiveMinScale is None:
self.__init()
return self._effectiveMinScale
@property
def effectiveMaxScale(self):
""" returns the effective maximum scale value """
if self._effectiveMaxScale is None:
self.__init()
return self._effectiveMaxScale
@property
def defaultVisibility(self):
""" returns the default visibility of the layer """
if self._defaultVisibility is None:
self.__init()
return self._defaultVisibility
@property
def extent(self):
""" returns the extent """
if self._extent is None:
self.__init()
return self._extent
@property
def timeInfo(self):
""" returns the time information about the layer """
if self._timeInfo is None:
self.__init()
return self._timeInfo
@property
def drawingInfo(self):
""" returns the symbol information about the layer """
if self._drawingInfo is None:
self.__init()
return self._drawingInfo
@property
def hasAttachments(self):
""" boolean that tells if attachments are associated with layer """
if self._hasAttachments is None:
self.__init()
return self._hasAttachments
@property
def htmlPopupType(self):
""" returns the popup type """
if self._htmlPopupType is None:
self.__init()
return self._htmlPopupType
@property
def displayField(self):
""" returns the primary display field """
if self._displayField is None:
self.__init()
return self._displayField
@property
def typeIdField(self):
""" returns the type Id field """
if self._typeIdField is None:
self.__init()
return self._typeIdField
@property
def fields(self):
""" returns the layer's fields """
if self._fields is None:
self.__init()
return self._fields
@property
def types(self):
""" returns the types """
if self._types is None:
self.__init()
return self._types
@property
def relationships(self):
""" returns the relationships for the layer """
if self._relationships is None:
self.__init()
return self._relationships
@property
def maxRecordCount(self):
""" returns the maximum returned records """
if self._maxRecordCount is None:
self.__init()
if self._maxRecordCount is None:
self._maxRecordCount = 1000
return self._maxRecordCount
@property
def canModifyLayer(self):
""" returns boolean to say if layer can be modified """
if self._canModifyLayer is None:
self.__init()
return self._canModifyLayer
@property
def supportsStatistics(self):
""" boolean to if supports statistics """
if self._supportsStatistics is None:
self.__init()
return self._supportsStatistics
@property
def supportsAdvancedQueries(self):
""" boolean value if advanced queries is supported """
if self._supportsAdvancedQueries is None:
self.__init()
return self._supportsAdvancedQueries
@property
def hasLabels(self):
""" returns if layer has labels on or not """
if self._hasLabels is None:
self.__init()
return self._hasLabels
@property
def canScaleSymbols(self):
""" states if symbols can scale """
if self._canScaleSymbols is None:
self.__init()
return self._canScaleSymbols
@property
def capabilities(self):
""" operations that can be performed on layer """
if self._capabilities is None:
self.__init()
return self._capabilities
@property
def supportedQueryFormats(self):
""" returns supported query formats """
if self._supportedQueryFormats is None:
self.__init()
return self._supportedQueryFormats
@property
def isDataVersioned(self):
""" returns boolean if data is in version control """
if self._isDataVersioned is None:
self.__init()
return self._isDataVersioned
@property
def ownershipBasedAccessControlForFeatures(self):
""" returns value for owernship based access control """
if self._ownershipBasedAccessControlForFeatures is None:
self.__init()
return self._ownershipBasedAccessControlForFeatures
@property
def useStandardizedQueries(self):
""" returns value if standardized queries can be used """
if self._useStandardizedQueries is None:
self.__init()
return self._useStandardizedQueries
#----------------------------------------------------------------------
@property
def securityHandler(self):
""" gets the security handler """
return self._securityHandler
#----------------------------------------------------------------------
@securityHandler.setter
def securityHandler(self, value):
""" sets the security handler """
if isinstance(value, abstract.BaseSecurityHandler):
if isinstance(value, security.AGOLTokenSecurityHandler):
self._securityHandler = value
self._token = value.token
self._username = value.username
self._password = value._password
self._token_url = value.token_url
elif isinstance(value, security.OAuthSecurityHandler):
self._token = value.token
self._securityHandler = value
else:
pass
#----------------------------------------------------------------------
def addAttachment(self, oid, file_path):
""" Adds an attachment to a feature service
Input:
oid - string - OBJECTID value to add attachment to
file_path - string - path to file
Output:
JSON Response
"""
if self.hasAttachments == True:
attachURL = self._url + "/%s/addAttachment" % oid
params = {'f':'json'}
parsed = urlparse(attachURL)
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
securityHandler=self._securityHandler,
files=files,
fields=params,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return self._unicode_convert(res)
else:
return "Attachments are not supported for this feature service."
#----------------------------------------------------------------------
def deleteAttachment(self, oid, attachment_id):
""" removes an attachment from a feature service feature
Input:
oid - integer or string - id of feature
attachment_id - integer - id of attachment to erase
Output:
JSON response
"""
url = self._url + "/%s/deleteAttachments" % oid
params = {
"f":"json",
"attachmentIds" : "%s" % attachment_id
}
return self._do_post(url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateAttachment(self, oid, attachment_id, file_path):
""" updates an existing attachment with a new file
Inputs:
oid - string/integer - Unique record ID
attachment_id - integer - Unique attachment identifier
file_path - string - path to new attachment
Output:
JSON response
"""
url = self._url + "/%s/updateAttachment" % oid
params = {
"f":"json",
"attachmentId" : "%s" % attachment_id
}
parsed = urlparse(url)
port = parsed.port
files = []
files.append(('attachment', file_path, os.path.basename(file_path)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files=files,
port=port,
fields=params,
securityHandler=self._securityHandler,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return self._unicode_convert(res)
#----------------------------------------------------------------------
def listAttachments(self, oid):
""" list attachements for a given OBJECT ID """
url = self._url + "/%s/attachments" % oid
params = {
"f":"json"
}
return self._do_get(url, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def create_fc_template(self, out_path, out_name):
"""creates a featureclass template on local disk"""
fields = self.fields
objectIdField = self.objectIdField
geomType = self.geometryType
wkid = self.parentLayer.spatialReference['wkid']
return create_feature_class(out_path,
out_name,
geomType,
wkid,
fields,
objectIdField)
def create_feature_template(self):
"""creates a feature template"""
fields = self.fields
feat_schema = {}
att = {}
for fld in fields:
self._globalIdField
if not fld['name'] == self._objectIdField and not fld['name'] == self._globalIdField:
att[fld['name']] = ''
feat_schema['attributes'] = att
feat_schema['geometry'] = ''
return Feature(feat_schema)
#----------------------------------------------------------------------
def query(self,
where="1=1",
out_fields="*",
timeFilter=None,
geometryFilter=None,
returnGeometry=True,
returnIDsOnly=False,
returnCountOnly=False,
returnFeatureClass=False,
out_fc=None):
""" queries a feature service based on a sql statement
Inputs:
where - the selection sql statement
out_fields - the attribute fields to return
timeFilter - a TimeFilter object where either the start time
or start and end time are defined to limit the
search results for a given time. The values in
the timeFilter should be as UTC timestampes in
milliseconds. No checking occurs to see if they
are in the right format.
geometryFilter - a GeometryFilter object to parse down a given
query by another spatial dataset.
returnGeometry - true means a geometry will be returned,
else just the attributes
returnIDsOnly - false is default. True means only OBJECTIDs
will be returned
returnCountOnly - if True, then an integer is returned only
based on the sql statement
returnFeatureClass - Default False. If true, query will be
returned as feature class
out_fc - only valid if returnFeatureClass is set to True.
Output location of query.
Output:
A FeatureSet object (default) or the path to the output feature class if
returnFeatureClass is set to True.
"""
params = {"f": "json",
"where": where,
"outFields": out_fields,
"returnGeometry" : returnGeometry,
"returnIdsOnly" : returnIDsOnly,
"returnCountOnly" : returnCountOnly,
}
if not timeFilter is None and \
isinstance(timeFilter, filters.TimeFilter):
params['time'] = timeFilter.filter
if not geometryFilter is None and \
isinstance(geometryFilter, filters.GeometryFilter):
gf = geometryFilter.filter
params['geometry'] = gf['geometry']
params['geometryType'] = gf['geometryType']
params['spatialRelationship'] = gf['spatialRel']
params['inSR'] = gf['inSR']
fURL = self._url + "/query"
results = self._do_get(fURL, params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'error' in results:
raise ValueError (results)
if not returnCountOnly and not returnIDsOnly:
if returnFeatureClass:
json_text = json.dumps(results)
temp = scratchFolder() + os.sep + uuid.uuid4().get_hex() + ".json"
with open(temp, 'wb') as writer:
writer.write(json_text)
writer.flush()
del writer
fc = json_to_featureclass(json_file=temp,
out_fc=out_fc)
os.remove(temp)
return fc
else:
return FeatureSet.fromJSON(json.dumps(results))
else:
return results
return
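# Usage sketch (illustrative; the service URL and field names are placeholders,
# and it assumes the returned FeatureSet exposes its records through a
# features property):
#
#   fl = FeatureLayer(url="http://services.arcgis.com/<org>/arcgis/rest/services/Parcels/FeatureServer/0")
#   fs = fl.query(where="STATUS = 'ACTIVE'", out_fields="PARCELID,STATUS")
#   for feat in fs.features:
#       print feat.asDictionary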
#----------------------------------------------------------------------
def query_related_records(self,
objectIds,
relationshipId,
outFields="*",
definitionExpression=None,
returnGeometry=True,
maxAllowableOffset=None,
geometryPrecision=None,
outWKID=None,
gdbVersion=None,
returnZ=False,
returnM=False):
"""
The Query operation is performed on a feature service layer
resource. The result of this operation are feature sets grouped
by source layer/table object IDs. Each feature set contains
Feature objects including the values for the fields requested by
the user. For related layers, if you request geometry
information, the geometry of each feature is also returned in
the feature set. For related tables, the feature set does not
include geometries.
Inputs:
objectIds - the object IDs of the table/layer to be queried
relationshipId - The ID of the relationship to be queried.
outFields - the list of fields from the related table/layer
to be included in the returned feature set. This
list is a comma delimited list of field names. If
you specify the shape field in the list of return
fields, it is ignored. To request geometry, set
returnGeometry to true.
You can also specify the wildcard "*" as the
value of this parameter. In this case, the results
will include all the field values.
definitionExpression - The definition expression to be
applied to the related table/layer.
From the list of objectIds, only those
records that conform to this
expression are queried for related
records.
returnGeometry - If true, the feature set includes the
geometry associated with each feature. The
default is true.
maxAllowableOffset - This option can be used to specify the
maxAllowableOffset to be used for
generalizing geometries returned by the
query operation. The maxAllowableOffset
is in the units of the outSR. If outSR
is not specified, then
maxAllowableOffset is assumed to be in
the unit of the spatial reference of the
map.
geometryPrecision - This option can be used to specify the
number of decimal places in the response
geometries.
outWKID - The spatial reference of the returned geometry.
gdbVersion - The geodatabase version to query. This parameter
applies only if the isDataVersioned property of
the layer queried is true.
returnZ - If true, Z values are included in the results if
the features have Z values. Otherwise, Z values are
not returned. The default is false.
returnM - If true, M values are included in the results if
the features have M values. Otherwise, M values are
not returned. The default is false.
"""
params = {
"f" : "json",
"objectIds" : objectIds,
"relationshipId" : relationshipId,
"outFields" : outFields,
"returnGeometry" : returnGeometry,
"returnM" : returnM,
"returnZ" : returnZ
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if definitionExpression is not None:
params['definitionExpression'] = definitionExpression
if outWKID is not None:
params['outSR'] = SpatialReference(outWKID).asDictionary
if maxAllowableOffset is not None:
params['maxAllowableOffset'] = maxAllowableOffset
if geometryPrecision is not None:
params['geometryPrecision'] = geometryPrecision
quURL = self._url + "/queryRelatedRecords"
res = self._do_get(url=quURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
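# Usage sketch (illustrative; the object IDs and relationship ID are
# placeholders for values taken from the layer's relationships property):
#   related = fl.query_related_records(objectIds="1,2,3", relationshipId="0",
#                                      outFields="*", returnGeometry=False)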
#----------------------------------------------------------------------
def getHTMLPopup(self, oid):
"""
The htmlPopup resource provides details about the HTML pop-up
authored by the user using ArcGIS for Desktop.
Input:
oid - object id of the feature where the HTML pop-up
Output:
"""
if self.htmlPopupType != "esriServerHTMLPopupTypeNone":
popURL = self._url + "/%s/htmlPopup" % oid
params = {
'f' : "json"
}
return self._do_get(url=popURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return ""
#----------------------------------------------------------------------
def _chunks(self, l, n):
""" Yield n successive chunks from a list l.
"""
l.sort()
newn = int(1.0 * len(l) / n + 0.5)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
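# Example (illustrative): _chunks([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3) yields
# [1, 2, 3], [4, 5, 6] and finally [7, 8, 9, 10]; the last chunk always absorbs
# any remainder.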
#----------------------------------------------------------------------
def get_local_copy(self, out_path, includeAttachments=False):
""" exports the whole feature service to a feature class
Input:
out_path - path to where the data will be placed
includeAttachments - default False. If sync is not supported
then the parameter is ignored.
Output:
path to exported feature class or fgdb (as list)
"""
if self.hasAttachments and \
self.parentLayer.syncEnabled:
return self.parentLayer.createReplica(replicaName="fgdb_dump",
layers="%s" % self.id,
returnAsFeatureClass=True,
returnAttachments=includeAttachments,
out_path=out_path)[0]
elif self.hasAttachments == False and \
self.parentLayer.syncEnabled:
return self.parentLayer.createReplica(replicaName="fgdb_dump",
layers="%s" % self.id,
returnAsFeatureClass=True,
out_path=out_path)[0]
else:
result_features = []
res = self.query(returnIDsOnly=True)
OIDS = res['objectIds']
OIDS.sort()
OIDField = res['objectIdFieldName']
count = len(OIDS)
if count <= self.maxRecordCount:
bins = 1
else:
bins = count / self.maxRecordCount
v = count % self.maxRecordCount
if v > 0:
bins += 1
chunks = self._chunks(OIDS, bins)
for chunk in chunks:
chunk.sort()
sql = "%s >= %s and %s <= %s" % (OIDField, chunk[0],
OIDField, chunk[len(chunk) -1])
temp_base = "a" + uuid.uuid4().get_hex()[:6] + "a"
temp_fc = r"%s\%s" % (scratchGDB(), temp_base)
temp_fc = self.query(where=sql,
returnFeatureClass=True,
out_fc=temp_fc)
result_features.append(temp_fc)
return merge_feature_class(merges=result_features,
out_fc=out_path)
#----------------------------------------------------------------------
def updateFeature(self,
features,
gdbVersion=None,
rollbackOnFailure=True):
"""
updates an existing feature in a feature service layer
Input:
           features - feature object(s) to be updated. A single Feature,
                      a list of Feature objects, or a FeatureSet object
                      can be passed.
Output:
dictionary of result messages
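           Usage (illustrative; feats is assumed to be a list of
           common.Feature objects prepared elsewhere):
               >>> res = fl.updateFeature(features=feats)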
"""
params = {
"f" : "json",
"rollbackOnFailure" : rollbackOnFailure
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(features, Feature):
params['features'] = json.dumps([features.asDictionary],
default=_date_handler
)
elif isinstance(features, list):
vals = []
for feature in features:
if isinstance(feature, Feature):
vals.append(feature.asDictionary)
params['features'] = json.dumps(vals,
default=_date_handler
)
elif isinstance(features, FeatureSet):
params['features'] = json.dumps(
[feature.asDictionary for feature in features.features],
default=_date_handler
)
else:
return {'message' : "invalid inputs"}
updateURL = self._url + "/updateFeatures"
res = self._do_post(url=updateURL,
securityHandler=self._securityHandler,
param_dict=params, proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return res
#----------------------------------------------------------------------
def deleteFeatures(self,
objectIds="",
where="",
geometryFilter=None,
gdbVersion=None,
rollbackOnFailure=True
):
""" removes 1:n features based on a sql statement
Input:
objectIds - The object IDs of this layer/table to be deleted
where - A where clause for the query filter. Any legal SQL
where clause operating on the fields in the layer is
allowed. Features conforming to the specified where
clause will be deleted.
geometryFilter - a filters.GeometryFilter object to limit
deletion by a geometry.
gdbVersion - Geodatabase version to apply the edits. This
parameter applies only if the isDataVersioned
property of the layer is true
rollbackOnFailure - parameter to specify if the edits should
be applied only if all submitted edits
succeed. If false, the server will apply
the edits that succeed even if some of
the submitted edits fail. If true, the
server will apply the edits only if all
edits succeed. The default value is true.
Output:
JSON response as dictionary
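           Usage (illustrative; the where clause below is only an example):
               >>> res = fl.deleteFeatures(where="STATUS = 'RETIRED'")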
"""
dURL = self._url + "/deleteFeatures"
params = {
"f": "json",
}
if geometryFilter is not None and \
isinstance(geometryFilter, filters.GeometryFilter):
gfilter = geometryFilter.filter
params['geometry'] = gfilter['geometry']
params['geometryType'] = gfilter['geometryType']
params['inSR'] = gfilter['inSR']
params['spatialRel'] = gfilter['spatialRel']
if where is not None and \
where != "":
params['where'] = where
if objectIds is not None and \
objectIds != "":
params['objectIds'] = objectIds
result = self._do_post(url=dURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return result
#----------------------------------------------------------------------
def applyEdits(self,
addFeatures=[],
updateFeatures=[],
deleteFeatures=None,
gdbVersion=None,
rollbackOnFailure=True):
"""
This operation adds, updates, and deletes features to the
associated feature layer or table in a single call.
Inputs:
addFeatures - The array of features to be added. These
features should be common.Feature objects
           updateFeatures - The array of features to be updated.
These features should be common.Feature
objects
deleteFeatures - string of OIDs to remove from service
gdbVersion - Geodatabase version to apply the edits.
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
dictionary of messages
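           Usage (illustrative; add_feats and upd_feats are assumed to be
           lists of common.Feature objects built elsewhere):
               >>> res = fl.applyEdits(addFeatures=add_feats,
                                       updateFeatures=upd_feats,
                                       deleteFeatures="12,13")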
"""
editURL = self._url + "/applyEdits"
params = {"f": "json"
}
if len(addFeatures) > 0 and \
isinstance(addFeatures[0], Feature):
params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
default=_date_handler)
if len(updateFeatures) > 0 and \
isinstance(updateFeatures[0], Feature):
params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
default=_date_handler)
if deleteFeatures is not None and \
isinstance(deleteFeatures, str):
params['deletes'] = deleteFeatures
return self._do_post(url=editURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addFeature(self, features,
gdbVersion=None,
rollbackOnFailure=True):
""" Adds a single feature to the service
Inputs:
               features - a list of common.Feature objects, a single
                          common.Feature object, or a FeatureSet object
gdbVersion - Geodatabase version to apply the edits
rollbackOnFailure - Optional parameter to specify if the
edits should be applied only if all
submitted edits succeed. If false, the
server will apply the edits that succeed
even if some of the submitted edits fail.
If true, the server will apply the edits
only if all edits succeed. The default
value is true.
Output:
JSON message as dictionary
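           Usage (illustrative; feat is assumed to be a single
           common.Feature object built elsewhere):
               >>> res = fl.addFeature(features=feat)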
"""
url = self._url + "/addFeatures"
params = {
"f" : "json"
}
if gdbVersion is not None:
params['gdbVersion'] = gdbVersion
if isinstance(rollbackOnFailure, bool):
params['rollbackOnFailure'] = rollbackOnFailure
if isinstance(features, list):
params['features'] = json.dumps([feature.asDictionary for feature in features],
default=_date_handler)
elif isinstance(features, Feature):
params['features'] = json.dumps([features.asDictionary],
default=_date_handler)
elif isinstance(features, FeatureSet):
            params['features'] = json.dumps([feature.asDictionary for feature in features.features],
default=_date_handler)
else:
return None
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addFeatures(self, fc, attachmentTable=None,
nameField="ATT_NAME", blobField="DATA",
contentTypeField="CONTENT_TYPE",
rel_object_field="REL_OBJECTID"):
""" adds a feature to the feature service
Inputs:
fc - string - path to feature class data to add.
attachmentTable - string - (optional) path to attachment table
nameField - string - (optional) name of file field in attachment table
blobField - string - (optional) name field containing blob data
contentTypeField - string - (optional) name of field containing content type
rel_object_field - string - (optional) name of field with OID of feature class
Output:
              dictionary with an 'addResults' list of result messages (and
              attachment results when an attachment table is given)
"""
messages = {'addResults':[]}
if attachmentTable is None:
count = 0
bins = 1
uURL = self._url + "/addFeatures"
max_chunk = 250
js = json.loads(self._unicode_convert(
featureclass_to_json(fc)))
js = js['features']
if len(js) == 0:
return {'addResults':None}
if len(js) <= max_chunk:
bins = 1
else:
bins = int(len(js)/max_chunk)
if len(js) % max_chunk > 0:
bins += 1
chunks = self._chunks(l=js, n=bins)
for chunk in chunks:
params = {
"f" : 'json',
"features" : json.dumps(chunk,
default=self._date_handler)
}
result = self._do_post(url=uURL, param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if messages is None:
messages = result
else:
if 'addResults' in result:
if 'addResults' in messages:
messages['addResults'] = messages['addResults'] + result['addResults']
else:
messages['addResults'] = result['addResults']
else:
messages['errors'] = result
del params
del result
return messages
else:
oid_field = get_OID_field(fc)
OIDs = get_records_with_attachments(attachment_table=attachmentTable)
fl = create_feature_layer(fc, "%s not in ( %s )" % (oid_field, ",".join(OIDs)))
result = self.addFeatures(fl)
if result is not None:
messages.update(result)
del fl
for oid in OIDs:
fl = create_feature_layer(fc, "%s = %s" % (oid_field, oid), name="layer%s" % oid)
msgs = self.addFeatures(fl)
for result in msgs['addResults']:
oid_fs = result['objectId']
sends = get_attachment_data(attachmentTable, sql="%s = %s" % (rel_object_field, oid))
result['addAttachmentResults'] = []
for s in sends:
attRes = self.addAttachment(oid_fs, s['blob'])
if 'addAttachmentResult' in attRes:
attRes['addAttachmentResult']['AttachmentName'] = s['name']
result['addAttachmentResults'].append(attRes['addAttachmentResult'])
else:
attRes['AttachmentName'] = s['name']
result['addAttachmentResults'].append(attRes)
del s
del sends
del result
messages.update( msgs)
del fl
del oid
del OIDs
return messages
#----------------------------------------------------------------------
def calculate(self, where, calcExpression, sqlFormat="standard"):
"""
The calculate operation is performed on a feature service layer
resource. It updates the values of one or more fields in an
existing feature service layer based on SQL expressions or scalar
values. The calculate operation can only be used if the
supportsCalculate property of the layer is true.
Neither the Shape field nor system fields can be updated using
calculate. System fields include ObjectId and GlobalId.
See Calculate a field for more information on supported expressions
Inputs:
where - A where clause can be used to limit the updated records.
Any legal SQL where clause operating on the fields in
the layer is allowed.
calcExpression - The array of field/value info objects that
contain the field or fields to update and their
scalar values or SQL expression. Allowed types
are dictionary and list. List must be a list
of dictionary objects.
Calculation Format is as follows:
{"field" : "<field name>",
"value" : "<value>"}
sqlFormat - The SQL format for the calcExpression. It can be
either standard SQL92 (standard) or native SQL
(native). The default is standard.
Values: standard, native
Output:
JSON as string
Usage:
>>>sh = arcrest.AGOLTokenSecurityHandler("user", "pw")
>>>fl = arcrest.agol.FeatureLayer(url="someurl",
securityHandler=sh, initialize=True)
>>>print fl.calculate(where="OBJECTID < 2",
calcExpression={"field": "ZONE",
"value" : "R1"})
{'updatedFeatureCount': 1, 'success': True}
"""
url = self._url + "/calculate"
params = {
"f" : "json",
"where" : where,
}
if isinstance(calcExpression, dict):
params["calcExpression"] = json.dumps([calcExpression],
default=_date_handler)
elif isinstance(calcExpression, list):
params["calcExpression"] = json.dumps(calcExpression,
default=_date_handler)
if sqlFormat.lower() in ['native', 'standard']:
params['sqlFormat'] = sqlFormat.lower()
else:
params['sqlFormat'] = "standard"
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
########################################################################
class TableLayer(FeatureLayer):
"""Table object is exactly like FeatureLayer object"""
pass | apache-2.0 | -7,880,301,906,038,631,000 | 42.296502 | 105 | 0.482191 | false |
alphacsc/alphacsc | alphacsc/other/swm.py | 1 | 5148 | """
Code adopted from Voytek Lab package neurodsp:
https://github.com/voytekresearch/neurodsp/blob/master/neurodsp/shape/swm.py
The sliding window matching algorithm identifies the waveform shape of
neural oscillations using correlations.
"""
# Authors: Scott Cole
# Mainak Jas <[email protected]>
import numpy as np
from scipy.spatial.distance import pdist
from alphacsc.utils import check_random_state
def sliding_window_matching(x, L, G, max_iterations=500, T=1,
window_starts_custom=None, random_state=None):
"""Find recurring patterns in a time series using SWM algorithm.
Parameters
----------
x : array-like 1d
voltage time series
    L : int
        window length (samples)
    G : int
        minimum window spacing (samples)
T : float
temperature parameter. Controls probability of accepting a new window
max_iterations : int
Maximum number of iterations of potential changes in window placement
window_starts_custom : np.ndarray (1d)
Pre-set locations of initial windows (instead of evenly spaced by 2G)
random_state : int
The random state
Returns
-------
avg_window : ndarray (1d)
The average waveform in x.
window_starts : ndarray (1d)
Indices at which each window begins for the final set of windows
J : np.ndarray (1d)
Cost function value at each iteration
References
----------
Gips, B., Bahramisharif, A., Lowet, E., Roberts, M. J., de Weerd, P.,
Jensen, O., & van der Eerden, J. (2017). Discovering recurring
patterns in electrophysiological recordings.
Journal of Neuroscience Methods, 275, 66-79.
MATLAB code: https://github.com/bartgips/SWM
Notes
-----
* Apply a highpass filter if looking at high frequency activity,
so that it does not converge on a low frequency motif
* L and G should be chosen to be about the size of the motif of interest
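
    Examples
    --------
    Illustrative call on a synthetic signal (the window length and spacing
    below are arbitrary sample counts chosen for demonstration only):
    >>> x = np.random.randn(10000)
    >>> avg_window, window_starts, J = sliding_window_matching(
    ...     x, L=100, G=200, max_iterations=50, random_state=0)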
"""
rng = check_random_state(random_state)
# Initialize window positions, separated by 2*G
if window_starts_custom is None:
window_starts = np.arange(0, len(x) - L, 2 * G)
else:
window_starts = window_starts_custom
N_windows = len(window_starts)
# Calculate initial cost
J = np.zeros(max_iterations)
J[0] = _compute_J(x, window_starts, L)
# Randomly sample windows with replacement
random_window_idx = rng.choice(range(N_windows), size=max_iterations)
# For each iteration, randomly replace a window with a new window
# to improve cross-window similarity
for idx in range(1, max_iterations):
# Pick a random window position
window_idx_replace = random_window_idx[idx]
# Find a new allowed position for the window
window_starts_temp = np.copy(window_starts)
window_starts_temp[window_idx_replace] = _find_new_windowidx(
window_starts, G, L, len(x) - L, rng)
# Calculate the cost with replaced windows
J_temp = _compute_J(x, window_starts_temp, L)
# Calculate the change in cost function
deltaJ = J_temp - J[idx - 1]
# Calculate the acceptance probability
p_accept = np.exp(-deltaJ / float(T))
# Accept update to J with a certain probability
if rng.rand() < p_accept:
J[idx] = J_temp
# Update X
window_starts = window_starts_temp
else:
J[idx] = J[idx - 1]
print('[iter %03d] Cost function: %s' % (idx, J[idx]))
# Calculate average window
avg_window = np.zeros(L)
for w in range(N_windows):
avg_window += x[window_starts[w]:window_starts[w] + L]
avg_window = avg_window / float(N_windows)
return avg_window, window_starts, J
def _compute_J(x, window_starts, L):
"""Compute the cost, which is proportional to the
difference between pairs of windows"""
# Get all windows and zscore them
N_windows = len(window_starts)
windows = np.zeros((N_windows, L))
for w in range(N_windows):
temp = x[window_starts[w]:window_starts[w] + L]
windows[w] = (temp - np.mean(temp)) / np.std(temp)
# Calculate distances for all pairs of windows
dist = pdist(np.vstack(windows),
lambda u, v: np.sum((u - v) ** 2))
J = np.sum(dist) / float(L * (N_windows - 1))
return J
def _find_new_windowidx(window_starts, G, L, N_samp, rng,
tries_limit=1000):
"""Find a new sample for the starting window"""
found = False
N_tries = 0
while found is False:
# Generate a random sample
new_samp = rng.randint(N_samp)
# Check how close the sample is to other window starts
dists = np.abs(window_starts - new_samp)
if np.min(dists) > G:
return new_samp
else:
N_tries += 1
if N_tries > tries_limit:
raise RuntimeError('SWM algorithm has difficulty finding a new'
' window. Increase the spacing parameter,'
' G.')
| bsd-3-clause | -5,241,638,121,785,414,000 | 32.428571 | 79 | 0.621018 | false |
Peter-Liang/CodeWars-Python | solutions/Conway_s_Game_of_Life_Unlimited_Edition.py | 1 | 1775 | """
Conway's Game of Life - Unlimited Edition
http://www.codewars.com/kata/52423db9add6f6fc39000354/train/python
"""
from copy import deepcopy
def get_generation(cells, generations):
origin = deepcopy(cells)
if generations == 0:
return origin
if generations > 1:
origin = get_generation(origin, generations - 1)
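    # Pad the previous generation with a one-cell border of dead cells so that
    # cells just outside the old boundary can come alive (the board is
    # unbounded), then apply the rules and trim empty rows/columns afterwards.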
for row in origin:
row.insert(0, 0)
row.append(0)
origin.insert(0, [0] * len(origin[0]))
origin.append([0] * len(origin[0]))
result = deepcopy(origin)
for r in range(len(origin)):
for c in range(len(origin[0])):
neighbours = get_living_neighbours(origin, r, c)
if neighbours > 3 or neighbours < 2:
result[r][c] = 0
elif neighbours == 3:
result[r][c] = 1
trim_result(result)
return result
def trim_result(result):
while is_row_all_empty(result[0]):
result.pop(0)
while is_row_all_empty(result[-1]):
result.pop()
start_empty, end_empty = True, True
while start_empty or end_empty:
for r in result:
if r[0] != 0:
start_empty = False
if r[-1] != 0:
end_empty = False
for r in result:
if start_empty:
r.pop(0)
if end_empty:
r.pop()
def is_row_all_empty(row):
return sum(row) == 0
def get_living_neighbours(cells, row, col):
livings = 0
for r in [-1, 0, 1]:
if 0 <= row + r <= len(cells) - 1:
for c in [-1, 0, 1]:
if 0 <= col + c <= len(cells[0]) - 1:
if c == 0 and r == 0:
continue
livings += cells[row + r][col + c]
return livings | mit | 7,685,390,107,174,797,000 | 25.117647 | 66 | 0.514366 | false |
Mercy-Nekesa/sokoapp | sokoapp/tracking/utils.py | 1 | 5688 | import re
headers = ('HTTP_CLIENT_IP', 'HTTP_X_FORWARDED_FOR', 'HTTP_X_FORWARDED',
'HTTP_X_CLUSTERED_CLIENT_IP', 'HTTP_FORWARDED_FOR', 'HTTP_FORWARDED',
'REMOTE_ADDR')
# Back ported from Django trunk
# This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
def is_valid_ipv4_address(ip_str):
return bool(ipv4_re.match(ip_str))
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
if not is_valid_ipv4_address(hextet):
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in a expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to sanitize, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in range(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if [x for x in ip_str.split(':') if len(x) < 4]:
return True
return False
def get_ip_address(request):
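    # Walk the candidate headers in priority order and return the first value
    # that parses as a valid IPv4 or IPv6 address.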
for header in headers:
if request.META.get(header, None):
ip = request.META[header].split(',')[0]
if ':' in ip and is_valid_ipv6_address(ip) or is_valid_ipv4_address(ip):
return ip
| mit | -1,081,139,503,090,599,600 | 27.582915 | 94 | 0.56962 | false |
pymor/dune-hdd | examples/linearparabolic/morepas3__prepare.py | 1 | 23451 | #!/usr/bin/env python2
#
# This file is part of the dune-hdd project:
# https://github.com/pymor/dune-hdd
# Copyright Holders: Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import division, print_function
import numpy as np
from functools import partial
from pymor.algorithms.timestepping import ImplicitEulerTimeStepper
from pymor.core.logger import getLogger
from pymor.discretizations.basic import InstationaryDiscretization
from pymor.grids.oned import OnedGrid
from pymor.parameters.spaces import CubicParameterSpace
from pymor.vectorarrays.list import ListVectorArray
from dune.pymor.la.container import make_listvectorarray
from generic_multiscale import dune_module, examples, wrapper
logger = getLogger('.morepas3.prepare')
logger.setLevel('INFO')
class InstationaryDuneVisualizer(object):
def __init__(self, disc, prefix):
self.disc = disc
self.prefix = prefix
def visualize(self, U, *args, **kwargs):
import numpy as np
dune_disc = self.disc._impl
assert isinstance(U, ListVectorArray)
filename = kwargs['filename'] if 'filename' in kwargs else self.prefix
size = len(U)
pad = len(str(size))
for ss in np.arange(size):
dune_disc.visualize(U._list[ss]._impl,
filename + '_' + str(ss).zfill(pad),
'solution',
False) # do not add dirichlet shift
def bochner_norm(T, space_norm2, U, mu=None, order=2):
'''
L^2-in-time, X-in-space
'''
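    # Approximates || U ||_{L^2(0, T; X)} = sqrt( int_0^T || U(t) ||_X^2 dt ):
    # U is interpolated linearly in time between its snapshots and the time
    # integral is evaluated by quadrature of the given order on each interval.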
nt = len(U)
time_grid = OnedGrid(domain=(0., T), num_intervals=nt-1)
assert len(U) == time_grid.size(1)
qq = time_grid.quadrature_points(0, order=order)
integral = 0.
for entity in np.arange(time_grid.size(0)):
# get quadrature
qq_e = qq[entity] # points
ww = time_grid.reference_element.quadrature(order)[1] # weights
ie = time_grid.integration_elements(0)[entity] # integration element
# create shape function evaluations
a = time_grid.centers(1)[entity]
b = time_grid.centers(1)[entity + 1]
SF = np.array((1./(a - b)*qq_e[..., 0] - b/(a - b),
1./(b - a)*qq_e[..., 0] - a/(b - a)))
U_a = U._list[entity]
U_b = U._list[entity + 1]
values = np.zeros(len(qq_e))
for ii in np.arange(len(qq_e)):
# compute U(t)
U_t = U_a.copy()
U_t.scal(SF[0][ii])
U_t.axpy(SF[1][ii], U_b)
# compute the X-norm of U(t)
values[ii] = space_norm2(make_listvectorarray(U_t), mu)
integral += np.dot(values, ww)*ie
return np.sqrt(integral)
def discretize(num_elements, num_partitions, T, nt, initial_data, parameter_range, name='detailed discretization'):
Example = examples[2]['aluconformgrid']['fem']['istl']
logger_cfg = Example.logger_options()
logger_cfg.set('info', -1, True)
logger_cfg.set('info_color', 'blue', True)
grid_cfg = Example.grid_options('grid.multiscale.provider.cube')
grid_cfg.set('lower_left', '[0 0]', True)
grid_cfg.set('upper_right', '[5 1]', True)
grid_cfg.set('num_elements', num_elements, True)
grid_cfg.set('num_partitions', num_partitions, True)
boundary_cfg = Example.boundary_options('stuff.grid.boundaryinfo.alldirichlet')
problem_cfg = Example.problem_options('hdd.linearelliptic.problem.OS2015.spe10model1')
problem_cfg.set('parametric_channel', 'true', True)
problem_cfg.set('channel_boundary_layer', '0', True)
problem_cfg.set('filename', 'perm_case1.dat', True)
problem_cfg.set('lower_left', '[0 0]', True)
problem_cfg.set('upper_right', '[5 1]', True)
problem_cfg.set('num_elements', '[100 20]', True)
problem_cfg.set('forces.0.domain', '[0.95 1.10; 0.30 0.45]', True)
problem_cfg.set('forces.0.value', '2000', True)
problem_cfg.set('forces.1.domain', '[3.00 3.15; 0.75 0.90]', True)
problem_cfg.set('forces.1.value', '-1000', True)
problem_cfg.set('forces.2.domain', '[4.25 4.40; 0.25 0.40]', True)
problem_cfg.set('forces.2.value', '-1000', True)
problem_cfg.set('channel.0.value', '-1.07763239495', True)
problem_cfg.set('channel.1.value', '-1.07699512772', True)
problem_cfg.set('channel.2.value', '-1.07356156439', True)
problem_cfg.set('channel.3.value', '-1.06602281736', True)
problem_cfg.set('channel.4.value', '-1.06503683743', True)
problem_cfg.set('channel.5.value', '-1.07974870426', True)
problem_cfg.set('channel.6.value', '-1.05665895923', True)
problem_cfg.set('channel.7.value', '-1.08310334837', True)
problem_cfg.set('channel.8.value', '-1.05865484973', True)
problem_cfg.set('channel.9.value', '-1.05871039535', True)
problem_cfg.set('channel.10.value', '-1.08136695901', True)
problem_cfg.set('channel.11.value', '-1.08490172721', True)
problem_cfg.set('channel.12.value', '-1.06641120758', True)
problem_cfg.set('channel.13.value', '-1.06812773298', True)
problem_cfg.set('channel.14.value', '-1.07695652049', True)
problem_cfg.set('channel.15.value', '-1.08630079205', True)
problem_cfg.set('channel.16.value', '-1.08273722112', True)
problem_cfg.set('channel.17.value', '-1.07500402155', True)
problem_cfg.set('channel.18.value', '-1.08607142562', True)
problem_cfg.set('channel.19.value', '-1.07268761799', True)
problem_cfg.set('channel.20.value', '-1.08537037362', True)
problem_cfg.set('channel.21.value', '-1.08466927273', True)
problem_cfg.set('channel.22.value', '-1.08444661815', True)
problem_cfg.set('channel.23.value', '-1.08957037967', True)
problem_cfg.set('channel.24.value', '-1.08047394052', True)
problem_cfg.set('channel.25.value', '-1.08221229083', True)
problem_cfg.set('channel.26.value', '-1.08568599863', True)
problem_cfg.set('channel.27.value', '-1.08428347872', True)
problem_cfg.set('channel.28.value', '-1.09104098734', True)
problem_cfg.set('channel.29.value', '-1.09492700673', True)
problem_cfg.set('channel.30.value', '-1.09760440537', True)
problem_cfg.set('channel.31.value', '-1.09644989453', True)
problem_cfg.set('channel.32.value', '-1.09441681025', True)
problem_cfg.set('channel.33.value', '-1.09533290654', True)
problem_cfg.set('channel.34.value', '-1.1001430808', True)
problem_cfg.set('channel.35.value', '-1.10065627621', True)
problem_cfg.set('channel.36.value', '-1.10125877186', True)
problem_cfg.set('channel.37.value', '-1.10057485893', True)
problem_cfg.set('channel.38.value', '-1.10002261906', True)
problem_cfg.set('channel.39.value', '-1.10219154209', True)
problem_cfg.set('channel.40.value', '-1.09994463801', True)
problem_cfg.set('channel.41.value', '-1.10265630533', True)
problem_cfg.set('channel.42.value', '-1.10448566526', True)
problem_cfg.set('channel.43.value', '-1.10735820121', True)
problem_cfg.set('channel.44.value', '-1.1070022367', True)
problem_cfg.set('channel.45.value', '-1.10777650387', True)
problem_cfg.set('channel.46.value', '-1.10892785562', True)
problem_cfg.set('channel.0.domain', '[1.7 1.75; 0.5 0.55]', True)
problem_cfg.set('channel.1.domain', '[1.75 1.8; 0.5 0.55]', True)
problem_cfg.set('channel.2.domain', '[1.8 1.85; 0.5 0.55]', True)
problem_cfg.set('channel.3.domain', '[1.85 1.9; 0.5 0.55]', True)
problem_cfg.set('channel.4.domain', '[1.9 1.95; 0.5 0.55]', True)
problem_cfg.set('channel.5.domain', '[1.95 2.0; 0.5 0.55]', True)
problem_cfg.set('channel.6.domain', '[2.0 2.05; 0.5 0.55]', True)
problem_cfg.set('channel.7.domain', '[2.05 2.1; 0.5 0.55]', True)
problem_cfg.set('channel.8.domain', '[2.1 2.15; 0.5 0.55]', True)
problem_cfg.set('channel.9.domain', '[2.15 2.2; 0.5 0.55]', True)
problem_cfg.set('channel.10.domain', '[2.2 2.25; 0.5 0.55]', True)
problem_cfg.set('channel.11.domain', '[2.25 2.3; 0.5 0.55]', True)
problem_cfg.set('channel.12.domain', '[2.3 2.35; 0.5 0.55]', True)
problem_cfg.set('channel.13.domain', '[2.35 2.4; 0.5 0.55]', True)
problem_cfg.set('channel.14.domain', '[2.4 2.45; 0.5 0.55]', True)
problem_cfg.set('channel.15.domain', '[2.45 2.5; 0.5 0.55]', True)
problem_cfg.set('channel.16.domain', '[2.5 2.55; 0.5 0.55]', True)
problem_cfg.set('channel.17.domain', '[2.55 2.6; 0.5 0.55]', True)
problem_cfg.set('channel.18.domain', '[2.6 2.65; 0.5 0.55]', True)
problem_cfg.set('channel.19.domain', '[2.65 2.7; 0.5 0.55]', True)
problem_cfg.set('channel.20.domain', '[2.7 2.75; 0.5 0.55]', True)
problem_cfg.set('channel.21.domain', '[2.75 2.8; 0.5 0.55]', True)
problem_cfg.set('channel.22.domain', '[2.8 2.85; 0.5 0.55]', True)
problem_cfg.set('channel.23.domain', '[2.85 2.9; 0.5 0.55]', True)
problem_cfg.set('channel.24.domain', '[2.9 2.95; 0.5 0.55]', True)
problem_cfg.set('channel.25.domain', '[2.95 3.0; 0.5 0.55]', True)
problem_cfg.set('channel.26.domain', '[3.0 3.05; 0.5 0.55]', True)
problem_cfg.set('channel.27.domain', '[3.05 3.1; 0.5 0.55]', True)
problem_cfg.set('channel.28.domain', '[3.1 3.15; 0.5 0.55]', True)
problem_cfg.set('channel.29.domain', '[3.15 3.2; 0.5 0.55]', True)
problem_cfg.set('channel.30.domain', '[3.2 3.25; 0.5 0.55]', True)
problem_cfg.set('channel.31.domain', '[3.25 3.3; 0.5 0.55]', True)
problem_cfg.set('channel.32.domain', '[3.3 3.35; 0.5 0.55]', True)
problem_cfg.set('channel.33.domain', '[3.35 3.4; 0.5 0.55]', True)
problem_cfg.set('channel.34.domain', '[3.4 3.45; 0.5 0.55]', True)
problem_cfg.set('channel.35.domain', '[3.45 3.5; 0.5 0.55]', True)
problem_cfg.set('channel.36.domain', '[3.5 3.55; 0.5 0.55]', True)
problem_cfg.set('channel.37.domain', '[3.55 3.6; 0.5 0.55]', True)
problem_cfg.set('channel.38.domain', '[3.6 3.65; 0.5 0.55]', True)
problem_cfg.set('channel.39.domain', '[3.65 3.7; 0.5 0.55]', True)
problem_cfg.set('channel.40.domain', '[3.7 3.75; 0.5 0.55]', True)
problem_cfg.set('channel.41.domain', '[3.75 3.8; 0.5 0.55]', True)
problem_cfg.set('channel.42.domain', '[3.8 3.85; 0.5 0.55]', True)
problem_cfg.set('channel.43.domain', '[3.85 3.9; 0.5 0.55]', True)
problem_cfg.set('channel.44.domain', '[3.9 3.95; 0.5 0.55]', True)
problem_cfg.set('channel.45.domain', '[3.95 4.0; 0.5 0.55]', True)
problem_cfg.set('channel.46.domain', '[4.0 4.05; 0.5 0.55]', True)
problem_cfg.set('channel.47.value', '-1.10372589211', True)
problem_cfg.set('channel.48.value', '-1.1020889988', True)
problem_cfg.set('channel.49.value', '-1.09806955069', True)
problem_cfg.set('channel.50.value', '-1.10000902421', True)
problem_cfg.set('channel.51.value', '-1.08797468724', True)
problem_cfg.set('channel.52.value', '-1.08827472176', True)
problem_cfg.set('channel.53.value', '-1.08692237109', True)
problem_cfg.set('channel.54.value', '-1.07893190093', True)
problem_cfg.set('channel.55.value', '-1.08748373853', True)
problem_cfg.set('channel.56.value', '-1.07445197324', True)
problem_cfg.set('channel.57.value', '-1.08246613163', True)
problem_cfg.set('channel.58.value', '-1.06726790504', True)
problem_cfg.set('channel.59.value', '-1.07891217847', True)
problem_cfg.set('channel.60.value', '-1.07260827126', True)
problem_cfg.set('channel.61.value', '-1.07094062748', True)
problem_cfg.set('channel.62.value', '-1.0692399429', True)
problem_cfg.set('channel.63.value', '-1.00099885701', True)
problem_cfg.set('channel.64.value', '-1.00109544002', True)
problem_cfg.set('channel.65.value', '-0.966491003242', True)
problem_cfg.set('channel.66.value', '-0.802284684014', True)
problem_cfg.set('channel.67.value', '-0.980790923021', True)
problem_cfg.set('channel.68.value', '-0.614478271687', True)
problem_cfg.set('channel.69.value', '-0.288129858959', True)
problem_cfg.set('channel.70.value', '-0.929509396842', True)
problem_cfg.set('channel.71.value', '-0.992376505995', True)
problem_cfg.set('channel.72.value', '-0.968162494855', True)
problem_cfg.set('channel.73.value', '-0.397316938901', True)
problem_cfg.set('channel.74.value', '-0.970934956609', True)
problem_cfg.set('channel.75.value', '-0.784344730096', True)
problem_cfg.set('channel.76.value', '-0.539725422323', True)
problem_cfg.set('channel.77.value', '-0.915632282372', True)
problem_cfg.set('channel.78.value', '-0.275089177273', True)
problem_cfg.set('channel.79.value', '-0.949684959286', True)
problem_cfg.set('channel.80.value', '-0.936132529794', True)
problem_cfg.set('channel.47.domain', '[2.6 2.65; 0.45 0.50]', True)
problem_cfg.set('channel.48.domain', '[2.65 2.7; 0.45 0.50]', True)
problem_cfg.set('channel.49.domain', '[2.7 2.75; 0.45 0.50]', True)
problem_cfg.set('channel.50.domain', '[2.75 2.8; 0.45 0.50]', True)
problem_cfg.set('channel.51.domain', '[2.8 2.85; 0.45 0.50]', True)
problem_cfg.set('channel.52.domain', '[2.85 2.9; 0.45 0.50]', True)
problem_cfg.set('channel.53.domain', '[2.9 2.95; 0.45 0.50]', True)
problem_cfg.set('channel.54.domain', '[2.95 3.0; 0.45 0.50]', True)
problem_cfg.set('channel.55.domain', '[3.0 3.05; 0.45 0.50]', True)
problem_cfg.set('channel.56.domain', '[3.05 3.1; 0.45 0.50]', True)
problem_cfg.set('channel.57.domain', '[3.1 3.15; 0.45 0.50]', True)
problem_cfg.set('channel.58.domain', '[3.15 3.2; 0.45 0.50]', True)
problem_cfg.set('channel.59.domain', '[3.2 3.25; 0.45 0.50]', True)
problem_cfg.set('channel.60.domain', '[3.25 3.3; 0.45 0.50]', True)
problem_cfg.set('channel.61.domain', '[3.3 3.35; 0.45 0.50]', True)
problem_cfg.set('channel.62.domain', '[3.35 3.4; 0.45 0.50]', True)
problem_cfg.set('channel.63.domain', '[3.4 3.45; 0.45 0.50]', True)
problem_cfg.set('channel.64.domain', '[3.45 3.5; 0.45 0.50]', True)
problem_cfg.set('channel.65.domain', '[3.5 3.55; 0.45 0.50]', True)
problem_cfg.set('channel.66.domain', '[3.55 3.6; 0.45 0.50]', True)
problem_cfg.set('channel.67.domain', '[3.6 3.65; 0.45 0.50]', True)
problem_cfg.set('channel.68.domain', '[3.65 3.7; 0.45 0.50]', True)
problem_cfg.set('channel.69.domain', '[3.7 3.75; 0.45 0.50]', True)
problem_cfg.set('channel.70.domain', '[3.75 3.8; 0.45 0.50]', True)
problem_cfg.set('channel.71.domain', '[3.8 3.85; 0.45 0.50]', True)
problem_cfg.set('channel.72.domain', '[3.85 3.9; 0.45 0.50]', True)
problem_cfg.set('channel.73.domain', '[3.9 3.95; 0.45 0.50]', True)
problem_cfg.set('channel.74.domain', '[3.95 4.0; 0.45 0.50]', True)
problem_cfg.set('channel.75.domain', '[4.0 4.05; 0.45 0.50]', True)
problem_cfg.set('channel.76.domain', '[4.05 4.1; 0.45 0.50]', True)
problem_cfg.set('channel.77.domain', '[4.1 4.15; 0.45 0.50]', True)
problem_cfg.set('channel.78.domain', '[4.15 4.2; 0.45 0.50]', True)
problem_cfg.set('channel.79.domain', '[4.2 4.25; 0.45 0.50]', True)
problem_cfg.set('channel.80.domain', '[4.25 4.3; 0.45 0.50]', True)
problem_cfg.set('channel.81.value', '-1.10923642795', True)
problem_cfg.set('channel.82.value', '-1.10685618623', True)
problem_cfg.set('channel.83.value', '-1.1057800376', True)
problem_cfg.set('channel.84.value', '-1.10187723629', True)
problem_cfg.set('channel.85.value', '-1.10351710464', True)
problem_cfg.set('channel.86.value', '-1.10037551137', True)
problem_cfg.set('channel.87.value', '-1.09724407076', True)
problem_cfg.set('channel.88.value', '-1.09604600208', True)
problem_cfg.set('channel.89.value', '-1.09354469656', True)
problem_cfg.set('channel.90.value', '-1.08934455354', True)
problem_cfg.set('channel.91.value', '-1.08155476586', True)
problem_cfg.set('channel.92.value', '-1.07815397899', True)
problem_cfg.set('channel.93.value', '-1.09174062023', True)
problem_cfg.set('channel.94.value', '-1.07433616068', True)
problem_cfg.set('channel.95.value', '-1.08030587701', True)
problem_cfg.set('channel.81.domain', '[1.95 2.0; 0.40 0.45]', True)
problem_cfg.set('channel.82.domain', '[2.0 2.05; 0.40 0.45]', True)
problem_cfg.set('channel.83.domain', '[2.05 2.1; 0.40 0.45]', True)
problem_cfg.set('channel.84.domain', '[2.1 2.15; 0.40 0.45]', True)
problem_cfg.set('channel.85.domain', '[2.15 2.2; 0.40 0.45]', True)
problem_cfg.set('channel.86.domain', '[2.2 2.25; 0.40 0.45]', True)
problem_cfg.set('channel.87.domain', '[2.25 2.3; 0.40 0.45]', True)
problem_cfg.set('channel.88.domain', '[2.3 2.35; 0.40 0.45]', True)
problem_cfg.set('channel.89.domain', '[2.35 2.4; 0.40 0.45]', True)
problem_cfg.set('channel.90.domain', '[2.4 2.45; 0.40 0.45]', True)
problem_cfg.set('channel.91.domain', '[2.45 2.5; 0.40 0.45]', True)
problem_cfg.set('channel.92.domain', '[2.5 2.55; 0.40 0.45]', True)
problem_cfg.set('channel.93.domain', '[2.55 2.6; 0.40 0.45]', True)
problem_cfg.set('channel.94.domain', '[2.6 2.65; 0.40 0.45]', True)
problem_cfg.set('channel.95.domain', '[2.65 2.7; 0.40 0.45]', True)
problem_cfg.set('channel.96.value', '-1.00032869407', True)
problem_cfg.set('channel.97.value', '-1.01175908905', True)
problem_cfg.set('channel.98.value', '-1.04954395793', True)
problem_cfg.set('channel.99.value', '-1.017967697', True)
problem_cfg.set('channel.100.value', '-1.04647184091', True)
problem_cfg.set('channel.101.value', '-1.01911894831', True)
problem_cfg.set('channel.102.value', '-1.00699340158', True)
problem_cfg.set('channel.103.value', '-0.995492960025', True)
problem_cfg.set('channel.104.value', '-1.0373059007', True)
problem_cfg.set('channel.96.domain', '[2.25 2.3; 0.35 0.40]', True)
problem_cfg.set('channel.97.domain', '[2.3 2.35; 0.35 0.40]', True)
problem_cfg.set('channel.98.domain', '[2.35 2.4; 0.35 0.40]', True)
problem_cfg.set('channel.99.domain', '[2.4 2.45; 0.35 0.40]', True)
problem_cfg.set('channel.100.domain', '[2.45 2.5; 0.35 0.40]', True)
problem_cfg.set('channel.101.domain', '[2.5 2.55; 0.35 0.40]', True)
problem_cfg.set('channel.102.domain', '[2.55 2.6; 0.35 0.40]', True)
problem_cfg.set('channel.103.domain', '[2.6 2.65; 0.35 0.40]', True)
problem_cfg.set('channel.104.domain', '[2.65 2.7; 0.35 0.4]', True)
example = Example(logger_cfg, grid_cfg, boundary_cfg, problem_cfg, ['l2', 'h1', 'elliptic_penalty'])
elliptic_LRBMS_disc = wrapper[example.discretization()]
parameter_space = CubicParameterSpace(elliptic_LRBMS_disc.parameter_type, parameter_range[0], parameter_range[1])
elliptic_LRBMS_disc = elliptic_LRBMS_disc.with_(parameter_space=parameter_space)
elliptic_disc = elliptic_LRBMS_disc.as_nonblocked().with_(parameter_space=parameter_space)
def prolong(coarse_disc, coarse_U):
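        # Interpolate the coarse-in-time trajectory linearly onto the
        # reference time grid and prolong each interpolated snapshot in space.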
time_grid_ref = OnedGrid(domain=(0., T), num_intervals=nt)
time_grid = OnedGrid(domain=(0., T), num_intervals=(len(coarse_U) - 1))
U_fine = [None for ii in time_grid_ref.centers(1)]
for n in np.arange(len(time_grid_ref.centers(1))):
t_n = time_grid_ref.centers(1)[n]
coarse_entity = min((time_grid.centers(1) <= t_n).nonzero()[0][-1],
time_grid.size(0) - 1)
a = time_grid.centers(1)[coarse_entity]
b = time_grid.centers(1)[coarse_entity + 1]
SF = np.array((1./(a - b)*t_n - b/(a - b),
1./(b - a)*t_n - a/(b - a)))
U_t = coarse_U.copy(ind=coarse_entity)
U_t.scal(SF[0][0])
U_t.axpy(SF[1][0], coarse_U, x_ind=(coarse_entity + 1))
U_fine[n] = wrapper[example.prolong(coarse_disc._impl, U_t._list[0]._impl)]
return make_listvectorarray(U_fine)
if isinstance(initial_data, str):
initial_data = make_listvectorarray(wrapper[example.project(initial_data)])
# initial_data = elliptic_disc.operator.apply_inverse(initial_data, mu=(1, 1))
else:
coarse_disc = initial_data[0]
initial_data = initial_data[1]
assert len(initial_data) == 1
initial_data = example.prolong(coarse_disc._impl, initial_data._list[0]._impl)
initial_data = make_listvectorarray(wrapper[initial_data])
parabolic_disc = InstationaryDiscretization(T=T,
initial_data=initial_data,
operator=elliptic_disc.operator,
rhs=elliptic_disc.rhs,
mass=elliptic_disc.products['l2'],
time_stepper=ImplicitEulerTimeStepper(nt, solver_options='operator'),
products=elliptic_disc.products,
operators=elliptic_disc.operators,
functionals=elliptic_disc.functionals,
vector_operators=elliptic_disc.vector_operators,
visualizer=InstationaryDuneVisualizer(elliptic_disc, 'dune_discretization.solution'),
parameter_space=parameter_space,
cache_region='disk',
name='{} ({} DoFs)'.format(name, elliptic_disc.solution_space.dim))
return {'example': example,
'initial_data': initial_data,
'wrapper': wrapper,
'elliptic_LRBMS_disc': elliptic_LRBMS_disc,
'elliptic_disc': elliptic_disc,
'parabolic_disc': parabolic_disc,
'prolongator': prolong}
def prepare(cfg):
detailed_data = discretize(cfg['dune_num_elements'], cfg['dune_num_partitions'], cfg['end_time'], cfg['nt'],
cfg['initial_data'], (cfg['mu_min'], cfg['mu_max']))
wrapper, parabolic_disc = detailed_data['wrapper'], detailed_data['parabolic_disc']
logger.info('creating products and norms ...')
for tp in ('mu_bar', 'mu_hat', 'mu_tilde', 'mu_min', 'mu_max'):
detailed_data[tp] = parabolic_disc.parse_parameter(cfg[tp])
space_products = {}
for kk, prod in parabolic_disc.products.items():
space_products[kk] = prod
if prod.parametric:
for tp in 'mu_bar', 'mu_hat', 'mu_tilde':
mu = wrapper.dune_parameter(detailed_data[tp])
space_products['{}_{}'.format(kk, tp)] = wrapper[prod._impl.freeze_parameter(mu)]
def create_norm2(prod):
def norm2(U, mu=None):
return prod.apply2(U, U, mu=mu)[0][0]
return norm2
space_norms2 = {kk: create_norm2(prod)
for kk, prod in space_products.items()}
def create_bochner_norm(space_norm2):
return partial(bochner_norm, cfg['end_time'], space_norm2, order=cfg['integration_order_time'])
bochner_norms = {kk: create_bochner_norm(space_norm2)
for kk, space_norm2 in space_norms2.items()}
detailed_data['space_products'] = space_products
detailed_data['space_norms2'] = space_norms2
detailed_data['bochner_norms'] = bochner_norms
return detailed_data
| bsd-2-clause | -5,464,840,250,336,528,000 | 54.835714 | 133 | 0.612255 | false |
wateraccounting/wa | Collect/MOD9/DataAccess.py | 1 | 12824 | # -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2016
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Collect/MOD9
"""
# import general python modules
import os
import numpy as np
import pandas as pd
import gdal
import urllib
import urllib2
from bs4 import BeautifulSoup
import re
import urlparse
import glob
import requests
from joblib import Parallel, delayed
# Water Accounting modules
import wa
import wa.General.raster_conversions as RC
import wa.General.data_conversions as DC
from wa import WebAccounts
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores, hdf_library, remove_hdf):
"""
This function downloads MOD9 daily data
Keyword arguments:
Dir -- 'C:/file/to/path/'
Startdate -- 'yyyy-mm-dd'
Enddate -- 'yyyy-mm-dd'
latlim -- [ymin, ymax] (values must be between -90 and 90)
lonlim -- [xmin, xmax] (values must be between -180 and 180)
cores -- The number of cores used to run the routine. It can be 'False'
to avoid using parallel computing routines.
    Waitbar -- 1 (Default) will print a waitbar
    hdf_library -- path to a folder holding already downloaded MOD09 .hdf
                   tiles; when a required tile is found there it is used
                   instead of being downloaded again
    remove_hdf -- 1 will remove the downloaded .hdf and .txt files from the
                  output folder once the geotiffs have been created
"""
# Check start and end date and otherwise set the date to max
if not Startdate:
Startdate = pd.Timestamp('2000-02-24')
if not Enddate:
Enddate = pd.Timestamp('Now')
# Make an array of the days of which the NDVI is taken
Dates = pd.date_range(Startdate, Enddate, freq = 'D')
# Create Waitbar
if Waitbar == 1:
import wa.Functions.Start.WaitbarConsole as WaitbarConsole
total_amount = len(Dates)
amount = 0
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
# Check the latitude and longitude and otherwise set lat or lon on greatest extent
if latlim[0] < -90 or latlim[1] > 90:
print 'Latitude above 90N or below 90S is not possible. Value set to maximum'
        latlim[0] = np.max([latlim[0], -90])
        latlim[1] = np.min([latlim[1], 90])
if lonlim[0] < -180 or lonlim[1] > 180:
print 'Longitude must be between 180E and 180W. Now value is set to maximum'
        lonlim[0] = np.max([lonlim[0], -180])
        lonlim[1] = np.min([lonlim[1], 180])
# Make directory for the MODIS NDVI data
Dir = Dir.replace("/", os.sep)
output_folder = os.path.join(Dir, 'Reflectance', 'MOD9')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
TilesVertical, TilesHorizontal = wa.Collect.MOD15.DataAccess.Get_tiles_from_txt(output_folder, hdf_library, latlim, lonlim)
# Pass variables to parallel function and run
args = [output_folder, TilesVertical, TilesHorizontal, lonlim, latlim, hdf_library]
if not cores:
for Date in Dates:
RetrieveData(Date, args)
if Waitbar == 1:
amount += 1
WaitbarConsole.printWaitBar(amount, total_amount, prefix = 'Progress:', suffix = 'Complete', length = 50)
results = True
else:
results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
for Date in Dates)
if remove_hdf == 1:
# Remove all .hdf files
os.chdir(output_folder)
files = glob.glob("*.hdf")
for f in files:
os.remove(os.path.join(output_folder, f))
# Remove all .txt files
files = glob.glob("*.txt")
for f in files:
os.remove(os.path.join(output_folder, f))
return results
def RetrieveData(Date, args):
"""
This function retrieves MOD9 Reflectance data for a given date from the
http://e4ftl01.cr.usgs.gov/ server.
Keyword arguments:
Date -- 'yyyy-mm-dd'
args -- A list of parameters defined in the DownloadData function.
"""
# Argument
[output_folder, TilesVertical, TilesHorizontal, lonlim, latlim, hdf_library] = args
# Collect the data from the MODIS webpage and returns the data and lat and long in meters of those tiles
try:
Collect_data(TilesHorizontal, TilesVertical, Date, output_folder, hdf_library)
except:
print "Was not able to download the file"
# Define the output name of the collect data function
name_collect = os.path.join(output_folder, 'Merged.tif')
# Reproject the MODIS product to epsg_to
epsg_to ='4326'
name_reprojected = RC.reproject_MODIS(name_collect, epsg_to)
# Clip the data to the users extend
data, geo = RC.clip_data(name_reprojected, latlim, lonlim)
# Save results as Gtiff
ReffileName = os.path.join(output_folder, 'Reflectance_MOD09GQ_-_daily_' + Date.strftime('%Y') + '.' + Date.strftime('%m') + '.' + Date.strftime('%d') + '.tif')
DC.Save_as_tiff(name=ReffileName, data=data, geo=geo, projection='WGS84')
# remove the side products
os.remove(os.path.join(output_folder, name_collect))
os.remove(os.path.join(output_folder, name_reprojected))
return True
def Collect_data(TilesHorizontal,TilesVertical,Date,output_folder, hdf_library):
'''
This function downloads all the needed MODIS tiles from http://e4ftl01.cr.usgs.gov/MOLT/MOD13Q1.006/ as a hdf file.
Keywords arguments:
TilesHorizontal -- [TileMin,TileMax] max and min horizontal tile number
TilesVertical -- [TileMin,TileMax] max and min vertical tile number
Date -- 'yyyy-mm-dd'
output_folder -- 'C:/file/to/path/'
'''
# Make a new tile for the data
sizeX = int((TilesHorizontal[1] - TilesHorizontal[0] + 1) * 4800)
sizeY = int((TilesVertical[1] - TilesVertical[0] + 1) * 4800)
DataTot = np.zeros((sizeY, sizeX))
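    # DataTot is the mosaic of all requested tiles; each downloaded tile is
    # pasted into it at the position given by countX/countY further below.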
# Load accounts
username, password = WebAccounts.Accounts(Type = 'NASA')
# Create the Lat and Long of the MODIS tile in meters
for Vertical in range(int(TilesVertical[0]), int(TilesVertical[1])+1):
Distance = 231.65635826395834 # resolution of a MODIS pixel in meter
countY=(TilesVertical[1] - TilesVertical[0] + 1) - (Vertical - TilesVertical[0])
for Horizontal in range(int(TilesHorizontal[0]), int(TilesHorizontal[1]) + 1):
countX=Horizontal - TilesHorizontal[0] + 1
# Download the MODIS NDVI data
url = 'https://e4ftl01.cr.usgs.gov/MOLT/MOD09GQ.006/' + Date.strftime('%Y') + '.' + Date.strftime('%m') + '.' + Date.strftime('%d') + '/'
# Reset the begin parameters for downloading
downloaded = 0
N=0
# Check the library given by user
if hdf_library is not None:
os.chdir(hdf_library)
hdf_name = glob.glob("MOD09GQ.A%s%03s.h%02dv%02d.*" %(Date.strftime('%Y'), Date.strftime('%j'), Horizontal, Vertical))
if len(hdf_name) == 1:
hdf_file = os.path.join(hdf_library, hdf_name[0])
if os.path.exists(hdf_file):
downloaded = 1
file_name = hdf_file
if not downloaded == 1:
# Get files on FTP server
f = urllib2.urlopen(url)
# Sum all the files on the server
soup = BeautifulSoup(f, "lxml")
for i in soup.findAll('a', attrs = {'href': re.compile('(?i)(hdf)$')}):
# Find the file with the wanted tile number
Vfile=str(i)[30:32]
Hfile=str(i)[27:29]
if int(Vfile) is int(Vertical) and int(Hfile) is int(Horizontal):
# Define the whole url name
full_url = urlparse.urljoin(url, i['href'])
# if not downloaded try to download file
while downloaded == 0:
try:# open http and download whole .hdf
nameDownload = full_url
file_name = os.path.join(output_folder,nameDownload.split('/')[-1])
if os.path.isfile(file_name):
downloaded = 1
else:
x = requests.get(nameDownload, allow_redirects = False)
try:
y = requests.get(x.headers['location'], auth = (username, password))
except:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
y = requests.get(x.headers['location'], auth = (username, password), verify = False)
z = open(file_name, 'wb')
z.write(y.content)
z.close()
statinfo = os.stat(file_name)
# Say that download was succesfull
if int(statinfo.st_size) > 10000:
downloaded = 1
# If download was not succesfull
except:
# Try another time
N = N + 1
# Stop trying after 10 times
if N == 10:
print 'Data from ' + Date.strftime('%Y-%m-%d') + ' is not available'
downloaded = 1
try:
# Open .hdf only band with NDVI and collect all tiles to one array
dataset = gdal.Open(file_name)
sdsdict = dataset.GetMetadata('SUBDATASETS')
sdslist = [sdsdict[k] for k in sdsdict.keys() if '_2_NAME' in k]
sds = []
for n in sdslist:
sds.append(gdal.Open(n))
full_layer = [i for i in sdslist if 'sur_refl_b01_1' in i]
idx = sdslist.index(full_layer[0])
if Horizontal == TilesHorizontal[0] and Vertical == TilesVertical[0]:
geo_t = sds[idx].GetGeoTransform()
# get the projection value
proj = sds[idx].GetProjection()
data = sds[idx].ReadAsArray()
countYdata = (TilesVertical[1] - TilesVertical[0] + 2) - countY
DataTot[int((countYdata - 1) * 4800):int(countYdata * 4800), int((countX - 1) * 4800):int(countX * 4800)]=data
del data
# if the tile not exists or cannot be opened, create a nan array with the right projection
except:
if Horizontal==TilesHorizontal[0] and Vertical==TilesVertical[0]:
x1 = (TilesHorizontal[0] - 19) * 4800 * Distance
x4 = (TilesVertical[0] - 9) * 4800 * -1 * Distance
geo = [x1, Distance, 0.0, x4, 0.0, -Distance]
geo_t=tuple(geo)
proj='PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",DATUM["Not specified (based on custom spheroid)",SPHEROID["Custom spheroid",6371007.181,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Sinusoidal"],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
data=np.ones((4800,4800)) * (-9999)
countYdata=(TilesVertical[1] - TilesVertical[0] + 2) - countY
DataTot[(countYdata - 1) * 4800:countYdata * 4800,(countX - 1) * 4800:countX * 4800] = data
# Make geotiff file
name2 = os.path.join(output_folder, 'Merged.tif')
driver = gdal.GetDriverByName("GTiff")
dst_ds = driver.Create(name2, DataTot.shape[1], DataTot.shape[0], 1, gdal.GDT_Float32, ['COMPRESS=LZW'])
try:
dst_ds.SetProjection(proj)
except:
proj='PROJCS["unnamed",GEOGCS["Unknown datum based upon the custom spheroid",DATUM["Not specified (based on custom spheroid)",SPHEROID["Custom spheroid",6371007.181,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Sinusoidal"],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]'
x1 = (TilesHorizontal[0] - 18) * 4800 * Distance
x4 = (TilesVertical[0] - 9) * 4800 * -1 * Distance
geo = [x1, Distance, 0.0, x4, 0.0, -Distance]
geo_t = tuple(geo)
dst_ds.SetProjection(proj)
dst_ds.GetRasterBand(1).SetNoDataValue(-9999)
dst_ds.SetGeoTransform(geo_t)
dst_ds.GetRasterBand(1).WriteArray(DataTot*0.0001)
dst_ds = None
sds = None
return()
| apache-2.0 | -289,952,064,989,498,560 | 42.471186 | 378 | 0.573456 | false |
Stbot/PyCrypt | first writes/b64cy.py | 1 | 2516 | ###############################################################################
import b64_mod
from binascii import unhexlify
###############################################################################
def decode(bstring,flag = 0):
"""Decode, when flag is set to 0 or not set takes a Base64 strig and converts it to english plain test. when flag is set to 1 the input string is already in hex format."""
declist = []
outstring = ''
if flag ==0:
declist = b64_mod.hextodec(b64_mod.basetohex(bstring))
elif flag == 1:
declist = b64_mod.hextodec(bstring)
for x in declist:
outstring += ""+chr(x)
return outstring
##############################################################################
def encode(ascstring, key=None):
"""Given an ascii english string of text with any quotes properly escaped this will encode it into a Base64 string."""
if key!=None:
if len(key)<len(ascstring):
key = keylen(b64_mod.ascitohex(ascstring),key)
outlist = []
for x in ascstring:
outlist.append(ord(x))
return b64_mod.hextobase(''.join(b64_mod.dec_to_hex(outlist)))
##############################################################################
def hexorsum(hexstring1, hexstring2):
"""Calculates the Xor sum of 2 equal length hex strings"""
binlist1 = []
binlist2 = []
binstring1 = b64_mod.hextobin(hexstring1)
binstring2 = b64_mod.hextobin(hexstring2)
for x in binstring1:
binlist1.append(x)
for x in binstring2:
binlist2.append(x)
sumlist = []
sumstring = ''
#print len(binlist1)
#print len(binlist2)
for x in range (len(binlist1)):
if binlist1[x] == binlist2[x]:
sumlist.append('0')
elif binlist1[x] != binlist2[x]:
sumlist.append('1')
sumstring = ''.join(sumlist)
return b64_mod.bintohex(sumstring)
##############################################################################
def keylen(hexstring, key, flag =0):
if flag == 0:
key = b64_mod.ascitohex(key)
while len(hexstring) != len(key):
if len(key)>len(hexstring):
key = key[:len(key)-1]
if len(key)<len(hexstring):
key+=key
return key
##############################################################################
def repkeyxor_encoder(text, key):
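    # Repeating-key XOR: the ASCII plaintext is hex-encoded, the key is
    # repeated/truncated to the same length, and the two hex strings are XORed.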
text = b64_mod.ascitohex(text)
if len(key)<len(text):
key = keylen(text,key)
return hexorsum(text,key)
| mit | -6,056,610,221,976,268,000 | 36 | 175 | 0.49841 | false |
googleapis/python-api-core | tests/unit/test_client_info.py | 1 | 2488 | # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.api_core import client_info
def test_constructor_defaults():
info = client_info.ClientInfo()
assert info.python_version is not None
assert info.grpc_version is not None
assert info.api_core_version is not None
assert info.gapic_version is None
assert info.client_library_version is None
assert info.rest_version is None
def test_constructor_options():
info = client_info.ClientInfo(
python_version="1",
grpc_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="6",
rest_version="7",
)
assert info.python_version == "1"
assert info.grpc_version == "2"
assert info.api_core_version == "3"
assert info.gapic_version == "4"
assert info.client_library_version == "5"
assert info.user_agent == "6"
assert info.rest_version == "7"
def test_to_user_agent_minimal():
info = client_info.ClientInfo(
python_version="1", api_core_version="2", grpc_version=None
)
user_agent = info.to_user_agent()
assert user_agent == "gl-python/1 gax/2"
def test_to_user_agent_full():
info = client_info.ClientInfo(
python_version="1",
grpc_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="app-name/1.0",
)
user_agent = info.to_user_agent()
assert user_agent == "app-name/1.0 gl-python/1 grpc/2 gax/3 gapic/4 gccl/5"
def test_to_user_agent_rest():
info = client_info.ClientInfo(
python_version="1",
grpc_version=None,
rest_version="2",
api_core_version="3",
gapic_version="4",
client_library_version="5",
user_agent="app-name/1.0",
)
user_agent = info.to_user_agent()
assert user_agent == "app-name/1.0 gl-python/1 rest/2 gax/3 gapic/4 gccl/5"
| apache-2.0 | -3,570,916,829,049,336,300 | 27.272727 | 79 | 0.64791 | false |
ItsCalebJones/SpaceLaunchNow-Server | api/v330/spacestation/views.py | 1 | 1860 | from rest_framework.viewsets import ModelViewSet
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from api.models import *
from api.permission import HasGroupPermission
from api.v330.spacestation.serializers import SpaceStationDetailedSerializer, SpaceStationSerializer
class SpaceStationViewSet(ModelViewSet):
"""
API endpoint that allows Space Stations to be viewed.
GET:
Return a list of all the existing space stations.
FILTERS:
    Parameters - 'name', 'status', 'owners', 'orbit', 'type', 'owners__name', 'owners__abbrev'
Example - /api/3.3.0/spacestation/?status=Active
SEARCH EXAMPLE:
Example - /api/3.3.0/spacestation/?search=ISS
Searches through 'name', 'owners__name', 'owners__abbrev'
ORDERING:
Fields - 'id', 'status', 'type', 'founded', 'volume'
Example - /api/3.3.0/spacestation/?ordering=id
"""
def get_serializer_class(self):
mode = self.request.query_params.get("mode", "normal")
if self.action == 'retrieve' or mode == "detailed":
return SpaceStationDetailedSerializer
else:
return SpaceStationSerializer
queryset = SpaceStation.objects.all()
permission_classes = [HasGroupPermission]
permission_groups = {
'retrieve': ['_Public'], # retrieve can be accessed without credentials (GET 'site.com/api/foo/1')
        'list': ['_Public'] # list can likewise be accessed without credentials (GET 'site.com/api/foo')
}
filter_backends = (DjangoFilterBackend, SearchFilter, OrderingFilter)
filter_fields = ('name', 'status', 'owners', 'orbit', 'type', 'owners__name', 'owners__abbrev')
search_fields = ('$name', 'owners__name', 'owners__abbrev')
ordering_fields = ('id', 'status', 'type', 'founded', 'volume') | apache-2.0 | -6,008,000,186,509,618,000 | 39.456522 | 114 | 0.687634 | false |
luan-th-nguyen/seisflows_ndt | seisflows/config.py | 1 | 6784 |
import copy_reg
import imp
import os
import re
import sys
import types
from importlib import import_module
from os.path import abspath, join, exists
from seisflows.tools import msg
from seisflows.tools.err import ParameterError
from seisflows.tools import unix
from seisflows.tools.tools import loadjson, loadobj, loadpy, savejson, saveobj
from seisflows.tools.tools import module_exists, package_exists
# SeisFlows consists of interacting 'system', 'preprocess', 'solver',
# 'postprocess', 'optimize', and 'workflow' objects. Each corresponds
# simultaneously to a module in the SeisFlows source code, a class that is
# instantiated and made accessible via sys.modules, and a parameter in a
# global dictionary. Once in memory, these objects can be thought of as
# comprising the complete 'state' of a SeisFlows session.
# The following list is one of the few hardwired aspects of the whole
# SeisFlows package. Any changes may result in circular imports or other
# problems.
names = []
names += ['system']
names += ['preprocess']
names += ['solver']
names += ['postprocess']
names += ['optimize']
names += ['workflow']
def config():
""" Instantiates SeisFlows objects and makes them globally accessible by
registering them in sys.modules
"""
# parameters and paths must already be loaded
# (normally this is done by sfsubmit)
assert 'seisflows_parameters' in sys.modules
assert 'seisflows_paths' in sys.modules
# check if objects already exist on disk
if exists(_output()):
print msg.WarningOverwrite
sys.exit()
# instantiate and register objects
for name in names:
sys.modules['seisflows_'+name] = custom_import(name)()
# error checking
for name in names:
sys.modules['seisflows_'+name].check()
if not hasattr(sys.modules['seisflows_parameters'], 'workflow'.upper()):
print msg.MissingParameter_Worfklow
sys.exit(-1)
if not hasattr(sys.modules['seisflows_parameters'], 'system'.upper()):
print msg.MissingParameter_System
sys.exit(-1)
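# For illustration (not part of the original module): once config() has run,
# the instantiated objects are reachable through sys.modules, e.g.
#   solver = sys.modules['seisflows_solver']
#   solver.check()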
def save():
""" Exports session to disk
"""
unix.mkdir(_output())
for name in ['parameters', 'paths']:
fullfile = join(_output(), 'seisflows_'+name+'.json')
savejson(fullfile, sys.modules['seisflows_'+name].__dict__)
for name in names:
fullfile = join(_output(), 'seisflows_'+name+'.p')
saveobj(fullfile, sys.modules['seisflows_'+name])
def load(path):
""" Imports session from disk
"""
for name in ['parameters', 'paths']:
fullfile = join(_full(path), 'seisflows_'+name+'.json')
sys.modules['seisflows_'+name] = Dict(loadjson(fullfile))
for name in names:
fullfile = join(_full(path), 'seisflows_'+name+'.p')
sys.modules['seisflows_'+name] = loadobj(fullfile)
class Dict(object):
""" Dictionary-like object for holding parameters or paths
"""
def __iter__(self):
return iter(sorted(self.__dict__.keys()))
def __getattr__(self, key):
return self.__dict__[key]
def __getitem__(self, key):
return self.__dict__[key]
def __setattr__(self, key, val):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be changed.")
self.__dict__[key] = val
def __delattr__(self, key):
if key in self.__dict__:
raise TypeError("Once defined, parameters cannot be deleted.")
raise KeyError
def update(self, newdict):
super(Dict, self).__setattr__('__dict__', newdict)
def __init__(self, newdict):
self.update(newdict)
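# Write-once behaviour of Dict, shown for illustration (not from the original
# source):
#   p = Dict({'NITER': 10})
#   p.NITER        # -> 10
#   p.NITER = 20   # raises TypeError: parameters cannot be changed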
class Null(object):
""" Always and reliably does nothing
"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __nonzero__(self):
return False
def __getattr__(self, key):
return self
def __setattr__(self, key, val):
return self
def __delattr__(self, key):
return self
def custom_import(*args):
""" Imports SeisFlows module and extracts class of same name. For example,
custom_import('workflow', 'inversion')
imports 'seisflows.workflow.inversion' and, from this module, extracts
class 'inversion'.
"""
# parse input arguments
if len(args) == 0:
raise Exception(msg.ImportError1)
if args[0] not in names:
raise Exception(msg.ImportError2)
if len(args) == 1:
args += (_try(args[0]),)
if not args[1]:
return Null
# generate package list
packages = ['seisflows']
# does module exist?
_exists = False
for package in packages:
full_dotted_name = package+'.'+args[0]+'.'+args[1]
if module_exists(full_dotted_name):
_exists = True
break
if not _exists:
raise Exception(msg.ImportError3 %
(args[0], args[1], args[0].upper()))
# import module
module = import_module(full_dotted_name)
# extract class
if hasattr(module, args[1]):
return getattr(module, args[1])
else:
raise Exception(msg.ImportError4 %
(args[0], args[1], args[1]))
def tilde_expand(mydict):
""" Expands tilde character in path strings
"""
for key,val in mydict.items():
if type(val) not in [str, unicode]:
raise Exception
if val[0:2] == '~/':
mydict[key] = os.getenv('HOME') +'/'+ val[2:]
return mydict
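# For illustration: tilde_expand({'scratch': '~/runs'}) would return
# {'scratch': '<HOME>/runs'}; non-string values raise an Exception.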
# utility functions
def _par(key):
return sys.modules['seisflows_parameters'][key.upper()]
def _path(key):
return sys.modules['seisflows_paths'][key.upper()]
def _try(key):
try:
return _par(key)
except KeyError:
return None
def _output():
try:
return _full(_path('output'))
except:
return _full(join('.', 'output'))
def _full(path):
try:
return join(abspath(path), '')
except:
raise IOError
# The following code changes how instance methods are handled by pickle.
# Placing it here, in this module, ensures that the pickle changes will be in
# effect for all SeisFlows workflows.
# For relevant discussion, see the stackoverflow thread "Can't pickle
# <type 'instancemethod'> when using python's multiprocessing Pool.map()"
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
| bsd-2-clause | -5,586,993,668,159,306,000 | 26.803279 | 406 | 0.627064 | false |
cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py | 1 | 38185 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's Python module for interacting with Bugzilla
import mimetypes
import re
import StringIO
import urllib
from datetime import datetime # used in timestamp()
from .attachment import Attachment
from .bug import Bug
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.config import committers
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.user import User
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer
class EditUsersParser(object):
def __init__(self):
self._group_name_to_group_string_cache = {}
def _login_and_uid_from_row(self, row):
first_cell = row.find("td")
# The first row is just headers, we skip it.
if not first_cell:
return None
# When there were no results, we have a fake "<none>" entry in the table.
if first_cell.find(text="<none>"):
return None
# Otherwise the <td> contains a single <a> which contains the login name or a single <i> with the string "<none>".
anchor_tag = first_cell.find("a")
login = unicode(anchor_tag.string).strip()
user_id = int(re.search(r"userid=(\d+)", str(anchor_tag['href'])).group(1))
return (login, user_id)
def login_userid_pairs_from_edit_user_results(self, results_page):
soup = BeautifulSoup(results_page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
results_table = soup.find(id="admin_table")
login_userid_pairs = [self._login_and_uid_from_row(row) for row in results_table('tr')]
# Filter out None from the logins.
return filter(lambda pair: bool(pair), login_userid_pairs)
def _group_name_and_string_from_row(self, row):
label_element = row.find('label')
group_string = unicode(label_element['for'])
group_name = unicode(label_element.find('strong').string).rstrip(':')
return (group_name, group_string)
def user_dict_from_edit_user_page(self, page):
soup = BeautifulSoup(page, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
user_table = soup.find("table", {'class': 'main'})
user_dict = {}
for row in user_table('tr'):
label_element = row.find('label')
if not label_element:
continue # This must not be a row we know how to parse.
if row.find('table'):
continue # Skip the <tr> holding the groups table.
key = label_element['for']
if "group" in key:
key = "groups"
value = user_dict.get('groups', set())
# We must be parsing a "tr" inside the inner group table.
(group_name, _) = self._group_name_and_string_from_row(row)
if row.find('input', {'type': 'checkbox', 'checked': 'checked'}):
value.add(group_name)
else:
value = unicode(row.find('td').string).strip()
user_dict[key] = value
return user_dict
def _group_rows_from_edit_user_page(self, edit_user_page):
soup = BeautifulSoup(edit_user_page, convertEntities=BeautifulSoup.HTML_ENTITIES)
return soup('td', {'class': 'groupname'})
def group_string_from_name(self, edit_user_page, group_name):
# Bugzilla uses "group_NUMBER" strings, which may be different per install
# so we just look them up once and cache them.
if not self._group_name_to_group_string_cache:
rows = self._group_rows_from_edit_user_page(edit_user_page)
name_string_pairs = map(self._group_name_and_string_from_row, rows)
self._group_name_to_group_string_cache = dict(name_string_pairs)
return self._group_name_to_group_string_cache[group_name]
def timestamp():
return datetime.now().strftime("%Y%m%d%H%M%S")
# A container for all of the logic for making and parsing bugzilla queries.
class BugzillaQueries(object):
def __init__(self, bugzilla):
self._bugzilla = bugzilla
def _is_xml_bugs_form(self, form):
# ClientForm.HTMLForm.find_control throws if the control is not found,
# so we do a manual search instead:
return "xml" in [control.id for control in form.controls]
# This is kinda a hack. There is probably a better way to get this information from bugzilla.
def _parse_result_count(self, results_page):
result_count_text = BeautifulSoup(results_page).find(attrs={'class': 'bz_result_count'}).string
result_count_parts = result_count_text.strip().split(" ")
if result_count_parts[0] == "Zarro":
return 0
if result_count_parts[0] == "One":
return 1
return int(result_count_parts[0])
# Note: _load_query, _fetch_bug and _fetch_bugs_from_advanced_query
# are the only methods which access self._bugzilla.
def _load_query(self, query):
self._bugzilla.authenticate()
full_url = "%s%s" % (config_urls.bug_server_url, query)
return self._bugzilla.browser.open(full_url)
def _fetch_bugs_from_advanced_query(self, query):
results_page = self._load_query(query)
if not self._parse_result_count(results_page):
return []
# Bugzilla results pages have an "XML" submit button at the bottom
# which can be used to get an XML page containing all of the <bug> elements.
        # This is slightly lame in that it assumes that _load_query used
        # self._bugzilla.browser and that it's in an acceptable state.
self._bugzilla.browser.select_form(predicate=self._is_xml_bugs_form)
bugs_xml = self._bugzilla.browser.submit()
return self._bugzilla._parse_bugs_from_xml(bugs_xml)
def _fetch_bug(self, bug_id):
return self._bugzilla.fetch_bug(bug_id)
def _fetch_bug_ids_advanced_query(self, query):
soup = BeautifulSoup(self._load_query(query))
# The contents of the <a> inside the cells in the first column happen
# to be the bug id.
return [int(bug_link_cell.find("a").string)
for bug_link_cell in soup('td', "first-child")]
def _parse_attachment_ids_request_query(self, page):
digits = re.compile("\d+")
attachment_href = re.compile("attachment.cgi\?id=\d+&action=review")
attachment_links = SoupStrainer("a", href=attachment_href)
return [int(digits.search(tag["href"]).group(0))
for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]
def _fetch_attachment_ids_request_query(self, query):
return self._parse_attachment_ids_request_query(self._load_query(query))
def _parse_quips(self, page):
soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES)
quips = soup.find(text=re.compile(r"Existing quips:")).findNext("ul").findAll("li")
return [unicode(quip_entry.string) for quip_entry in quips]
def fetch_quips(self):
return self._parse_quips(self._load_query("/quips.cgi?action=show"))
# List of all r+'d bugs.
def fetch_bug_ids_from_pending_commit_list(self):
needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
return self._fetch_bug_ids_advanced_query(needs_commit_query_url)
def fetch_bugs_matching_quicksearch(self, search_string):
# We may want to use a more explicit query than "quicksearch".
# If quicksearch changes we should probably change to use
# a normal buglist.cgi?query_format=advanced query.
quicksearch_url = "buglist.cgi?quicksearch=%s" % urllib.quote(search_string)
return self._fetch_bugs_from_advanced_query(quicksearch_url)
# Currently this returns all bugs across all components.
# In the future we may wish to extend this API to construct more restricted searches.
def fetch_bugs_matching_search(self, search_string, author_email=None):
query = "buglist.cgi?query_format=advanced"
if search_string:
query += "&short_desc_type=allwordssubstr&short_desc=%s" % urllib.quote(search_string)
if author_email:
query += "&emailreporter1=1&emailtype1=substring&email1=%s" % urllib.quote(search_string)
return self._fetch_bugs_from_advanced_query(query)
def fetch_patches_from_pending_commit_list(self):
return sum([self._fetch_bug(bug_id).reviewed_patches()
for bug_id in self.fetch_bug_ids_from_pending_commit_list()], [])
def fetch_bug_ids_from_commit_queue(self):
commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed"
return self._fetch_bug_ids_advanced_query(commit_queue_url)
def fetch_patches_from_commit_queue(self):
# This function will only return patches which have valid committers
# set. It won't reject patches with invalid committers/reviewers.
return sum([self._fetch_bug(bug_id).commit_queued_patches()
for bug_id in self.fetch_bug_ids_from_commit_queue()], [])
def fetch_bug_ids_from_review_queue(self):
review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
return self._fetch_bug_ids_advanced_query(review_queue_url)
# This method will make several requests to bugzilla.
def fetch_patches_from_review_queue(self, limit=None):
# [:None] returns the whole array.
return sum([self._fetch_bug(bug_id).unreviewed_patches()
for bug_id in self.fetch_bug_ids_from_review_queue()[:limit]], [])
# NOTE: This is the only client of _fetch_attachment_ids_request_query
# This method only makes one request to bugzilla.
def fetch_attachment_ids_from_review_queue(self):
review_queue_url = "request.cgi?action=queue&type=review&group=type"
return self._fetch_attachment_ids_request_query(review_queue_url)
# This only works if your account has edituser privileges.
# We could easily parse https://bugs.webkit.org/userprefs.cgi?tab=permissions to
# check permissions, but bugzilla will just return an error if we don't have them.
def fetch_login_userid_pairs_matching_substring(self, search_string):
review_queue_url = "editusers.cgi?action=list&matchvalue=login_name&matchstr=%s&matchtype=substr" % urllib.quote(search_string)
results_page = self._load_query(review_queue_url)
# We could pull the EditUsersParser off Bugzilla if needed.
return EditUsersParser().login_userid_pairs_from_edit_user_results(results_page)
# FIXME: We should consider adding a BugzillaUser class.
def fetch_logins_matching_substring(self, search_string):
pairs = self.fetch_login_userid_pairs_matching_substring(search_string)
return map(lambda pair: pair[0], pairs)
class Bugzilla(object):
def __init__(self, committers=committers.CommitterList()):
self.authenticated = False
self.queries = BugzillaQueries(self)
self.committers = committers
self.cached_quips = []
self.edit_user_parser = EditUsersParser()
self._browser = None
def _get_browser(self):
if not self._browser:
from webkitpy.thirdparty.autoinstalled.mechanize import Browser
self._browser = Browser()
# Ignore bugs.webkit.org/robots.txt until we fix it to allow this script.
self._browser.set_handle_robots(False)
return self._browser
def _set_browser(self, value):
self._browser = value
browser = property(_get_browser, _set_browser)
def fetch_user(self, user_id):
self.authenticate()
edit_user_page = self.browser.open(self.edit_user_url_for_id(user_id))
return self.edit_user_parser.user_dict_from_edit_user_page(edit_user_page)
def add_user_to_groups(self, user_id, group_names):
self.authenticate()
user_edit_page = self.browser.open(self.edit_user_url_for_id(user_id))
self.browser.select_form(nr=1)
for group_name in group_names:
group_string = self.edit_user_parser.group_string_from_name(user_edit_page, group_name)
self.browser.find_control(group_string).items[0].selected = True
self.browser.submit()
def quips(self):
# We only fetch and parse the list of quips once per instantiation
# so that we do not burden bugs.webkit.org.
if not self.cached_quips:
self.cached_quips = self.queries.fetch_quips()
return self.cached_quips
def bug_url_for_bug_id(self, bug_id, xml=False):
if not bug_id:
return None
content_type = "&ctype=xml" if xml else ""
return "%sshow_bug.cgi?id=%s%s" % (config_urls.bug_server_url, bug_id, content_type)
def short_bug_url_for_bug_id(self, bug_id):
if not bug_id:
return None
return "http://webkit.org/b/%s" % bug_id
def add_attachment_url(self, bug_id):
return "%sattachment.cgi?action=enter&bugid=%s" % (config_urls.bug_server_url, bug_id)
def attachment_url_for_id(self, attachment_id, action="view"):
if not attachment_id:
return None
action_param = ""
if action and action != "view":
action_param = "&action=%s" % action
return "%sattachment.cgi?id=%s%s" % (config_urls.bug_server_url,
attachment_id,
action_param)
def edit_user_url_for_id(self, user_id):
return "%seditusers.cgi?action=edit&userid=%s" % (config_urls.bug_server_url, user_id)
def _parse_attachment_flag(self,
element,
flag_name,
attachment,
result_key):
flag = element.find('flag', attrs={'name': flag_name})
if flag:
attachment[flag_name] = flag['status']
if flag['status'] == '+':
attachment[result_key] = flag['setter']
# Sadly show_bug.cgi?ctype=xml does not expose the flag modification date.
def _string_contents(self, soup):
# WebKit's bugzilla instance uses UTF-8.
# BeautifulStoneSoup always returns Unicode strings, however
# the .string method returns a (unicode) NavigableString.
# NavigableString can confuse other parts of the code, so we
# convert from NavigableString to a real unicode() object using unicode().
return unicode(soup.string)
# Example: 2010-01-20 14:31 PST
# FIXME: Some bugzilla dates seem to have seconds in them?
# Python does not support timezones out of the box.
# Assume that bugzilla always uses PST (which is true for bugs.webkit.org)
_bugzilla_date_format = "%Y-%m-%d %H:%M:%S"
@classmethod
def _parse_date(cls, date_string):
(date, time, time_zone) = date_string.split(" ")
if time.count(':') == 1:
# Add seconds into the time.
time += ':0'
# Ignore the timezone because python doesn't understand timezones out of the box.
date_string = "%s %s" % (date, time)
return datetime.strptime(date_string, cls._bugzilla_date_format)
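    # For illustration: _parse_date("2010-01-20 14:31 PST") yields
    # datetime(2010, 1, 20, 14, 31); the timezone suffix is discarded.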
def _date_contents(self, soup):
return self._parse_date(self._string_contents(soup))
def _parse_attachment_element(self, element, bug_id):
attachment = {}
attachment['bug_id'] = bug_id
attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
attachment['id'] = int(element.find('attachid').string)
# FIXME: No need to parse out the url here.
attachment['url'] = self.attachment_url_for_id(attachment['id'])
attachment["attach_date"] = self._date_contents(element.find("date"))
attachment['name'] = self._string_contents(element.find('desc'))
attachment['attacher_email'] = self._string_contents(element.find('attacher'))
attachment['type'] = self._string_contents(element.find('type'))
self._parse_attachment_flag(
element, 'review', attachment, 'reviewer_email')
self._parse_attachment_flag(
element, 'commit-queue', attachment, 'committer_email')
return attachment
def _parse_log_descr_element(self, element):
comment = {}
comment['comment_email'] = self._string_contents(element.find('who'))
comment['comment_date'] = self._date_contents(element.find('bug_when'))
comment['text'] = self._string_contents(element.find('thetext'))
return comment
def _parse_bugs_from_xml(self, page):
soup = BeautifulSoup(page)
# Without the unicode() call, BeautifulSoup occasionally complains of being
# passed None for no apparent reason.
return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')]
def _parse_bug_dictionary_from_xml(self, page):
soup = BeautifulStoneSoup(page, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
bug = {}
bug["id"] = int(soup.find("bug_id").string)
bug["title"] = self._string_contents(soup.find("short_desc"))
bug["bug_status"] = self._string_contents(soup.find("bug_status"))
dup_id = soup.find("dup_id")
if dup_id:
bug["dup_id"] = self._string_contents(dup_id)
bug["reporter_email"] = self._string_contents(soup.find("reporter"))
bug["assigned_to_email"] = self._string_contents(soup.find("assigned_to"))
bug["cc_emails"] = [self._string_contents(element) for element in soup.findAll('cc')]
bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')]
bug["comments"] = [self._parse_log_descr_element(element) for element in soup.findAll('long_desc')]
return bug
# Makes testing fetch_*_from_bug() possible until we have a better
    # BugzillaNetwork abstraction.
def _fetch_bug_page(self, bug_id):
bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
log("Fetching: %s" % bug_url)
return self.browser.open(bug_url)
def fetch_bug_dictionary(self, bug_id):
try:
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
except KeyboardInterrupt:
raise
except:
self.authenticate()
return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id))
# FIXME: A BugzillaCache object should provide all these fetch_ methods.
def fetch_bug(self, bug_id):
return Bug(self.fetch_bug_dictionary(bug_id), self)
def fetch_attachment_contents(self, attachment_id):
attachment_url = self.attachment_url_for_id(attachment_id)
# We need to authenticate to download patches from security bugs.
self.authenticate()
return self.browser.open(attachment_url).read()
def _parse_bug_id_from_attachment_page(self, page):
# The "Up" relation happens to point to the bug.
up_link = BeautifulSoup(page).find('link', rel='Up')
if not up_link:
# This attachment does not exist (or you don't have permissions to
# view it).
return None
match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
return int(match.group('bug_id'))
def bug_id_for_attachment_id(self, attachment_id):
self.authenticate()
attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
log("Fetching: %s" % attachment_url)
page = self.browser.open(attachment_url)
return self._parse_bug_id_from_attachment_page(page)
# FIXME: This should just return Attachment(id), which should be able to
# lazily fetch needed data.
def fetch_attachment(self, attachment_id):
# We could grab all the attachment details off of the attachment edit
# page but we already have working code to do so off of the bugs page,
# so re-use that.
bug_id = self.bug_id_for_attachment_id(attachment_id)
if not bug_id:
return None
attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True)
for attachment in attachments:
if attachment.id() == int(attachment_id):
return attachment
return None # This should never be hit.
def authenticate(self):
if self.authenticated:
return
credentials = Credentials(config_urls.bug_server_host, git_prefix="bugzilla")
attempts = 0
while not self.authenticated:
attempts += 1
username, password = credentials.read_credentials()
log("Logging in as %s..." % username)
self.browser.open(config_urls.bug_server_url +
"index.cgi?GoAheadAndLogIn=1")
self.browser.select_form(name="login")
self.browser['Bugzilla_login'] = username
self.browser['Bugzilla_password'] = password
response = self.browser.submit()
match = re.search("<title>(.+?)</title>", response.read())
# If the resulting page has a title, and it contains the word
# "invalid" assume it's the login failure page.
if match and re.search("Invalid", match.group(1), re.IGNORECASE):
errorMessage = "Bugzilla login failed: %s" % match.group(1)
# raise an exception only if this was the last attempt
if attempts < 5:
log(errorMessage)
else:
raise Exception(errorMessage)
else:
self.authenticated = True
self.username = username
def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue):
if mark_for_landing:
return '+'
elif mark_for_commit_queue:
return '?'
return 'X'
# FIXME: mark_for_commit_queue and mark_for_landing should be joined into a single commit_flag argument.
def _fill_attachment_form(self,
description,
file_object,
mark_for_review=False,
mark_for_commit_queue=False,
mark_for_landing=False,
is_patch=False,
filename=None,
mimetype=None):
self.browser['description'] = description
if is_patch:
self.browser['ispatch'] = ("1",)
# FIXME: Should this use self._find_select_element_for_flag?
self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',)
self.browser['flag_type-3'] = (self._commit_queue_flag(mark_for_landing, mark_for_commit_queue),)
filename = filename or "%s.patch" % timestamp()
if not mimetype:
mimetypes.add_type('text/plain', '.patch') # Make sure mimetypes knows about .patch
mimetype, _ = mimetypes.guess_type(filename)
if not mimetype:
mimetype = "text/plain" # Bugzilla might auto-guess for us and we might not need this?
self.browser.add_file(file_object, mimetype, filename, 'data')
def _file_object_for_upload(self, file_or_string):
if hasattr(file_or_string, 'read'):
return file_or_string
# Only if file_or_string is not already encoded do we want to encode it.
if isinstance(file_or_string, unicode):
file_or_string = file_or_string.encode('utf-8')
return StringIO.StringIO(file_or_string)
# timestamp argument is just for unittests.
def _filename_for_upload(self, file_object, bug_id, extension="txt", timestamp=timestamp):
if hasattr(file_object, "name"):
return file_object.name
return "bug-%s-%s.%s" % (bug_id, timestamp(), extension)
def add_attachment_to_bug(self,
bug_id,
file_or_string,
description,
filename=None,
comment_text=None):
self.authenticate()
log('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
self.browser.open(self.add_attachment_url(bug_id))
self.browser.select_form(name="entryform")
file_object = self._file_object_for_upload(file_or_string)
filename = filename or self._filename_for_upload(file_object, bug_id)
self._fill_attachment_form(description, file_object, filename=filename)
if comment_text:
log(comment_text)
self.browser['comment'] = comment_text
self.browser.submit()
# FIXME: The arguments to this function should be simplified and then
# this should be merged into add_attachment_to_bug
def add_patch_to_bug(self,
bug_id,
file_or_string,
description,
comment_text=None,
mark_for_review=False,
mark_for_commit_queue=False,
mark_for_landing=False):
self.authenticate()
log('Adding patch "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
self.browser.open(self.add_attachment_url(bug_id))
self.browser.select_form(name="entryform")
file_object = self._file_object_for_upload(file_or_string)
filename = self._filename_for_upload(file_object, bug_id, extension="patch")
self._fill_attachment_form(description,
file_object,
mark_for_review=mark_for_review,
mark_for_commit_queue=mark_for_commit_queue,
mark_for_landing=mark_for_landing,
is_patch=True,
filename=filename)
if comment_text:
log(comment_text)
self.browser['comment'] = comment_text
self.browser.submit()
# FIXME: There has to be a more concise way to write this method.
def _check_create_bug_response(self, response_html):
match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>",
response_html)
if match:
return match.group('bug_id')
match = re.search(
'<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
response_html,
re.DOTALL)
error_message = "FAIL"
if match:
text_lines = BeautifulSoup(
match.group('error_message')).findAll(text=True)
error_message = "\n" + '\n'.join(
[" " + line.strip()
for line in text_lines if line.strip()])
raise Exception("Bug not created: %s" % error_message)
def create_bug(self,
bug_title,
bug_description,
component=None,
diff=None,
patch_description=None,
cc=None,
blocked=None,
assignee=None,
mark_for_review=False,
mark_for_commit_queue=False):
self.authenticate()
log('Creating bug with title "%s"' % bug_title)
self.browser.open(config_urls.bug_server_url + "enter_bug.cgi?product=WebKit")
self.browser.select_form(name="Create")
component_items = self.browser.find_control('component').items
component_names = map(lambda item: item.name, component_items)
if not component:
component = "New Bugs"
if component not in component_names:
component = User.prompt_with_list("Please pick a component:", component_names)
self.browser["component"] = [component]
if cc:
self.browser["cc"] = cc
if blocked:
self.browser["blocked"] = unicode(blocked)
if not assignee:
assignee = self.username
if assignee and not self.browser.find_control("assigned_to").disabled:
self.browser["assigned_to"] = assignee
self.browser["short_desc"] = bug_title
self.browser["comment"] = bug_description
if diff:
# _fill_attachment_form expects a file-like object
# Patch files are already binary, so no encoding needed.
assert(isinstance(diff, str))
patch_file_object = StringIO.StringIO(diff)
self._fill_attachment_form(
patch_description,
patch_file_object,
mark_for_review=mark_for_review,
mark_for_commit_queue=mark_for_commit_queue,
is_patch=True)
response = self.browser.submit()
bug_id = self._check_create_bug_response(response.read())
log("Bug %s created." % bug_id)
log("%sshow_bug.cgi?id=%s" % (config_urls.bug_server_url, bug_id))
return bug_id
def _find_select_element_for_flag(self, flag_name):
# FIXME: This will break if we ever re-order attachment flags
if flag_name == "review":
return self.browser.find_control(type='select', nr=0)
elif flag_name == "commit-queue":
return self.browser.find_control(type='select', nr=1)
raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
def clear_attachment_flags(self,
attachment_id,
additional_comment_text=None):
self.authenticate()
comment_text = "Clearing flags on attachment: %s" % attachment_id
if additional_comment_text:
comment_text += "\n\n%s" % additional_comment_text
log(comment_text)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
self.browser.set_value(comment_text, name='comment', nr=0)
self._find_select_element_for_flag('review').value = ("X",)
self._find_select_element_for_flag('commit-queue').value = ("X",)
self.browser.submit()
def set_flag_on_attachment(self,
attachment_id,
flag_name,
flag_value,
comment_text=None,
additional_comment_text=None):
# FIXME: We need a way to test this function on a live bugzilla
# instance.
self.authenticate()
# FIXME: additional_comment_text seems useless and should be merged into comment-text.
if additional_comment_text:
comment_text += "\n\n%s" % additional_comment_text
log(comment_text)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
if comment_text:
self.browser.set_value(comment_text, name='comment', nr=0)
self._find_select_element_for_flag(flag_name).value = (flag_value,)
self.browser.submit()
# FIXME: All of these bug editing methods have a ridiculous amount of
# copy/paste code.
def obsolete_attachment(self, attachment_id, comment_text=None):
self.authenticate()
log("Obsoleting attachment: %s" % attachment_id)
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
self.browser.find_control('isobsolete').items[0].selected = True
# Also clear any review flag (to remove it from review/commit queues)
self._find_select_element_for_flag('review').value = ("X",)
self._find_select_element_for_flag('commit-queue').value = ("X",)
if comment_text:
log(comment_text)
# Bugzilla has two textareas named 'comment', one is somehow
# hidden. We want the first.
self.browser.set_value(comment_text, name='comment', nr=0)
self.browser.submit()
def add_cc_to_bug(self, bug_id, email_address_list):
self.authenticate()
log("Adding %s to the CC list for bug %s" % (email_address_list, bug_id))
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
self.browser["newcc"] = ", ".join(email_address_list)
self.browser.submit()
def post_comment_to_bug(self, bug_id, comment_text, cc=None):
self.authenticate()
log("Adding comment to bug %s" % bug_id)
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
self.browser["comment"] = comment_text
if cc:
self.browser["newcc"] = ", ".join(cc)
self.browser.submit()
def close_bug_as_fixed(self, bug_id, comment_text=None):
self.authenticate()
log("Closing bug %s as fixed" % bug_id)
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
if comment_text:
self.browser['comment'] = comment_text
self.browser['bug_status'] = ['RESOLVED']
self.browser['resolution'] = ['FIXED']
self.browser.submit()
def _has_control(self, form, id):
return id in [control.id for control in form.controls]
def reassign_bug(self, bug_id, assignee=None, comment_text=None):
self.authenticate()
if not assignee:
assignee = self.username
log("Assigning bug %s to %s" % (bug_id, assignee))
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
if not self._has_control(self.browser, "assigned_to"):
log("""Failed to assign bug to you (can't find assigned_to) control.
Do you have EditBugs privileges at bugs.webkit.org?
https://bugs.webkit.org/userprefs.cgi?tab=permissions
If not, you should email [email protected] or ask in #webkit
for someone to add EditBugs to your bugs.webkit.org account.""")
return
if comment_text:
log(comment_text)
self.browser["comment"] = comment_text
self.browser["assigned_to"] = assignee
self.browser.submit()
def reopen_bug(self, bug_id, comment_text):
self.authenticate()
log("Re-opening bug %s" % bug_id)
# Bugzilla requires a comment when re-opening a bug, so we know it will
# never be None.
log(comment_text)
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
bug_status = self.browser.find_control("bug_status", type="select")
# This is a hack around the fact that ClientForm.ListControl seems to
# have no simpler way to ask if a control has an item named "REOPENED"
# without using exceptions for control flow.
possible_bug_statuses = map(lambda item: item.name, bug_status.items)
if "REOPENED" in possible_bug_statuses:
bug_status.value = ["REOPENED"]
# If the bug was never confirmed it will not have a "REOPENED"
# state, but only an "UNCONFIRMED" state.
elif "UNCONFIRMED" in possible_bug_statuses:
bug_status.value = ["UNCONFIRMED"]
else:
# FIXME: This logic is slightly backwards. We won't print this
# message if the bug is already open with state "UNCONFIRMED".
log("Did not reopen bug %s, it appears to already be open with status %s." % (bug_id, bug_status.value))
self.browser['comment'] = comment_text
self.browser.submit()
| gpl-3.0 | 7,722,090,757,548,807,000 | 44.840336 | 229 | 0.618934 | false |
ChristophKirst/ClearMap | ClearMap/IO/NRRD.py | 1 | 20940 | #!/usr/bin/env python
# encoding: utf-8
"""
Interface to NRRD volumetric image data files.
The interface is based on nrrd.py, an all-python (and numpy)
implementation for reading and writing nrrd files.
See http://teem.sourceforge.net/nrrd/format.html for the specification.
Example:
>>> import os, numpy
>>> import ClearMap.Settings as settings
>>> import ClearMap.IO.NRRD as nrrd
>>> filename = os.path.join(settings.ClearMapPath, 'Test/Data/Nrrd/test.nrrd');
>>> data = nrrd.readData(filename);
>>> print data.shape
(20, 50, 10)
Author
"""
"""
Copyright 2011 Maarten Everts and David Hammond.
Modified to integrate into ClearMap framework by Christoph Kirst, The Rockefeller University, New York City, 2015
"""
import numpy as np
import gzip
import bz2
import os.path
from datetime import datetime
import ClearMap.IO as io
class NrrdError(Exception):
"""Exceptions for Nrrd class."""
pass
#This will help prevent loss of precision
#IEEE754-1985 standard says that 17 decimal digits is enough in all cases.
def _convert_to_reproducible_floatingpoint( x ):
if type(x) == float:
value = '{:.16f}'.format(x).rstrip('0').rstrip('.') # Remove trailing zeros, and dot if at end
else:
value = str(x)
return value
_TYPEMAP_NRRD2NUMPY = {
'signed char': 'i1',
'int8': 'i1',
'int8_t': 'i1',
'uchar': 'u1',
'unsigned char': 'u1',
'uint8': 'u1',
'uint8_t': 'u1',
'short': 'i2',
'short int': 'i2',
'signed short': 'i2',
'signed short int': 'i2',
'int16': 'i2',
'int16_t': 'i2',
'ushort': 'u2',
'unsigned short': 'u2',
'unsigned short int': 'u2',
'uint16': 'u2',
'uint16_t': 'u2',
'int': 'i4',
'signed int': 'i4',
'int32': 'i4',
'int32_t': 'i4',
'uint': 'u4',
'unsigned int': 'u4',
'uint32': 'u4',
'uint32_t': 'u4',
'longlong': 'i8',
'long long': 'i8',
'long long int': 'i8',
'signed long long': 'i8',
'signed long long int': 'i8',
'int64': 'i8',
'int64_t': 'i8',
'ulonglong': 'u8',
'unsigned long long': 'u8',
'unsigned long long int': 'u8',
'uint64': 'u8',
'uint64_t': 'u8',
'float': 'f4',
'double': 'f8',
'block': 'V'
}
_TYPEMAP_NUMPY2NRRD = {
'i1': 'int8',
'u1': 'uint8',
'i2': 'int16',
'u2': 'uint16',
'i4': 'int32',
'u4': 'uint32',
'i8': 'int64',
'u8': 'uint64',
'f4': 'float',
'f8': 'double',
'V': 'block'
}
_NUMPY2NRRD_ENDIAN_MAP = {
'<': 'little',
'L': 'little',
'>': 'big',
'B': 'big'
}
def parse_nrrdvector(inp):
"""Parse a vector from a nrrd header, return a list."""
assert inp[0] == '(', "Vector should be enclosed by parenthesis."
assert inp[-1] == ')', "Vector should be enclosed by parenthesis."
return [_convert_to_reproducible_floatingpoint(x) for x in inp[1:-1].split(',')]
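# For illustration: parse_nrrdvector("(1.0,0.5,2)") returns ['1.0', '0.5', '2'];
# components are kept as reproducible strings rather than converted to floats.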
def parse_optional_nrrdvector(inp):
"""Parse a vector from a nrrd header that can also be none."""
if (inp == "none"):
return inp
else:
return parse_nrrdvector(inp)
_NRRD_FIELD_PARSERS = {
'dimension': int,
'type': str,
'sizes': lambda fieldValue: [int(x) for x in fieldValue.split()],
'endian': str,
'encoding': str,
'min': float,
'max': float,
'oldmin': float,
'old min': float,
'oldmax': float,
'old max': float,
'lineskip': int,
'line skip': int,
'byteskip': int,
'byte skip': int,
'content': str,
'sample units': str,
'datafile': str,
'data file': str,
'spacings': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'thicknesses': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axis mins': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axismins': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axis maxs': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'axismaxs': lambda fieldValue: [_convert_to_reproducible_floatingpoint(x) for x in fieldValue.split()],
'centerings': lambda fieldValue: [str(x) for x in fieldValue.split()],
'labels': lambda fieldValue: [str(x) for x in fieldValue.split()],
'units': lambda fieldValue: [str(x) for x in fieldValue.split()],
'kinds': lambda fieldValue: [str(x) for x in fieldValue.split()],
'space': str,
'space dimension': int,
'space units': lambda fieldValue: [str(x) for x in fieldValue.split()],
'space origin': parse_nrrdvector,
'space directions': lambda fieldValue:
[parse_optional_nrrdvector(x) for x in fieldValue.split()],
'measurement frame': lambda fieldValue:
[parse_nrrdvector(x) for x in fieldValue.split()],
}
_NRRD_REQUIRED_FIELDS = ['dimension', 'type', 'encoding', 'sizes']
# The supported field values
_NRRD_FIELD_ORDER = [
'type',
'dimension',
'space dimension',
'space',
'sizes',
'space directions',
'kinds',
'endian',
'encoding',
'min',
'max',
'oldmin',
'old min',
'oldmax',
'old max',
'content',
'sample units',
'spacings',
'thicknesses',
'axis mins',
'axismins',
'axis maxs',
'axismaxs',
'centerings',
'labels',
'units',
'space units',
'space origin',
'measurement frame',
'data file']
def _determine_dtype(fields):
"""Determine the numpy dtype of the data."""
# Check whether the required fields are there
for field in _NRRD_REQUIRED_FIELDS:
if field not in fields:
raise NrrdError('Nrrd header misses required field: "%s".' % (field))
# Process the data type
np_typestring = _TYPEMAP_NRRD2NUMPY[fields['type']]
if np.dtype(np_typestring).itemsize > 1:
if 'endian' not in fields:
raise NrrdError('Nrrd header misses required field: "endian".')
if fields['endian'] == 'big':
np_typestring = '>' + np_typestring
elif fields['endian'] == 'little':
np_typestring = '<' + np_typestring
return np.dtype(np_typestring)
def _read_data(fields, filehandle, filename=None):
"""Read the actual data into a numpy structure."""
data = np.zeros(0)
# Determine the data type from the fields
dtype = _determine_dtype(fields)
# determine byte skip, line skip, and data file (there are two ways to write them)
lineskip = fields.get('lineskip', fields.get('line skip', 0))
byteskip = fields.get('byteskip', fields.get('byte skip', 0))
datafile = fields.get("datafile", fields.get("data file", None))
datafilehandle = filehandle
if datafile is not None:
# If the datafile path is absolute, don't muck with it. Otherwise
# treat the path as relative to the directory in which the detached
# header is in
if os.path.isabs(datafile):
datafilename = datafile
else:
datafilename = os.path.join(os.path.dirname(filename), datafile)
datafilehandle = open(datafilename,'rb')
numPixels=np.array(fields['sizes']).prod()
totalbytes = dtype.itemsize * numPixels
if fields['encoding'] == 'raw':
if byteskip == -1: # This is valid only with raw encoding
datafilehandle.seek(-totalbytes, 2)
else:
for _ in range(lineskip):
datafilehandle.readline()
datafilehandle.read(byteskip)
data = np.fromfile(datafilehandle, dtype)
elif fields['encoding'] == 'gzip' or\
fields['encoding'] == 'gz':
gzipfile = gzip.GzipFile(fileobj=datafilehandle)
# Again, unfortunately, np.fromfile does not support
# reading from a gzip stream, so we'll do it like this.
# I have no idea what the performance implications are.
data = np.fromstring(gzipfile.read(), dtype)
    elif fields['encoding'] == 'bzip2' or\
         fields['encoding'] == 'bz2':
        # bz2.BZ2File cannot wrap an already open file object here, so read
        # the remaining stream and decompress it in memory instead.
        data = np.fromstring(bz2.decompress(datafilehandle.read()), dtype)
else:
raise NrrdError('Unsupported encoding: "%s"' % fields['encoding'])
if numPixels != data.size:
raise NrrdError('ERROR: {0}-{1}={2}'.format(numPixels,data.size,numPixels-data.size))
# dkh : eliminated need to reverse order of dimensions. nrrd's
# data layout is same as what numpy calls 'Fortran' order,
shape_tmp = list(fields['sizes'])
data = np.reshape(data, tuple(shape_tmp), order='F')
return data
def _validate_magic_line(line):
"""For NRRD files, the first four characters are always "NRRD", and
remaining characters give information about the file format version
"""
if not line.startswith('NRRD'):
raise NrrdError('Missing magic "NRRD" word. Is this an NRRD file?')
try:
if int(line[4:]) > 5:
raise NrrdError('NRRD file version too new for this library.')
except:
raise NrrdError('Invalid NRRD magic line: %s' % (line,))
return len(line)
def readHeader(filename):
"""Parse the fields in the nrrd header
nrrdfile can be any object which supports the iterator protocol and
returns a string each time its next() method is called — file objects and
    list objects are both suitable. If nrrdfile is a file object, it must be
opened with the ‘b’ flag on platforms where that makes a difference
(e.g. Windows)
>>> readHeader(("NRRD0005", "type: float", "dimension: 3"))
{'type': 'float', 'dimension': 3, 'keyvaluepairs': {}}
>>> readHeader(("NRRD0005", "my extra info:=my : colon-separated : values"))
{'keyvaluepairs': {'my extra info': 'my : colon-separated : values'}}
"""
if isinstance(filename, basestring):
nrrdfile = open(filename,'rb');
else:
nrrdfile = filename;
# Collect number of bytes in the file header (for seeking below)
headerSize = 0
it = iter(nrrdfile)
headerSize += _validate_magic_line(next(it).decode('ascii'))
header = { 'keyvaluepairs': {} }
for raw_line in it:
headerSize += len(raw_line)
raw_line = raw_line.decode('ascii')
# Trailing whitespace ignored per the NRRD spec
line = raw_line.rstrip()
# Comments start with '#', no leading whitespace allowed
if line.startswith('#'):
continue
# Single blank line separates the header from the data
if line == '':
break
# Handle the <key>:=<value> lines first since <value> may contain a
# ': ' which messes up the <field>: <desc> parsing
key_value = line.split(':=', 1)
        if len(key_value) == 2:
key, value = key_value
# TODO: escape \\ and \n ??
# value.replace(r'\\\\', r'\\').replace(r'\n', '\n')
header['keyvaluepairs'][key] = value
continue
# Handle the "<field>: <desc>" lines.
field_desc = line.split(': ', 1)
        if len(field_desc) == 2:
field, desc = field_desc
## preceeding and suffixing white space should be ignored.
field = field.rstrip().lstrip()
desc = desc.rstrip().lstrip()
if field not in _NRRD_FIELD_PARSERS:
raise NrrdError('Unexpected field in nrrd header: "%s".' % field)
if field in header.keys():
raise NrrdError('Duplicate header field: "%s"' % field)
header[field] = _NRRD_FIELD_PARSERS[field](desc)
continue
# Should not reach here
raise NrrdError('Invalid header line: "%s"' % line)
# line reading was buffered; correct file pointer to just behind header:
nrrdfile.seek(headerSize)
return header
def readData(filename, **args):
"""Read nrrd file image data
Arguments:
filename (str): file name as regular expression
x,y,z (tuple): data range specifications
Returns:
array: image data
"""
with open(filename,'rb') as filehandle:
header = readHeader(filehandle)
#print header
data = _read_data(header, filehandle, filename)
#return (data, header)
#return data.transpose([1,0,2]);
data = io.readData(data, **args);
return data;
def _format_nrrd_list(fieldValue) :
return ' '.join([_convert_to_reproducible_floatingpoint(x) for x in fieldValue])
def _format_nrrdvector(v) :
return '(' + ','.join([_convert_to_reproducible_floatingpoint(x) for x in v]) + ')'
def _format_optional_nrrdvector(v):
if (v == 'none') :
return 'none'
else :
return _format_nrrdvector(v)
_NRRD_FIELD_FORMATTERS = {
'dimension': str,
'type': str,
'sizes': _format_nrrd_list,
'endian': str,
'encoding': str,
'min': str,
'max': str,
'oldmin': str,
'old min': str,
'oldmax': str,
'old max': str,
'lineskip': str,
'line skip': str,
'byteskip': str,
'byte skip': str,
'content': str,
'sample units': str,
'datafile': str,
'data file': str,
'spacings': _format_nrrd_list,
'thicknesses': _format_nrrd_list,
'axis mins': _format_nrrd_list,
'axismins': _format_nrrd_list,
'axis maxs': _format_nrrd_list,
'axismaxs': _format_nrrd_list,
'centerings': _format_nrrd_list,
'labels': _format_nrrd_list,
'units': _format_nrrd_list,
'kinds': _format_nrrd_list,
'space': str,
'space dimension': str,
'space units': _format_nrrd_list,
'space origin': _format_nrrdvector,
'space directions': lambda fieldValue: ' '.join([_format_optional_nrrdvector(x) for x in fieldValue]),
'measurement frame': lambda fieldValue: ' '.join([_format_optional_nrrdvector(x) for x in fieldValue]),
}
def _write_data(data, filehandle, options):
# Now write data directly
#rawdata = data.transpose([2,0,1]).tostring(order = 'C')
rawdata = data.transpose([2,1,0]).tostring(order = 'C');
if options['encoding'] == 'raw':
filehandle.write(rawdata)
elif options['encoding'] == 'gzip':
gzfileobj = gzip.GzipFile(fileobj = filehandle)
gzfileobj.write(rawdata)
gzfileobj.close()
    elif options['encoding'] == 'bz2':
        # bz2.BZ2File cannot wrap an already open file object, so compress
        # in memory and write the result directly.
        filehandle.write(bz2.compress(rawdata))
else:
raise NrrdError('Unsupported encoding: "%s"' % options['encoding'])
def writeData(filename, data, options={}, separateHeader=False, x = all, y = all, z = all):
"""Write data to nrrd file
Arguments:
filename (str): file name as regular expression
data (array): image data
options (dict): options dictionary
separateHeader (bool): write a separate header file
Returns:
str: nrrd output file name
To sample data use `options['spacings'] = [s1, s2, s3]` for
3d data with sampling deltas `s1`, `s2`, and `s3` in each dimension.
"""
data = io.dataToRange(data, x = x, y = y, z = z);
# Infer a number of fields from the ndarray and ignore values
# in the options dictionary.
options['type'] = _TYPEMAP_NUMPY2NRRD[data.dtype.str[1:]]
if data.dtype.itemsize > 1:
options['endian'] = _NUMPY2NRRD_ENDIAN_MAP[data.dtype.str[:1]]
# if 'space' is specified 'space dimension' can not. See http://teem.sourceforge.net/nrrd/format.html#space
if 'space' in options.keys() and 'space dimension' in options.keys():
del options['space dimension']
options['dimension'] = data.ndim
dsize = list(data.shape);
#dsize[0:2] = [dsize[1], dsize[0]];
options['sizes'] = dsize;
# The default encoding is 'gzip'
if 'encoding' not in options:
options['encoding'] = 'gzip'
# A bit of magic in handling options here.
# If *.nhdr filename provided, this overrides `separate_header=False`
# If *.nrrd filename provided AND separate_header=True, separate files
# written.
# For all other cases, header & data written to same file.
if filename[-5:] == '.nhdr':
separate_header = True
if 'data file' not in options:
datafilename = filename[:-4] + str('raw')
if options['encoding'] == 'gzip':
datafilename += '.gz'
options['data file'] = datafilename
else:
datafilename = options['data file']
    elif filename[-5:] == '.nrrd' and separateHeader:
separate_header = True
datafilename = filename
filename = filename[:-4] + str('nhdr')
else:
# Write header & data as one file
datafilename = filename;
separate_header = False;
with open(filename,'wb') as filehandle:
filehandle.write(b'NRRD0005\n')
filehandle.write(b'# This NRRD file was generated by pynrrd\n')
filehandle.write(b'# on ' +
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S').encode('ascii') +
b'(GMT).\n')
filehandle.write(b'# Complete NRRD file format specification at:\n');
filehandle.write(b'# http://teem.sourceforge.net/nrrd/format.html\n');
# Write the fields in order, this ignores fields not in _NRRD_FIELD_ORDER
for field in _NRRD_FIELD_ORDER:
if field in options:
outline = (field + ': ' +
_NRRD_FIELD_FORMATTERS[field](options[field]) +
'\n').encode('ascii')
filehandle.write(outline)
d = options.get('keyvaluepairs', {})
for (k,v) in sorted(d.items(), key=lambda t: t[0]):
outline = (str(k) + ':=' + str(v) + '\n').encode('ascii')
filehandle.write(outline)
# Write the closing extra newline
filehandle.write(b'\n')
# If a single file desired, write data
if not separate_header:
_write_data(data, filehandle, options)
# If separate header desired, write data to different file
if separate_header:
with open(datafilename, 'wb') as datafilehandle:
_write_data(data, datafilehandle, options)
return filename;
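# Example usage (hypothetical path and array): writeData infers the nrrd type, sizes
# and endianness from the array and gzip-compresses the data by default:
#   import numpy
#   data = numpy.zeros((10, 20, 30), dtype = 'uint16');
#   writeData('/tmp/test.nrrd', data, options = {'spacings': [1.0, 1.0, 2.0]});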
def dataSize(filename, **args):
"""Read data size from nrrd image
Arguments:
filename (str): file name as regular expression
x,y,z (tuple): data range specifications
Returns:
tuple: data size
"""
header = readHeader(filename);
dims = header['sizes'];
#dims[0:2] = [dims[1], dims[0]];
return io.dataSizeFromDataRange(dims, **args);
def dataZSize(filename, z = all, **args):
"""Read data z size from nrrd image
Arguments:
filename (str): file name as regular expression
z (tuple): z data range specification
Returns:
int: z data size
"""
header = readHeader(filename);
dims = header['sizes'];
if len(dims) > 2:
return io.toDataSize(dims[2], r = z);
else:
return None;
def copyData(source, sink):
"""Copy an nrrd file from source to sink
Arguments:
source (str): file name pattern of source
sink (str): file name pattern of sink
Returns:
str: file name of the copy
Notes:
Todo: dealt with nrdh header files!
"""
io.copyFile(source, sink);
def test():
"""Test NRRD IO module"""
import ClearMap.IO.NRRD as self
reload(self)
from ClearMap.Settings import ClearMapPath
import os
import numpy
"""Test NRRD module"""
basedir = ClearMapPath;
fn = os.path.join(basedir, 'Test/Data/Nrrd/test.nrrd')
data = numpy.random.rand(20,50,10);
data[5:15, 20:45, 2:9] = 0;
reload(self)
print "writing nrrd image to: " + fn;
self.writeData(fn, data);
ds = self.dataSize(fn);
print "dataSize: %s" % str(ds);
print "Loading raw image from: " + fn;
img = self.readData(fn);
print "Image size: " + str(img.shape)
diff = img - data;
print (diff.max(), diff.min())
#some uint type
print "writing raw image to: " + fn;
udata = data * 10;
udata = udata.astype('uint16');
self.writeData(fn, udata);
print "Loading raw image from: " + fn;
img = self.readData(fn);
print "Image size: " + str(img.shape)
diff = img - udata;
print (diff.max(), diff.min())
#dataSize
print "dataSize is %s" % str(self.dataSize(fn))
print "dataZSize is %s" % str(self.dataZSize(fn))
if __name__ == "__main__":
test();
| gpl-3.0 | -2,766,026,941,562,460,000 | 31.009174 | 117 | 0.596542 | false |
weidongxu84/info-gatherer | django/contrib/auth/tests/models.py | 1 | 3906 | from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import (Group, User,
SiteProfileNotAvailable, UserManager)
class ProfileTestCase(TestCase):
fixtures = ['authtestdata.json']
def setUp(self):
"""Backs up the AUTH_PROFILE_MODULE"""
self.old_AUTH_PROFILE_MODULE = getattr(settings,
'AUTH_PROFILE_MODULE', None)
def tearDown(self):
"""Restores the AUTH_PROFILE_MODULE -- if it was not set it is deleted,
otherwise the old value is restored"""
if self.old_AUTH_PROFILE_MODULE is None and \
hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
if self.old_AUTH_PROFILE_MODULE is not None:
settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE
def test_site_profile_not_available(self):
# calling get_profile without AUTH_PROFILE_MODULE set
if hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
user = User.objects.get(username='testclient')
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
class NaturalKeysTestCase(TestCase):
fixtures = ['authtestdata.json']
def test_user_natural_key(self):
staff_user = User.objects.get(username='staff')
self.assertEquals(User.objects.get_by_natural_key('staff'), staff_user)
self.assertEquals(staff_user.natural_key(), ('staff',))
def test_group_natural_key(self):
users_group = Group.objects.create(name='users')
self.assertEquals(Group.objects.get_by_natural_key('users'), users_group)
class LoadDataWithoutNaturalKeysTestCase(TestCase):
fixtures = ['regular.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEquals(group, user.groups.get())
class LoadDataWithNaturalKeysTestCase(TestCase):
fixtures = ['natural.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEquals(group, user.groups.get())
class UserManagerTestCase(TestCase):
def test_create_user(self):
email_lowercase = '[email protected]'
user = User.objects.create_user('user', email_lowercase)
self.assertEquals(user.email, email_lowercase)
self.assertEquals(user.username, 'user')
self.assertEquals(user.password, '!')
def test_create_user_email_domain_normalize_rfc3696(self):
# According to http://tools.ietf.org/html/rfc3696#section-3
# the "@" symbol can be part of the local part of an email address
returned = UserManager.normalize_email(r'Abc\@[email protected]')
self.assertEquals(returned, r'Abc\@[email protected]')
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email('[email protected]')
self.assertEquals(returned, '[email protected]')
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email('email\ [email protected]')
self.assertEquals(returned, 'email\ [email protected]')
def test_empty_username(self):
self.assertRaisesMessage(ValueError,
'The given username must be set',
User.objects.create_user, username='')
| mit | -7,884,866,480,122,510,000 | 38.857143 | 81 | 0.666155 | false |
rplevka/robottelo | tests/foreman/cli/test_katello_agent.py | 1 | 11216 | """CLI tests for ``katello-agent``.
:Requirement: Host
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Katello-agent
:Assignee: gtalreja
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import time
import pytest
from broker import VMBroker
from robottelo.api.utils import wait_for_errata_applicability_task
from robottelo.cli.activationkey import ActivationKey
from robottelo.cli.factory import make_activation_key
from robottelo.cli.factory import make_host_collection
from robottelo.cli.factory import setup_org_for_a_custom_repo
from robottelo.cli.factory import setup_org_for_a_rh_repo
from robottelo.cli.host import Host
from robottelo.cli.hostcollection import HostCollection
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE_GROUP
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE_GROUP_NAME
from robottelo.constants import FAKE_0_CUSTOM_PACKAGE_NAME
from robottelo.constants import FAKE_1_CUSTOM_PACKAGE
from robottelo.constants import FAKE_1_CUSTOM_PACKAGE_NAME
from robottelo.constants import FAKE_1_ERRATA_ID
from robottelo.constants import FAKE_2_CUSTOM_PACKAGE
from robottelo.constants import FAKE_2_CUSTOM_PACKAGE_NAME
from robottelo.constants import PRDS
from robottelo.constants import REPOS
from robottelo.constants import REPOSET
from robottelo.constants.repos import FAKE_1_YUM_REPO
from robottelo.hosts import ContentHost
pytestmark = [pytest.mark.skip_if_not_set('clients', 'fake_manifest')]
@pytest.fixture(scope='module')
def katello_agent_repos(module_ak, module_cv, module_lce, module_org):
"""Create Org, Lifecycle Environment, Content View, Activation key"""
setup_org_for_a_rh_repo(
{
'product': PRDS['rhel'],
'repository-set': REPOSET['rhst7'],
'repository': REPOS['rhst7']['name'],
'organization-id': module_org.id,
'content-view-id': module_cv.id,
'lifecycle-environment-id': module_lce.id,
'activationkey-id': module_ak.id,
}
)
# Create custom repository content
setup_org_for_a_custom_repo(
{
'url': FAKE_1_YUM_REPO,
'organization-id': module_org.id,
'content-view-id': module_cv.id,
'lifecycle-environment-id': module_lce.id,
'activationkey-id': module_ak.id,
}
)
return {
'ak': module_ak,
'cv': module_cv,
'lce': module_lce,
'org': module_org,
}
@pytest.fixture
def katello_agent_client(katello_agent_repos, rhel7_contenthost):
rhel7_contenthost.install_katello_ca()
# Register content host and install katello-agent
rhel7_contenthost.register_contenthost(
katello_agent_repos['org'].label,
katello_agent_repos['ak'].name,
)
assert rhel7_contenthost.subscribed
host_info = Host.info({'name': rhel7_contenthost.hostname})
rhel7_contenthost.enable_repo(REPOS['rhst7']['id'])
rhel7_contenthost.install_katello_agent()
yield {'client': rhel7_contenthost, 'host_info': host_info}
@pytest.mark.tier3
def test_positive_get_errata_info(katello_agent_client):
"""Get errata info
:id: afb5ab34-1703-49dc-8ddc-5e032c1b86d7
:expectedresults: Errata info was displayed
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
client.run(f'yum install -y {FAKE_1_CUSTOM_PACKAGE}')
result = Host.errata_info({'host-id': host_info['id'], 'id': FAKE_1_ERRATA_ID})
assert result[0]['errata-id'] == FAKE_1_ERRATA_ID
assert FAKE_2_CUSTOM_PACKAGE in result[0]['packages']
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_apply_errata(katello_agent_client):
"""Apply errata to a host
:id: 8d0e5c93-f9fd-4ec0-9a61-aa93082a30c5
:expectedresults: Errata is scheduled for installation
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
client.run(f'yum install -y {FAKE_1_CUSTOM_PACKAGE}')
Host.errata_apply({'errata-ids': FAKE_1_ERRATA_ID, 'host-id': host_info['id']})
@pytest.mark.tier3
def test_positive_apply_security_erratum(katello_agent_client):
"""Apply security erratum to a host
:id: 4d1095c8-d354-42ac-af44-adf6dbb46deb
:expectedresults: erratum is recognized by the
`yum update --security` command on client
:CaseLevel: System
:BZ: 1420671, 1740790
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
client.download_install_rpm(FAKE_1_YUM_REPO, FAKE_2_CUSTOM_PACKAGE)
# Check the system is up to date
result = client.run('yum update --security | grep "No packages needed for security"')
assert result.status == 0
before_downgrade = int(time.time())
# Downgrade walrus package
client.run(f'yum downgrade -y {FAKE_2_CUSTOM_PACKAGE_NAME}')
# Wait for errata applicability cache is counted
wait_for_errata_applicability_task(int(host_info['id']), before_downgrade)
# Check that host has applicable errata
host_errata = Host.errata_list({'host-id': host_info['id']})
assert host_errata[0]['erratum-id'] == FAKE_1_ERRATA_ID
assert host_errata[0]['installable'] == 'true'
# Check the erratum becomes available
result = client.run(
'yum update --assumeno --security | grep "No packages needed for security"'
)
assert result.status == 1
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_package(katello_agent_client):
"""Install a package to a host remotely
:id: b1009bba-0c7e-4b00-8ac4-256e5cfe4a78
:expectedresults: Package was successfully installed
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
Host.package_install({'host-id': host_info['id'], 'packages': FAKE_0_CUSTOM_PACKAGE_NAME})
result = client.run(f'rpm -q {FAKE_0_CUSTOM_PACKAGE_NAME}')
assert result.status == 0
@pytest.mark.tier3
def test_positive_remove_package(katello_agent_client):
"""Remove a package from a host remotely
:id: 573dec11-8f14-411f-9e41-84426b0f23b5
:expectedresults: Package was successfully removed
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
client.run(f'yum install -y {FAKE_1_CUSTOM_PACKAGE}')
Host.package_remove({'host-id': host_info['id'], 'packages': FAKE_1_CUSTOM_PACKAGE_NAME})
result = client.run(f'rpm -q {FAKE_1_CUSTOM_PACKAGE_NAME}')
assert result.status != 0
@pytest.mark.tier3
def test_positive_upgrade_package(katello_agent_client):
"""Upgrade a host package remotely
:id: ad751c63-7175-40ae-8bc4-800462cd9c29
:expectedresults: Package was successfully upgraded
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
client.run(f'yum install -y {FAKE_1_CUSTOM_PACKAGE}')
Host.package_upgrade({'host-id': host_info['id'], 'packages': FAKE_1_CUSTOM_PACKAGE_NAME})
result = client.run(f'rpm -q {FAKE_2_CUSTOM_PACKAGE}')
assert result.status == 0
@pytest.mark.tier3
def test_positive_upgrade_packages_all(katello_agent_client):
"""Upgrade all the host packages remotely
:id: 003101c7-bb95-4e51-a598-57977b2858a9
:expectedresults: Packages (at least 1 with newer version available)
were successfully upgraded
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
client.run(f'yum install -y {FAKE_1_CUSTOM_PACKAGE}')
Host.package_upgrade_all({'host-id': host_info['id']})
result = client.run(f'rpm -q {FAKE_2_CUSTOM_PACKAGE}')
assert result.status == 0
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_install_and_remove_package_group(katello_agent_client):
"""Install and remove a package group to a host remotely
:id: ded20a89-cfd9-48d5-8829-739b1a4d4042
:expectedresults: Package group was successfully installed
and removed
:CaseLevel: System
"""
client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
hammer_args = {'groups': FAKE_0_CUSTOM_PACKAGE_GROUP_NAME, 'host-id': host_info['id']}
Host.package_group_install(hammer_args)
for package in FAKE_0_CUSTOM_PACKAGE_GROUP:
result = client.run(f'rpm -q {package}')
assert result.status == 0
Host.package_group_remove(hammer_args)
for package in FAKE_0_CUSTOM_PACKAGE_GROUP:
result = client.run(f'rpm -q {package}')
assert result.status != 0
@pytest.mark.tier3
def test_negative_unregister_and_pull_content(katello_agent_client):
"""Attempt to retrieve content after host has been unregistered from Satellite
:id: de0d0d91-b1e1-4f0e-8a41-c27df4d6b6fd
:expectedresults: Host can no longer retrieve content from satellite
:CaseLevel: System
"""
client = katello_agent_client['client']
result = client.run('subscription-manager unregister')
assert result.status == 0
result = client.run(f'yum install -y {FAKE_1_CUSTOM_PACKAGE}')
assert result.status != 0
@pytest.mark.tier3
@pytest.mark.upgrade
def test_positive_register_host_ak_with_host_collection(
katello_agent_client, module_cv, module_lce, module_org, rhel7_contenthost
):
"""Attempt to register a host using activation key with host collection
:id: 7daf4e40-3fa6-42af-b3f7-1ca1a5c9bfeb
:BZ: 1385814
:expectedresults: Host successfully registered and listed in host
collection
:CaseLevel: System
"""
# client = katello_agent_client['client']
host_info = katello_agent_client['host_info']
# create a new activation key
activation_key = make_activation_key(
{
'lifecycle-environment-id': module_lce.id,
'organization-id': module_org.id,
'content-view-id': module_cv.id,
}
)
hc = make_host_collection({'organization-id': module_org.id})
ActivationKey.add_host_collection(
{
'id': activation_key['id'],
'organization-id': module_org.id,
'host-collection-id': hc['id'],
}
)
# add the registered instance host to collection
HostCollection.add_host(
{'id': hc['id'], 'organization-id': module_org.id, 'host-ids': host_info['id']}
)
with VMBroker(nick='rhel7', host_classes={'host': ContentHost}) as vm:
vm.install_katello_ca()
# register the client host with the current activation key
vm.register_contenthost(module_org.name, activation_key=activation_key['name'])
assert vm.subscribed
# note: when registering the host, it should be automatically added to the host-collection
client_host = Host.info({'name': vm.hostname})
hosts = HostCollection.hosts({'id': hc['id'], 'organization-id': module_org.id})
assert len(hosts) == 2
expected_hosts_ids = {host_info['id'], client_host['id']}
hosts_ids = {host['id'] for host in hosts}
assert hosts_ids == expected_hosts_ids
| gpl-3.0 | -6,850,693,793,248,222,000 | 32.783133 | 98 | 0.684647 | false |
mdevaev/slog | src/remote.py | 1 | 1202 | # -*- mode: python; coding: utf-8; -*-
import dbus
import dbus.service, dbus.mainloop.glib
class Remote:
def __init__(self):
bus = dbus.SessionBus()
slog_obj = bus.get_object("org.LightLang.SLog", "/SLog")
self.iface = dbus.Interface(slog_obj, "org.LightLang.SLogInterface")
def __spy_toggle(self):
self.iface.spy_toggle()
def __window_toggle(self):
self.iface.toggle()
def __show(self):
self.iface.show()
def execute(self, cmd):
if cmd == "toggle":
self.__window_toggle()
elif cmd == "spy-toggle":
self.__spy_toggle()
elif cmd == "show":
self.__show()
class SLogDBus(dbus.service.Object):
def __init__(self, interface, obj_path = "/SLog"):
self.interface = interface
bus = dbus.SessionBus()
bus_name = dbus.service.BusName("org.LightLang.SLog", bus)
dbus.service.Object.__init__(self, bus_name, obj_path)
@dbus.service.method("org.LightLang.SLogInterface")
def spy_toggle(self):
self.interface.spy_action.activate()
@dbus.service.method("org.LightLang.SLogInterface")
def toggle(self):
self.interface.window_toggle()
@dbus.service.method("org.LightLang.SLogInterface")
def show(self):
self.interface.hide()
self.interface.app_show()
| gpl-2.0 | -6,164,688,821,272,691,000 | 24.041667 | 70 | 0.682196 | false |
ovnicraft/server-tools | base_locale_uom_default/tests/test_res_lang.py | 1 | 1605 | # -*- coding: utf-8 -*-
# Copyright 2017 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestResLang(TransactionCase):
def setUp(self):
super(TestResLang, self).setUp()
self.lang = self.env.ref('base.lang_en')
self.env.user.lang = self.lang.code
self.uom = self.env.ref('product.product_uom_dozen')
self.lang.default_uom_ids = [(6, 0, self.uom.ids)]
def test_check_default_uom_ids_fail(self):
"""It should not allow multiple UoMs of the same category."""
with self.assertRaises(ValidationError):
self.lang.default_uom_ids = [
(4, self.env.ref('product.product_uom_unit').id),
]
def test_check_default_uom_ids_pass(self):
"""It should allow multiple UoMs of different categories."""
self.lang.default_uom_ids = [
(4, self.env.ref('product.product_uom_kgm').id),
]
self.assertEqual(len(self.lang.default_uom_ids), 2)
def test_default_uom_by_category_exist(self):
"""It should return the default UoM if existing."""
self.assertEqual(
self.env['res.lang'].default_uom_by_category('Unit'),
self.uom,
)
def test_default_uom_by_category_no_exist(self):
"""It should return empty recordset when no default UoM."""
self.assertEqual(
self.env['res.lang'].default_uom_by_category('Volume'),
self.env['product.uom'].browse(),
)
| agpl-3.0 | 4,553,754,772,150,179,000 | 35.477273 | 69 | 0.61433 | false |
jabumaho/MNIST-neural-network | network.py | 1 | 3424 | import numpy as np
def sgm(x, derivative=False):
if not derivative:
return 1/(1+np.exp(-x))
else:
return sgm(x) * (1 - sgm(x))
def linear(x, derivative=False):
if not derivative:
return x
else:
return 1
class NeuralNetwork:
layerCount = 0
shape = None
weights = []
layerTransferFunc = []
def __init__(self, layerSize, layerTransferFunc=None):
self.layerCount = len(layerSize) - 1
self.shape = layerSize
self._layerInput = []
self._layerOutput = []
		self._previousWeightDelta = []
		self.weights = []
for (l1, l2) in zip(layerSize[:-1], layerSize[1:]):
self.weights.append(np.random.normal(scale=0.1, size=(l2, l1 + 1)))
self._previousWeightDelta.append(np.zeros(shape=(l2, l1 + 1)))
if layerTransferFunc is None:
layerTransferFunc = []
for i in range(self.layerCount):
if i == self.layerCount - 1:
layerTransferFunc.append(sgm)
else:
layerTransferFunc.append(sgm)
else:
if len(layerTransferFunc) != len(layerSize):
raise ValueError("Incompatible no of transfer functions.")
elif layerTransferFunc[0] is not None:
raise ValueError("no transfer functions for input layer.")
else:
layerTransferFunc = layerTransferFunc[1:]
self.layerTransferFunc = layerTransferFunc
def run(self, inputr):
lnCases = inputr.shape[0]
self._layerInput = []
self._layerOutput = []
for i in range(self.layerCount):
if i == 0:
layerInput = self.weights[0].dot(np.vstack([inputr.T, np.ones([1, lnCases])]))
else:
layerInput = self.weights[i].dot(np.vstack([self._layerOutput[-1], np.ones([1, lnCases])]))
self._layerInput.append(layerInput)
self._layerOutput.append(self.layerTransferFunc[i](layerInput))
return self._layerOutput[-1].T
def trainEpoch(self, inputt, target, trainingRate=0.5, momentum=0.5):
delta = []
lnCases = inputt.shape[0]
self.run(inputt)
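		# Backward pass: starting from the output layer, compute each layer's error
		# signal (delta) by propagating the output error back through the weights.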
for i in reversed(range(self.layerCount)):
if i == self.layerCount - 1:
output_delta = self._layerOutput[i] - target.T
error = 0.5 * np.sum(output_delta**2)
delta.append(output_delta * self.layerTransferFunc[i](self._layerInput[i], True))
else:
deltaPullback = self.weights[i + 1].T.dot(delta[-1])
delta.append(deltaPullback[:-1, :] * self.layerTransferFunc[i](self._layerInput[i], True))
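		# Weight update pass: combine each layer's input activations with its delta to
		# form the gradient, then apply the learning rate and the momentum term.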
for i in range(self.layerCount):
deltaIndex = self.layerCount - 1 - i
if i == 0:
layerOutput = np.vstack([inputt.T, np.ones([1, lnCases])])
else:
layerOutput = np.vstack([self._layerOutput[i - 1], np.ones([1, self._layerOutput[i - 1].shape[1]])])
currentweightDelta = np.sum(layerOutput[None, :, :].transpose(2, 0, 1) * delta[deltaIndex][None, :, :].transpose(2, 1, 0), axis=0)
weightDelta = trainingRate * currentweightDelta + momentum * self._previousWeightDelta[i]
self.weights[i] -= weightDelta
self._previousWeightDelta[i] = weightDelta
return error
def test_network(self, inputtest, target):
self.run(inputtest)
output_delta = self._layerOutput[self.layerCount - 1] - target.T
return 0.5 * np.sum(output_delta**2)
def nudge(self, scale):
for i in xrange(len(self.weights)):
for j in xrange(len(self.weights[i])):
for k in xrange(len(self.weights[i][j])):
w = self.weights[i][j][k]
w *= scale
u = np.random.normal(scale=abs(w))
self.weights[i][j][k] += u
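# Minimal usage sketch (hypothetical data shapes, e.g. MNIST-sized inputs):
#   nn = NeuralNetwork((784, 100, 10))
#   for epoch in xrange(100):
#       err = nn.trainEpoch(train_inputs, train_targets)  # arrays of shape (N, 784) and (N, 10)
#   predictions = nn.run(test_inputs)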
| gpl-3.0 | -1,273,426,215,257,752,800 | 28.035088 | 133 | 0.645736 | false |
danclaudiupop/pyhackernews | hn/core.py | 1 | 6927 | import os
import urwid
import subprocess
import ConfigParser
from hnapi import HN
from datetime import datetime
_config = None
class HNStory(object):
def __init__(self, i, story):
self.index = i + 1
self.story = story
@property
def story_number(self):
index = str(self.index)
if len(index) == 1:
return ''.join((' ', index))
return self.index
@property
def story_title(self):
return self.story.title
@property
def story_link(self):
return self.story.link
@property
def story_subtext(self):
return self.story.subtext
def get_top_stories():
hn = HN()
for i, story in enumerate(hn.get_top_stories()[:30]):
yield HNStory(i, story)
def read_config():
filename = 'pyhackernews'
config = ConfigParser.ConfigParser()
if os.path.exists(os.path.expanduser('~' + '/.' + filename)):
config.read(os.path.expanduser('~' + '/.' + filename))
elif os.path.exists ( os.path.expanduser('~' + '/.config/' + filename)):
config.read(os.path.expanduser('~' + '/.config/' + filename))
return config
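# Example ~/.pyhackernews configuration (hypothetical values). The [Keys] section
# overrides the default key bindings and the [Commands] section sets the browser
# command per action, with %URL replaced by the story link:
#   [Keys]
#   open = o
#   tabopen = t
#   [Commands]
#   open = firefox %URL
#   tabopen = firefox --new-tab %URL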
def open_browser(function, url):
global _config
try:
command = _config.get('Commands', function)
if command:
subprocess.Popen(
command.replace('%URL', url).split(' '),
stdout=open(os.devnull),
stderr=open(os.devnull),
)
else:
raise
except:
subprocess.Popen(
['python', '-m', 'webbrowser', '-t', url],
stdout=open(os.devnull),
stderr=open(os.devnull),
)
class ItemWidget(urwid.WidgetWrap):
def __init__(self, s):
self.story_link = s.story_link
story_title = urwid.AttrWrap(urwid.Text(
'%s. %s' % (s.story_number, s.story_title)),
'body', 'focus'
)
story_subtext = urwid.AttrWrap(urwid.Text(
' %s' % (s.story_subtext)),
'subtext', 'focus'
)
pile = urwid.Pile([story_title, story_subtext])
self.item = [
urwid.Padding(pile, left=1, right=1),
('flow', urwid.AttrWrap(urwid.Text(
' ', align="right"), 'body', 'focus'
))
]
w = urwid.Columns(self.item, focus_column=0)
super(ItemWidget, self).__init__(w)
def selectable(self):
return True
def keypress(self, size, key):
return key
class UI(object):
palette = [
('head', '', '', '', 'g7', '#f60'),
('body', '', '', '', 'g7', 'g66'),
('footer', '', '', '', 'g7', 'g55'),
('focus', '', '', '', 'g7', 'g55'),
('subtext', '', '', '', 'g38', 'g66'),
]
header = [
urwid.AttrWrap(urwid.Text(
' Y | Hacker News', align='left'), 'head'
),
('flow', urwid.AttrWrap(urwid.Text(
' ', align="right"), 'head'
))
]
header = urwid.Columns(header)
# defaults
keys = {
'quit' : 'q',
'open' : 'Enter',
'tabopen' : 'Enter',
'refresh' : 'r',
'scroll_up' : 'k',
'scroll_down' : 'j',
'top' : 'g',
'bottom' : 'G'
}
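    # The defaults above can be overridden per user through the [Keys] section of
    # the configuration file loaded by read_config() (see set_keys below).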
def run(self):
self.make_screen()
urwid.set_encoding('utf-8')
urwid.connect_signal(self.walker, 'modified', self.update_footer)
self.set_keys()
try:
self.loop.run()
except KeyboardInterrupt:
print "Keyboard interrupt received, quitting gracefully"
raise urwid.ExitMainLoop
def make_screen(self):
self.view = urwid.Frame(
urwid.AttrWrap(self.populate_stories(), 'body'),
header=self.header
)
self.loop = urwid.MainLoop(
self.view,
self.palette,
unhandled_input=self.keystroke
)
self.loop.screen.set_terminal_properties(colors=256)
self.loop.set_alarm_in(600, self._wrapped_refresh)
def set_keys(self):
global _config
_config = read_config()
if _config.has_section('Keys'):
for option in _config.options('Keys'):
try:
self.keys[option] = _config.get('Keys', option)
except:
pass
def get_stories(self):
items = list()
for story in get_top_stories():
items.append(ItemWidget(story))
return items
def populate_stories(self):
items = self.get_stories()
self.walker = urwid.SimpleListWalker(items)
self.listbox = urwid.ListBox(self.walker)
return self.listbox
def set_status_bar(self, msg):
msg = '%s' % (msg.rjust(len(msg)+1))
self.view.set_footer(urwid.AttrWrap(urwid.Text(msg), 'footer'))
def update_footer(self):
url = self.listbox.get_focus()[0].story_link
self.set_status_bar(url)
def keystroke(self, input):
if input in self.keys['quit'].lower():
raise urwid.ExitMainLoop()
        if input == self.keys['open'] or input == self.keys['tabopen']:
url = self.listbox.get_focus()[0].story_link
function = [key for (key,value) in self.keys.items()
if value == input][0]
open_browser(function, url)
        if input == self.keys['refresh']:
self.set_status_bar('Refreshing for new stories...')
self.loop.draw_screen()
self.refresh_with_new_stories()
        if input == self.keys['scroll_up']:
if self.listbox.focus_position - 1 in self.walker.positions():
self.listbox.set_focus(
self.walker.prev_position(self.listbox.focus_position)
)
        if input == self.keys['scroll_down']:
if self.listbox.focus_position + 1 in self.walker.positions():
self.listbox.set_focus(
self.walker.next_position(self.listbox.focus_position)
)
        if input == self.keys['top']:
if self.listbox.focus_position - 1 in self.walker.positions():
self.listbox.set_focus(self.walker.positions()[0])
        if input == self.keys['bottom']:
if self.listbox.focus_position + 1 in self.walker.positions():
self.listbox.set_focus(self.walker.positions()[-1])
def refresh_with_new_stories(self):
items = self.get_stories()
self.walker[:] = items
self.loop.draw_screen()
def _wrapped_refresh(self, loop, *args):
self.refresh_with_new_stories()
ct = datetime.now().strftime('%H:%M:%S')
self.set_status_bar('Automatically fetched new stories at: %s' % ct)
self.loop.set_alarm_in(600, self._wrapped_refresh)
def live():
u = UI()
u.run()
| mit | 871,114,213,341,966,000 | 28.857759 | 76 | 0.529955 | false |
arulalant/mmDiagnosis | diagnosis1/extra/dirty/MULTIPLE PLOTS/por_landscape_2x3.py | 1 | 2203 | import cdms2
import cdutil
import numpy
import numpy.ma
import vcs
import os
import sys
import por_template_2x3_landscape as por_lanscape_2x3
x = por_lanscape_2x3.x
iso=x.createisofill('new1', 'ASD')
iso.levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#([1, 5, 10, 15, 20, 25, 35, 45, 55, 60, 65, 70, 80])
#iso.levels=vcs.mkscale(0.,80.)
iso.fillareacolors = (246, 255, 252, 253, 254, 251, 140, 5, 171,
248, 249, 242, 239)
#iso.fillareacolors=vcs.getcolors(iso.levels)
iso.ext_1='y'
iso.ext_2='y'
iso.level_1=0
iso.level_2=1
hours=[24, 48, 72, 96, 120]
score_name= ['ts', 'pod', 'pofd', 'hr', 'far']
th_list=[0.1, 0.6, 1. , 3. , 5. , 7.]
file_name='/NCMRWF/Process_Files/T254/StatiScore/2010/Season/jjas/24/stati_spatial_distribution_score_24hr_jjas_2010_T254.nc'
f=cdms2.open(file_name)
for j in xrange(len(score_name)):
score_name_capital = score_name[j].upper()
for k in range(6):
        score = f(score_name[j], threshold = th_list[k])
title_plot='T254 D-01 %s %s THRESHOLD JJAS 2010' %(score_name_capital, str(th_list[k]))
if (k == 0):
x.plot(score, por_lanscape_2x3.leftOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif (k == 1):
x.plot(score, por_lanscape_2x3.midOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif (k == 2):
x.plot(score, por_lanscape_2x3.rightOfTop_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==3):
x.plot(score, por_lanscape_2x3.leftOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==4):
x.plot(score, por_lanscape_2x3.midOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
elif(k==5):
x.plot(score, por_lanscape_2x3.rightOfBot_lscp, iso, title=title_plot, continents=1, bg=1)
else:
pass
out_f_name='/home/arulalan/Desktop/%s_2010_obs.png' %(score_name_capital)
x.png(out_f_name)
x.clear()
| gpl-3.0 | -1,482,070,405,860,495,400 | 31.880597 | 125 | 0.552429 | false |
cedricpradalier/vrep_ros_ws | src/ar_loc_base/src/ar_loc_base/rover_pf.py | 1 | 4147 | import roslib; roslib.load_manifest('ar_loc_base')
import rospy
from numpy import *
from numpy.linalg import pinv, inv
from math import pi, sin, cos
from geometry_msgs.msg import *
import tf
import bisect
import threading
from rover_kinematics import *
class RoverPF(RoverKinematics):
def __init__(self, initial_pose, initial_uncertainty):
RoverKinematics.__init__(self)
self.initial_uncertainty = initial_uncertainty
self.lock = threading.Lock()
self.X = mat(vstack(initial_pose))
# Initialisation of the particle cloud around the initial position
self.N = 500
self.particles = [self.X + self.drawNoise(initial_uncertainty) for i in range(0,self.N)]
self.pa_pub = rospy.Publisher("~particles",PoseArray,queue_size=1)
def getRotation(self, theta):
R = mat(zeros((2,2)))
R[0,0] = cos(theta); R[0,1] = -sin(theta)
R[1,0] = sin(theta); R[1,1] = cos(theta)
return R
# Draw a vector uniformly around [0,0,0], scaled by norm
def drawNoise(self, norm):
if type(norm)==list:
return mat(vstack(norm)*(2*random.rand(3,1)-vstack([1,1,1])))
else:
return mat(multiply(norm,((2*random.rand(3,1)-vstack([1,1,1])))))
def predict(self, motor_state, drive_cfg, encoder_precision):
self.lock.acquire()
# The first time, we need to initialise the state
if self.first_run:
self.motor_state.copy(motor_state)
self.first_run = False
self.lock.release()
return
# Prepare odometry matrices (check rover_odo.py for usage)
iW = self.prepare_inversion_matrix(drive_cfg)
S = self.prepare_displacement_matrix(self.motor_state,motor_state,drive_cfg)
self.motor_state.copy(motor_state)
# Apply the particle filter prediction step here
# TODO
# self.particles = ...
self.lock.release()
def update_ar(self, Z, L, Uncertainty):
self.lock.acquire()
print "Update: L="+str(L.T)
# Implement particle filter update using landmarks here
# Note: the function bisect.bisect_left could be useful to implement
# the resampling process efficiently
# TODO
# self.particles = ...
self.lock.release()
def update_compass(self, angle, Uncertainty):
self.lock.acquire()
print "Update: C="+str(angle)
# Implement particle filter update using landmarks here
# Note: the function bisect.bisect_left could be useful to implement
# the resampling process efficiently
# TODO
# self.particles = ...
self.lock.release()
def updateMean(self):
X = mat(zeros((3,1)))
for x in self.particles:
X += x
self.X = X / len(self.particles)
return self.X
def publish(self, pose_pub, target_frame, stamp):
# Only compute the mean for plotting
self.updateMean()
pose = PoseStamped()
pose.header.frame_id = target_frame
pose.header.stamp = stamp
pose.pose.position.x = self.X[0,0]
pose.pose.position.y = self.X[1,0]
pose.pose.position.z = 0.0
Q = tf.transformations.quaternion_from_euler(0, 0, self.X[2,0])
pose.pose.orientation.x = Q[0]
pose.pose.orientation.y = Q[1]
pose.pose.orientation.z = Q[2]
pose.pose.orientation.w = Q[3]
pose_pub.publish(pose)
pa = PoseArray()
pa.header = pose.header
for p in self.particles:
po = Pose()
po.position.x = p[0,0]
po.position.y = p[1,0]
q = tf.transformations.quaternion_from_euler(0, 0, p[2,0])
po.orientation = Quaternion(*q)
pa.poses.append(po)
self.pa_pub.publish(pa)
def broadcast(self,br, target_frame, stamp):
br.sendTransform((self.X[0,0], self.X[1,0], 0),
tf.transformations.quaternion_from_euler(0, 0, self.X[2,0]),
stamp, "/%s/ground"%self.name, target_frame)
| bsd-3-clause | 2,330,022,787,027,257,000 | 32.991803 | 96 | 0.592235 | false |
agrc/deq-enviro | api/Deq.Search.Soe/PublishSoe.py | 1 | 1322 | import requests
host = 'localhost'
configuration = 'Debug'
service_name = 'DEQEnviro/MapService'
token_url = 'http://{}:6080/arcgis/admin/generateToken'.format(host)
update_soe_url = 'http://{}:6080/arcgis/admin/services/types/extensions/update'.format(
host)
upload_url = 'http://{}:6080/arcgis/admin/uploads/upload?token={}'.format(
host, '{}')
start_service_url = 'http://{}:6080/arcgis/admin/services/{}.MapServer/start'.format(
host, service_name)
file_name = r'C:\Projects\GitHub\deq-enviro\api\Deq.Search.Soe\bin\{}\Deq.Search.Soe.soe'.format(
configuration)
data = {'username': '',
'password': '',
'client': 'requestip',
'f': 'json'}
r = requests.post(token_url, data=data)
data = {'f': 'json'}
print 'got token'
files = {'itemFile': open(file_name, 'rb'),
'f': 'json'}
data['token'] = r.json()['token']
print 'uploading'
r = requests.post(upload_url.format(data['token']), files=files)
print r.status_code, r.json()['status']
data['id'] = r.json()['item']['itemID']
print 'updating', data['id']
r = requests.post(update_soe_url, params=data)
print r.status_code, r.json()['status']
print 'starting service'
r = requests.post(
start_service_url, params={'f': 'json', 'token': data['token']})
print r.status_code, r.json()['status']
print 'done'
| mit | 7,601,712,418,277,422,000 | 24.921569 | 97 | 0.645991 | false |
nblago/utils | src/model/BBFit.py | 1 | 66521 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 10:57:34 2018
Class that enables fitting a black body function to a set of magnitudes.
@author: nadiablago
@version: 0.22
"""
from __future__ import print_function
import matplotlib
from matplotlib import pylab as plt
import corner
from astropy import units as u
import astropy.constants as cnt
import os, sys
import numpy as np
import emcee
from scipy import stats
import extinction
from astropy.cosmology import FlatLambdaCDM
import warnings
#If PYSYN_CDBS is not defined, it adds the environment variable which points to the
#filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print ("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/Users/USER/SOMEWHERE/pysynphot_files"
print ('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])
'''os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
# Add the environment variable which points to the filter response files for the bands we are interested in.
if not 'PYSYN_CDBS' in os.environ.keys():
print("Adding the Pysynphot environment:")
os.environ['PYSYN_CDBS'] = "/scratch/Software/pysynphot_files/cdbs/"
print('PYSYN_CDBS environment variable set to: ', os.environ['PYSYN_CDBS'])'''
os.environ['PYSYN_CDBS'] = "/Users/nadiablago/Documents/Software/pysynphot_files/"
import pysynphot as ps
class BBFit:
def __init__(self):
'''
Constructor initializes all the parameters to
defaults.
'''
#Some predefined constants in the units we need them
self.c = cnt.c.to(u.cm/u.s).value #2.99792458e+10 #cm / s
self.h = cnt.h.to(u.erg * u.s).value #6.62607004e-27 #erg s
self.k_B = cnt.k_B.to(u.erg / u.K).value#1.38064852e-16 #erg / K
#Source parameters
self.av_host = 0
self.av_mw = 0
self.law = "Fitzpatrick"
self.law_mw = "Fitzpatrick"
#Black body models
self.initT1 = 10000 #K
self.initR1 = 1 # Rsun
self.initT2 = 3000 #K
self.initR2 = 1 # Rsun
self.z = None
self.distMpc = None #in Mpc
self.mjd = 0
#Power law models
self.alpha = 0.75
self.alphaerr1 = 0
self.alphaerr2 = 0
self.scale = 1
self.scaleerr1 = 0.1
self.scaleerr2 = 0.1
#Disk model (scale is already in the power law model)
#Stellar mass, radius, log accretion mass per year, outer radius of accretion disk
self.Mstar = 1
self.Mstarerr1 = 0.1
self.Mstarerr2 = 0.1
self.Rstar = 1
self.Rstarerr1 = 0.1
        self.Rstarerr2 = 0.1
self.logMacc = -8
self.logMaccerr1 = -9
self.logMaccerr2 = -9
self.R_out = 3
self.R_outerr1 = 1
self.R_outerr2 = 1
#Location for plots
self.plotdir = "../../data/plots"
#Location for fit results
self.resdir = "../../data/modelfits"
self.resfile = "fit_results.txt"
#MCMC parameters
self.method = 'ensemble' #or HA for Hastings
self.mhtune = True # tuning of the Metropolis-Hastings
self.niterations = 10000
self.burnin = 5000
self.threads = 10
self.nwalkers = 20
self.sampler = None
self.model = "BlackBody" #others are "BlackBody_Av" or "BlackBody2_Av", "PowerLaw", "PowerLaw_BlackBody"
#Input data parameters.
#The fitter will run either with magnitudes or with fluxes
self.mags = None
self.magerrs = None
self.bands = None
#Indicates whether the magnitude is in AB or Vega
self.photsys = None
self.wls = None
self.fluxes = None
self.fluxerrs = None
#Output
self.T = None
self.Terr1 = None
self.Terr2 = None
self.R = None
self.Rerr1 = None
self.Rerr2 = None
self.L = None
self.Lerr1 = None
self.Lerr2 = None
#Output for the secondary star
self.Tsec = None
self.Tsecerr1 = None
self.Tsecerr2 = None
self.Rsec = None
self.Rsecerr1 = None
self.Rsecerr2 = None
self.Lsec = None
self.Lsecerr1 = None
self.Lsecerr2 = None
self.cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)
#Set the plotting characteristics
self._matplotlib_init()
self.banddic = {"Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/ctio_y_andicam.dat"),
"J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_j_002.fits"),
"H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_h_002.fits"),
"K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/bessell_k_002.fits"),
"keck,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"keck,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"keck,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat"),
"keck,K": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.K.dat"),
"spitzer,3.6": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac1_3.6.dat"),
"spitzer,4.5": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac2_4.5.dat"),
"spitzer,5.8": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac3_5.8.dat"),
"spitzer,8.0": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Spitzer_irac4_8.0.dat"),
"wise,w1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W1.dat"),
"wise,w2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W2.dat"),
"wise,w3": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W3.dat"),
"wise,w4": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/WISE_WISE.W4.dat"),
"swift,uvw2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw2_uvot.dat"),
"swift,uvm2": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvm2_uvot.dat"),
"swift,uvw1": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_uvw1_uvot.dat"),
"swift,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_u_uvot.dat"),
"swift,b": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_b_uvot.dat"),
"swift,v": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/swift_v_uvot.dat"),
"paranal,Y": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Y.dat"),
"paranal,Z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Z.dat"),
"paranal,J": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.J.dat"),
"paranal,H": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.H.dat"),
"paranal,Ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_VISTA.Ks.dat"),
"omegacam,u": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.u_SDSS.dat"),
"omegacam,g": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.g_SDSS.dat"),
"omegacam,r": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.r_SDSS.dat"),
"omegacam,i": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.i_SDSS.dat"),
"omegacam,z": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.z_SDSS.dat"),
"omegacam,Halpha": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Paranal_OmegaCAM.Halpha.dat"),
"nirc2,j": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.J.dat"),
"nirc2,h": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.H.dat"),
"nirc2,ks": os.path.join(os.environ['PYSYN_CDBS'], "comp/nonhst/Keck_NIRC2.Ks.dat")
}
def _matplotlib_init(self):
'''
Set up preferences on matplotlib plot appearance.
'''
matplotlib.rcParams['xtick.minor.size'] = 6
matplotlib.rcParams['xtick.major.size'] = 6
matplotlib.rcParams['ytick.major.size'] = 6
matplotlib.rcParams['xtick.minor.size'] = 4
matplotlib.rcParams['ytick.minor.size'] = 4
matplotlib.rcParams['lines.linewidth'] = 0.5
matplotlib.rcParams['axes.linewidth'] = 1.5
matplotlib.rcParams['font.size']= 14.0
matplotlib.rcParams['font.family']= 'sans-serif'
matplotlib.rcParams['xtick.major.width']= 2.
matplotlib.rcParams['ytick.major.width']= 2.
matplotlib.rcParams['ytick.direction']='in'
matplotlib.rcParams['xtick.direction']='in'
def _band2flux(self):
'''
Will transform the magnitude measurement into a flux measurement.
'''
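        #Approach: build a reference spectrum, renormalize it to the observed magnitude
        #in each band (after removing the Milky Way extinction), and "observe" it through
        #the bandpass to recover the flux in flam units at the band effective wavelength.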
wls = np.array([])
fluxes = np.array([])
fluxerr = np.array([])
#Create a black body spectrum with an arbitrary value
lam = np.linspace(100, 120000, 10000)
sp = ps.BlackBody(10000)
sp.convert('flam')
sp2 = self._model_2(lam, 10000, 1)
sp2 = sp2 * np.max(sp.flux) / np.max(sp2)
sp = ps.ArraySpectrum(lam, sp2)
for b, m, me, psys in zip(self.bands, self.mags, self.magerrs, self.photsys):
print ("Band,",b)
#Create the observation bandpass
try:
band = ps.ObsBandpass(b)
except ValueError:
#The band is not in the standard list
#We need to go to the dictionary to retrieve the transmission function.
band = ps.FileBandpass(self.banddic[b])
#band.waveunits.convert("angstrom")
#else:
# band.waveunits = ps.units.Angstrom
            #Obtain the effective (average) wavelength
effwave = band.avgwave()
#Correct for Milky Way extinction
m = m - extinction.fitzpatrick99(np.array([effwave]), a_v=self.av_mw, unit='aa')[0]
#Normalize the spectrum to the magnitude of the observation
sp_norm = sp.renorm(m, psys, band, force="extrap")
#Observe with the band
obs = ps.Observation(sp_norm, band)
#Get the flux
flux = obs.effstim('flam')
wls = np.append(wls, effwave)
fluxes = np.append(fluxes, flux)
#Compute the error bars
flux_high = flux * 10**(0.4*me)
flux_low = flux * 10**(-0.4*me)
fluxerr = np.append(fluxerr, np.average([flux - flux_low, flux_high-flux]))
return wls, fluxes, fluxerr
def _model(self, lam, p):
'''
Returns the flux for the single BlackBody model for the wavelength introduced.
lam is in A.
p = (T, R)
'''
lam = lam * u.Angstrom
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_2(self, lam, T, R):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
T = T * u.K
R = (R * u.Rsun).to(u.cm)
Area = np.pi * (4 * np.pi * R**2)
flam = Area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
return flam.to(u.erg/u.s/u.Angstrom).value
def _model_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T = p[0] * u.K
R = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
if a_v < 0:
return lam * np.inf
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area = np.pi * (4 * np.pi * R**2)
flam = area * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T))-1)
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model_av_r_2(self, lam, T, R, a_v):
'''
Return units: erg s-1 A-1
'''
return self._model_av_r(lam, (T, R, a_v))
def _model2_av(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
T1 = p[0] * u.K
R1 = (p[1] * u.Rsun).to(u.cm)
a_v = p[2]
T2 = p[3] * u.K
R2 = (p[4] * u.Rsun).to(u.cm)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
lam = lam * u.Angstrom
area1 = np.pi * (4 * np.pi * R1**2)
area2 = np.pi * (4 * np.pi * R2**2)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = area1 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T1))-1)
flam2 = area2 * (2*cnt.h*((cnt.c).to(u.cm/u.s))**2/( (lam.to(u.cm))**5))/ \
(np.exp((cnt.h*cnt.c)/(lam.to(u.m)*cnt.k_B*T2))-1)
flam = flam1 + flam2
#Apply the reddening
flam = flam.to(u.erg/u.s/u.Angstrom).value * flux_red
return flam
def _model2_av_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av(lam, (T1, R1, a_v, T2, R2))
def _model2_av_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
a_v = p[2]
T2 = p[3]
R2 = p[4]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
if a_v < 0:
return lam * np.inf
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
        #The third one is the Planck law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
#Compute the effect of reddening as a flux factor
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam*1e8, a_v, unit='aa'))
flam = (flam1 + flam2) * flux_red *1e-8 #to erg / s / A
#Apply the reddening and transform to erg /s/ A from cm
return flam
def _model2_av_r_2(self, lam, T1, R1, a_v, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_av_r(lam, (T1, R1, a_v, T2, R2))
def _model2_r(self, lam, p):
'''
Return units: erg s-1 A-1
'''
T1 = p[0] #In K
R1 = p[1]*69570000000.0 #From Rsun to cm
T2 = p[2]
R2 = p[3]*69570000000.0 #From Rsun to cm
lam = lam * 1e-8 #To cm
#We need an extra pi as it is integrated across all steradians
#The second factor is the surface of the black body
        #The third one is the Planck law
with warnings.catch_warnings():
warnings.simplefilter("ignore")
flam1 = np.pi * (4 * np.pi * R1**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T1))-1)
flam2 = np.pi * (4 * np.pi * R2**2) * ( (2*self.h*self.c**2)/( lam**5))/ (np.exp((self.h*self.c)/(lam*self.k_B*T2))-1)
flam = (flam1 + flam2)*1e-8 #to erg / s / A
return flam
def _model2_r_2(self, lam, T1, R1, T2, R2):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model2_r(lam, (T1, R1, T2, R2))
def _model_powerlaw(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
lam = lam * u.Angstrom
        w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
a_v = p[2]
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
area = 10**scale
return area * flam * flux_red #.to(u.erg/u.s/u.Angstrom).value
def _model_powerlaw_2(self, lam, alpha, scale, a_v):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_powerlaw(lam, (alpha, scale, a_v))
def _model_powerlaw_bb(self, lam, p):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
        w0 = 4000 #p[0] #Reference wavelength
alpha = p[0]
scale = p[1]
T_bb = p[2]
R_bb = p[3]
bb_flux = self._model_2(lam, T_bb, R_bb)
lam = lam * u.Angstrom
f = ps.PowerLaw(w0, alpha)
f.convert('flam')
flam = np.interp(lam, f.wave, f.flux)
area = 10**scale
return area * flam + bb_flux
def _model_powerlaw_bb_2(self, lam, alpha, scale, T_bb, R_bb):
'''
Return units: erg s-1 A-1
'''
return self._model_powerlaw_bb(lam, (alpha, scale, T_bb, R_bb))
def _model_accretion_disk_old2(self, lam, Mstar, Rstar, logMacc, scale, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk_old(lam, (Mstar, Rstar, logMacc, scale, R_out))
def _model_accretion_disk_old(self, lam, p):
'''
Equation 1 from Kenyon, Hartmann, Hewett 1988.
'''
Mstar = p[0]
Rstar = p[1]
Macc = p[2]
scale = p[3]
R_out = p[4]
if Mstar<0 or Macc<-12 or Rstar<0.001 or scale<0 or R_out < Rstar:
return np.ones(len(lam))*np.inf
Macc = 10**Macc
R = np.linspace(Rstar,R_out,20)
dR = R[1] - R[0]
F_r = (3 * cnt.G * Mstar * u.Msun * Macc * u.Msun/u.year / 8 / np.pi / (u.Rsun*Rstar)**3) * (Rstar/R)**3 * (1 - (Rstar/R)**0.5)
F_r = F_r.to(u.erg/u.cm**2/u.s)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
T_max = 13000 * u.K *(Mstar)**0.25 * (Macc / 1e-5)**0.25 * (Rstar)**-0.75
        #Create the disk model
#For each differential radii, we compute the black body spectra corresponding
# to the temperature at that radius, and scale it by the flux expected at that
# radius.
disk_model = []
for i, ri in enumerate(R):
if ri>Rstar and ri<=1.5*Rstar:
sp = ps.BlackBody(T_max.value)
#sp = ps.BlackBody(T_r[i].value)
else:
sp = ps.BlackBody(T_r[i].value)
sp.convert('flam')
tot_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
#Compute the total emitted flux for the spherical area.
#Adopt the outer radius as the
dist_flux_fac = np.pi * ((ri+dR)**2 - ri**2) * (u.Rsun.to(u.cm))**2
scaled_flux = sp.flux / tot_flux * F_r[i].value #* dist_flux_fac
disk_model.append(scaled_flux)
disk = np.array(disk_model)
disk = np.nansum(disk, axis=0)
sp = ps.ArraySpectrum(sp.wave, disk)
#int_flux = sp.trapezoidIntegration(sp.wave, sp.flux)
int_flux = np.max(sp.flux)
#Normalize (recover) the integral flux from 1kpc
flux_norm= sp.flux #/int_flux
#sp_norm = ps.ArraySpectrum(sp.wave, flux_norm)
flux_norm = np.interp(lam, sp.wave, flux_norm)
#flux_red = 10**(-0.4 * extinction.fitzpatrick99(lam, a_v, unit='aa'))
return flux_norm #* scale #* flux_red
def _model_disk_T(self, R, Mstar, Rstar, logMacc):
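        #Temperature profile of a steady accretion disk: T(r) = (F(r) / sigma_sb)**0.25,
        #where F(r) is the viscous dissipation rate per unit area. The innermost annuli
        #(Rstar <= R <= 1.5 Rstar) are capped at T_max, as in _model_accretion_disk_old.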
F_r = (3 * cnt.G * Mstar * 10**float(logMacc) * (u.Msun**2/u.year)) \
/ (8 * np.pi * (u.Rsun*R)**3) \
* (1 - (Rstar/R)**0.5)
T_r = ((F_r / cnt.sigma_sb)**0.25).to(u.K)
#print (F_r, T_r)
mask = (R>=Rstar) * (R<=1.5*Rstar)
if np.count_nonzero(mask)>0:
T_max = 13000 * u.K *(Mstar)**0.25 * (10**float(logMacc) / 1e-5)**0.25 * (Rstar)**-0.75
T_r[mask] = T_max
#print (mask, "Tmax", T_max, np.count_nonzero(mask))
return T_r.value
def _model_accretion_disk2(self, lam, Mstar, Rstar, logMacc, R_out):
'''
Return units: erg s-1 A-1
As we multiply by the area of the emitting source (in cm**2)
'''
return self._model_accretion_disk(lam, (Mstar, Rstar, logMacc, R_out))
def _model_accretion_disk(self, lam, p):
Mstar = np.maximum(1e-6, p[0])
Rstar = np.maximum(1e-6, p[1])
logMacc = np.maximum(-12, np.minimum(-7, p[2]))
R_out = np.maximum(1e-6, p[3])
i = 45.0
#Deg to radians
i = np.deg2rad(i%360)
d = self.distMpc*(u.Mpc).to(u.cm)
R = np.linspace(Rstar, R_out, 30)*u.Rsun
nu = (cnt.c / (lam*u.Angstrom)).to(u.Hz)
T_r = self._model_disk_T(R.value, Mstar, Rstar, logMacc)
F_nu_arr = []
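        #For each observed frequency, integrate the blackbody ring intensities over the
        #disk radius (Rstar to R_out) and scale by inclination and distance to obtain
        #the flux density.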
for ni in nu:
I_nu_r = R / (np.exp(cnt.h * ni/(cnt.k_B*T_r*u.K)) - 1)
I_flux = np.trapz(I_nu_r, R)
F_nu = (4 * np.pi * cnt.h * np.cos(i)*ni**3)/(cnt.c**2 * d**2) * I_flux
F_nu_arr.append(F_nu.to(u.erg/u.s/u.Hz).value)
F_nu_arr = np.array(F_nu_arr)
s = ps.ArraySpectrum(lam, F_nu_arr, fluxunits='fnu', waveunits='Angstrom')
s.convert('flam')
fluxFactor = 4*np.pi*d**2
return s.flux*fluxFactor
def _get_Qnu(self, a, lam, wavedusttype="silicate"):
'''
'''
from scipy import interpolate
x = np.array([0.001, 0.01, 0.1, 1]) #size
y = np.array([0.01, 0.06, 0.2, 7, 10 ]) #wavelength
#--> size
# | wave
# v
z = np.array([[0.02, 0.2, 0.85, 0.85],
[0.02, 0.7, 0.7, 0.7],
[0.001, 0.01, 0.7, 0.7],
[0.00007, 0.001, 0.01, 0.1],
[0.001, 0.01, 0.1, 1]])
f = interpolate.interp2d(x, y, z, kind='linear')
return f(a, lam)
    def _get_knu(self, a, wave, rho=1, ):
        '''
        Returns the values for the dust mass absorption coefficient
        for the Spitzer bands for the given grain size and wavelength.
        k_nu = (3. / (4 * np.pi * rho * a**3)) * (np.pi * a**2 * Q_nu(a))
        '''
        k_nu = (3. / (4 * np.pi * rho * a**3)) * (np.pi * a**2 * self._get_Qnu(a, wave))
        return k_nu
    def _model_dust(self, Md, Td, a, wave):
        '''
        Using the dust modelling approach from Fox et. al. 2010.
        The assumption is that the dust is optically thin and that there is only one size and
        one dust composition.
        The opacities are taken from their Figure 4 values.
        F_nu = M_d B_nu(T_d) k_nu(a) / d**2
        wave is the wavelength grid (in Angstrom) on which the model is evaluated.
        '''
        Bnu = ps.BlackBody(Td)
        Bnu.convert('fnu')
        #Evaluate the black body on the requested wavelength grid
        bnu = np.interp(wave, Bnu.wave, Bnu.flux)
        knu = self._get_knu(a, wave) * u.cm**2 / u.g
        Fnu = (Md * u.Msun).to(u.g) * bnu * knu / ((self.distMpc * u.Mpc).to(u.cm))**2
        return Fnu
#likelihood function
def _like(self, p, xdat, ydat, errdat, debug=False):
'''
p: function parameters
args: carry anything we want to pass to our function (e.g. the data)
'''
if self.model == "BlackBody":
ymod = self._model(xdat, p)
elif self.model == "BlackBody_Av":
ymod = self._model_av_r(xdat, p)
elif self.model == "BlackBody2_Av":
ymod = self._model2_av_r(xdat, p)
elif self.model == "BlackBody2":
ymod = self._model2_r(xdat, p)
elif self.model == "PowerLaw":
ymod = self._model_powerlaw(xdat, p)
elif self.model == "PowerLaw_BlackBody":
ymod = self._model_powerlaw_bb(xdat, p)
elif self.model == "Disk":
ymod = self._model_accretion_disk(xdat, p)
else:
print ("Unknown model", self.model)
return np.nan
#Discard models which exceed the upper limits
if (np.any(ymod[errdat<0] > ydat[errdat<0])):
prob = 1e-320
#Compute the likelihood with only valid datapoints.
else:
prob = stats.norm.pdf(ydat[errdat>0] , ymod[errdat>0] , errdat[errdat>0] )
# log probabilities
# we add tiny number to avoid NaNs
mylike = np.log(prob + 1e-320).sum()
return mylike
def _logposterior(self, p, xdat, ydat, errdat):
'''
Returns the posterior of the observations. In essence the likelihood and the prior:
#log(likelihood) + log(prior)
'''
lp = self._logprior(p)
if (not np.isinf(lp)):
lp= self._like(p, xdat, ydat, errdat) + lp
return lp
def _logprior(self, p):
'''
Returns the prior probability distribution for each model.
'''
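        #All priors are flat (uniform) within physically motivated ranges; parameter
        #combinations outside those ranges return -inf so that the proposed step is rejected.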
if self.model == "BlackBody":
T1 = p[0]
R1 = p[1]
if T1 < 0 or R1 < 0:
return -np.inf
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 1, 50000)
if self.model =="BlackBody_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
if T1 < 0 or R1 < 0 or av < 0:
return -np.inf
else:
logp = stats.uniform.logpdf(T1, 10, 15000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "BlackBody2":
T1 = p[0]
R1 = p[1]
T2 = p[2]
R2 = p[3]
if T1 < 0 or T2 > T1 or T2 < 0 or R1 < 0 or R2<0:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 10000)
logp = logp + stats.uniform.logpdf(R1, 10, 12000)
logp = logp + stats.uniform.logpdf(T2, 10, 5000)
logp = logp + stats.uniform.logpdf(R2, 10, 12000)
elif self.model == "BlackBody2_Av":
T1 = p[0]
R1 = p[1]
av = p[2]
T2 = p[3]
R2 = p[4]
if T1 < 0 or T2 > T1 or T2 < 0 or av < 0 or av > 10:
return - np.inf
else:
logp = stats.uniform.logpdf(T1, 100, 1000)
logp = logp + stats.uniform.logpdf(R1, 10000, 120000)
logp = logp + stats.uniform.logpdf(av, 0, 3)
logp = logp + stats.uniform.logpdf(T2, 100, 1000)
logp = logp + stats.uniform.logpdf(R2, 10000, 120000)
elif self.model == "PowerLaw":
alpha = p[0]
scale = p[1]
av = p[2]
if av < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(av, 0, 3)
elif self.model == "PowerLaw_BlackBody":
alpha = p[0]
scale = p[1]
T1 = p[2]
R1 = p[3]
if R1 < 0 or T1 < 0 or alpha < 0:
logp = -np.inf
else:
logp = stats.uniform.logpdf(alpha, 0, 3)
logp = logp + stats.uniform.logpdf(scale, 0.1, 100)
logp = logp + stats.uniform.logpdf(T1, 500, 20000)
logp = logp + stats.uniform.logpdf(R1, 0, 500)
elif self.model == "Disk":
Mstar = p[0]
Rstar = p[1]
logMacc = p[2]
R_out = p[3]
if Rstar < 0 or Mstar < 0 or logMacc < -12 or R_out<0 or R_out < Rstar:
logp = -np.inf
else:
logp = stats.uniform.logpdf(Mstar, 0, 1.44)
logp = logp + stats.uniform.logpdf(Rstar, 0, 10)
logp = logp + stats.uniform.logpdf(logMacc, -12, 7)
logp = logp + stats.uniform.logpdf(R_out, 0, 50)
return logp
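    # Note on the priors above (illustrative comment, not part of the original source):
    # scipy.stats.uniform.logpdf(x, loc, scale) is the log pdf of a uniform distribution
    # on [loc, loc + scale], not on [loc, scale].  For example:
    #
    #   stats.uniform.logpdf(5000, 10, 15000)    # finite: 5000 lies inside [10, 15010]
    #   stats.uniform.logpdf(20000, 10, 15000)   # -inf:   20000 lies outside that interval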
def _get_max_and_intervals(self, x):
'''
        Provided a chain of samples, returns the 34th percentile, the median (50th)
        and the 66th percentile of the distribution.
'''
return np.percentile(x, 34), np.percentile(x, 50), np.percentile(x, 66)
#return percent1, maxp, percent2
def _area2rsun(self, A):
'''
        Given the area of the black body in cm2, returns the radius of the object in solar radii.
'''
Aream2 = A * u.cm**2 # add units
Rad = np.sqrt(Aream2/(4*(np.pi)**2)).to(u.Rsun) #in Rsun
return Rad.value
def _fill_output(self):
'''
Computes the confidence intervals from the MCMC distribution.
        Transforms the temperature and radius into a black body luminosity.
'''
if self.model.startswith("BlackBody"):
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.L = self._get_bol_lum(T, R)
self.Lerr1 = self.L - self._get_bol_lum(T1, R1)
self.Lerr2 = self._get_bol_lum(T2, R2) - self.L
if self.model == "BlackBody_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model == "BlackBody2_Av":
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,4])
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
elif self.model == "BlackBody2":
Tsec1, Tsec, Tsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
Rsec1, Rsec, Rsec2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Tsec = Tsec
self.Tsecerr1 = Tsec - Tsec1
self.Tsecerr2 = Tsec2 - Tsec
self.Rsec = Rsec
self.Rsecerr1 = Rsec - Rsec1
self.Rsecerr2 = Rsec2 - Rsec
self.Lsec = self._get_bol_lum(Tsec, Rsec)
self.Lsecerr1 = self.Lsec - self._get_bol_lum(Tsec1, Rsec1)
self.Lsecerr2 = self._get_bol_lum(Tsec2, Rsec2) - self.Lsec
elif self.model=="PowerLaw":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
Av1, Av, Av2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
self.Av = Av
self.Averr1 = Av - Av1
self.Averr2 = Av2 - Av
elif self.model=="PowerLaw_BlackBody":
alpha1, alpha, alpha2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
T1, T, T2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R1, R, R2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.alpha = alpha
self.alphaerr1 = alpha - alpha1
self.alphaerr2 = alpha2 - alpha
self.scale = scale
self.scaleerr1 = scale - scale1
self.scaleerr2 = scale2 - scale
self.T = T
self.Terr1 = T - T1
self.Terr2 = T2 - T
self.R = R
self.Rerr1 = R - R1
self.Rerr2 = R2 - R
elif self.model=="Disk":
Mstar1, Mstar, Mstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,0])
Rstar1, Rstar, Rstar2 = self._get_max_and_intervals(self.sampler.flatchain[:,1])
logMacc1, logMacc, logMacc2 = self._get_max_and_intervals(self.sampler.flatchain[:,2])
R_out1, R_out, R_out2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
#scale1, scale, scale2 = self._get_max_and_intervals(self.sampler.flatchain[:,3])
self.Mstar = Mstar
self.Mstarerr1 = Mstar - Mstar1
self.Mstarerr2 = Mstar2 - Mstar
self.Rstar = Rstar
self.Rstarerr1 = Rstar - Rstar1
self.Rstarerr2 = Rstar2 - Rstar
self.logMacc = logMacc
self.logMaccerr1 = logMacc - logMacc1
self.logMaccerr2 = logMacc2 - logMacc
self.R_out = R_out
self.R_outerr1 = R_out - R_out1
self.R_outerr2 = R_out2 - R_out
def _save_output(self):
'''
        Appends the fit results to the results file.
'''
exists = os.path.isfile(self.resfile)
with open(self.resfile, 'a') as outfile:
print ("Saving results to %s"%self.resfile)
if self.model == "BlackBody":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, self.L, self.Lerr1, self.Lerr2, self.av_mw))
elif self.model == "BlackBody_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "BlackBody2":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Lsec Lsecerr1 Lsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f \n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2,
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, \
self.Lsec, self.Lsecerr1, self.Lsecerr2, self.av_mw))
elif self.model == "BlackBody2_Av":
if not exists:
outfile.write("mjd T Terr1 Terr2 R Rerr1 Rerr2 L Lerr1 Lerr2 Av Averr1 Averr2 Tsec Tsecerr1 Tsecerr2 Rsec Rsecerr1 Rsecerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.T, self.Terr1, self.Terr2, self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2, self.Av, self.Averr1, self.Averr2,\
self.Tsec, self.Tsecerr1, self.Tsecerr2, self.Rsec, self.Rsecerr1, self.Rsecerr2, self.av_mw))
elif self.model == "PowerLaw":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 Av Averr1 Averr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2, self.av_mw))
elif self.model == "PowerLaw_BlackBody":
if not exists:
outfile.write("mjd alpha alphaerr1 alphaerr2 scale scaleerr1 scaleerr2 T Terr1 Terr2 R Rerr1 Rerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n"%\
(self.mjd, self.alpha, self.alphaerr1, self.alphaerr2, self.scale, self.scaleerr1, self.scaleerr2, \
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.av_mw))
elif self.model == "Disk":
if not exists:
outfile.write("mjd M Merr1 Merr2 Rstar Rerr1 Rerr2 Macc Maccerr1 Maccerr2 R_out R_outerr1 R_outerr2 Av_MW\n")
outfile.write("%.5f %.3f %.3f %.3f %.3f %.3f %.3f %.3e %.3e %.3e %.3e %.3e %.3e %.3f\n"%\
                (self.mjd, self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar, self.Rstarerr1, self.Rstarerr2,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
#self.scale, self.scaleerr1, self.scaleerr2, \
self.R_out, self.R_outerr1, self.R_outerr2,\
self.av_mw))
else:
print ("Unknown model! %s"%self.model)
def _get_bol_lum(self, T, R):
'''
T is in K
R in R_sun.
Gives the Lbol in Lsun
'''
L = cnt.sigma_sb * (T * u.K)**4 * 4 * np.pi * (R*u.Rsun)**2
return (L.to(u.Lsun)).value
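    # Quick sanity check (illustrative comment, not part of the original source): with
    # solar values T = 5772 K and R = 1 Rsun this returns approximately 1 Lsun, since
    # L = 4 * pi * R**2 * sigma_sb * T**4.
    #
    #   self._get_bol_lum(5772., 1.)   # ~1.0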
def _get_save_path(self, savefile, plot_name=""):
'''
Checks what savefile name has been given.
        If there is a value, then it just stores it in the plot directory provided.
        If there is no name, then it creates a filename with the suffix provided.
        It also checks if there is already a file named like that, and if that is the case,
it increases the suffix so that it has a higher number, avoiding collision.
'''
#If there is a given name to store the file, then we use that one
        if (not savefile is None):
            if os.path.dirname(savefile) == "":
                name = os.path.join(self.plotdir, os.path.basename(savefile))
            else:
                # Keep the explicitly provided path (the original left 'name' unset in
                # this branch, which would raise an UnboundLocalError at the return).
                name = savefile
#If there is no name, then we will save the plots in the plot directory
#with an automatic name.
# This name will increase a count if the name exists already.
else:
i = 0
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
while (os.path.isfile(name)):
i = i+1
name = os.path.join(self.plotdir, "%s_%.1f_%d.pdf"%(plot_name, self.mjd, i))
return name
def _initialize_parameters(self, plot=False):
'''
        Evaluates the model for the initial-guess parameters that will be used to start
        the MCMC (the least squares optimization pre-fit is currently commented out) and
        optionally plots it against the data.
'''
lam = np.linspace(np.min(self.wls)*0.9, np.max(self.wls)*1.1, 2000)
a_v_wls = extinction.fitzpatrick99(self.wls, a_v=self.av_mw, unit='aa')
reddening = 10**(0.4*a_v_wls)
if self.model == "BlackBody":
flux_ini = self._model_2(lam, self.initT1, self.initR1)
p0 = (self.initT1, self.initR1)
print ("Initial parameters given:", p0)
#Perform a LSQ fit
#params, covar = curve_fit(self._model_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_2(lam, *params)
if plot:
plt.clf()
mask_lims = self.fluxerrs<0
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims], yerr=self.fluxerrs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims], yerr=self.fluxes[mask_lims]*0.2, fmt="o", color="b", uplims=True)
plt.xlabel("Wavelength [A]")
plt.ylabel("$F_{\\lambda}$ [erg/s/cm2/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_bb")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody_Av":
flux_ini = self._model_av_r_2(lam, self.initT1, self.initR1, self.av_host)
p0 = (self.initT1, self.initR1, self.av_host)
print ("Initial ", p0)
#params, covar = curve_fit(self._model_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_bb_av")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2_Av":
flux_ini = self._model2_av_r_2(lam, self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.av_host, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_av_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_av_r_2(lam, *params)
if plot:
plt.clf()
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs")
plt.savefig(name, dpi=200)
elif self.model == "BlackBody2":
flux_ini = self._model2_r_2(lam, self.initT1, self.initR1, self.initT2, self.initR2)
p0 = (self.initT1, self.initR1, self.initT2, self.initR2)
print ("Initial ", p0)
#params, covar = curve_fit(self._model2_r_2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#flux_end = self._model2_r_2(lam, *params)
#flux_1 = self._model_2(lam, *params[0:2])
#flux_2 = self._model_2(lam, *params[2:])
if plot:
plt.clf()
plt.figure(figsize=(6,4))
plt.plot(lam, flux_ini, "r--", label="Fit initial parameters")
#plt.plot(lam, flux_end, label="Best fit LSQ")
#plt.plot(lam, flux_1, label="BB1")
#plt.plot(lam, flux_2, label="BB2")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.legend(loc="best", fontsize=10)
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.yscale("log")
name = self._get_save_path(None, "fluxes_obs_2bb")
plt.savefig(name, dpi=200)
elif self.model == "PowerLaw":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_2(lam, self.alpha, self.scale, self.av_host)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes), 1.2*np.max(self.fluxes))
plt.legend()
name = self._get_save_path(None, "fluxes_obs_powerlaw")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
elif self.model == "PowerLaw_BlackBody":
#params, covar = curve_fit(self._model_powerlaw_2, self.wls , self.fluxes, \
#p0=(self.alpha, self.initR1, self.av_host), sigma=self.fluxerrs, absolute_sigma=True, maxfev = 10000)
lam = np.linspace(3000, 25000, 2000)
fluxpw = self._model_powerlaw_bb_2(lam, self.alpha, self.scale, self.initT1, self.initR1)
if plot:
plt.clf()
plt.plot(lam, fluxpw, label="Fit initial parameters")
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="MW ext. corr")
plt.errorbar(self.wls, self.fluxes/reddening, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
plt.xlabel("Wavelength [A]")
plt.ylabel("$L_{\\lambda}$ [erg/s/A]")
plt.ylim(0.8*np.min(self.fluxes/reddening), 1.2*np.max(self.fluxes))
plt.legend(loc="best")
name = self._get_save_path(None, "fluxes_obs_powerlaw_bb")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
if self.model == 'Disk':
#params = (0.5, 0.2, 5e-9, 1, 2)
p0 = (self.Mstar, self.Rstar, self.logMacc, self.R_out)
#params, covar = curve_fit(self._model_accretion_disk2, self.wls , self.fluxes, \
#p0 = p0, sigma=self.fluxerrs, absolute_sigma=True, maxfev = 20000)
#print ("LSQ fit: Mstar:", params[0], " Rstar", params[1], "logMacc ", \
# params[2], "R_out", params[3])
lam = np.linspace(3000, 25000, 2000)
#flux_disk = self._model_accretion_disk2(lam, params[0], params[1], params[2], params[3])
if plot:
plt.clf()
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0, label="Measurements")
#plt.plot(lam, flux_disk, lw=3)
plt.xlabel("Wavelength [$\\mu$m]")
plt.ylabel("Flux [erg/cm$^2$/s]")
plt.ylim(np.nanmin(self.fluxes)*0.9, np.nanmax(self.fluxes)*1.2)
plt.legend()
name = self._get_save_path(None, "fluxes_obs_disk")
plt.savefig(name, dpi=200)
print ("Saved fit as %s"%name)
def initialize(self, plot=False):
'''
Will transform the magnitudes to fluxes and use the distance to the object to
calculate the luminosity at each wavelength.
'''
if (not os.path.isdir(self.plotdir)):
os.makedirs(self.plotdir)
print ("Created plot directory %s"%self.plotdir)
#Directory where to store the results
if (not os.path.isdir(self.resdir)):
os.makedirs(self.resdir)
print ("Created result directory %s"%(self.resdir))
self.resfile = os.path.join(self.resdir, self.model + os.path.basename(self.resfile))
# generate the data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.wls, self.fluxes, self.fluxerrs = self._band2flux()
#Plot the raw fluxes before correcting them.
'''if (plot):
plt.figure(figsize=(8,6))
plt.errorbar(self.wls, self.fluxes, yerr=self.fluxerrs, marker="o", lw=0)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01, self.bands[i].split(",")[-1], alpha=.4)
name = self._get_save_path(None, "fluxes_observed")
plt.yscale("log")
plt.xlabel("Wavelength [A]")
plt.ylabel("log (Flux/[erg/cm2/s])")
plt.tight_layout()
plt.savefig(name, dpi=200)'''
if not self.distMpc is None and self.distMpc !=0:
print ("Using distance to the source of %.1e Mpc"%self.distMpc)
fluxFactor = (4*np.pi*((self.distMpc*u.Mpc).to(u.cm) )**2).value
elif (self.distMpc is None or self.distMpc==0 )and (not self.z is None and self.z != 0):
self.distMpc = self.cosmo.luminosity_distance(self.z)
#Compute the flux multiplication factor for the object if it is at distance distMpc
#We transform that to cm, as the flux is in erg cm-2 s-1
fluxFactor = (4*np.pi*(self.distMpc.to(u.cm) )**2).value
else: # self.distMpc is None and self.z is None:
#Here we do not use any multiplication flux factor
print ("Warning: no redshift or distance provided!")
fluxFactor = 1
self.fluxes = self.fluxes * fluxFactor
self.fluxerrs = self.fluxerrs * fluxFactor
self._initialize_parameters(plot)
def run(self):
'''
Runs the main MCMC process.
Retrieves the priors, the likelihood process and computes the posterior probability.
'''
xs = self.wls
ys = self.fluxes
errs = self.fluxerrs
if self.model == "BlackBody":
p0 = np.array([ self.initT1, self.initR1])
sigs = np.array([self.initT1*0.2, self.initR1*0.2])
elif self.model == "BlackBody_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host])
sigs = np.array([2000, 10, 0.5])
elif self.model == "BlackBody2":
p0 = np.array([ self.initT1, self.initR1, self.initT2, self.initR2])
sigs = np.array([self.initT1*0.2, self.initR1*0.2, self.initT2*0.2, self.initR2*0.2])
elif self.model == "BlackBody2_Av":
p0 = np.array([ self.initT1, self.initR1, self.av_host, self.initT2, self.initR2])
sigs = np.array([2000, 5, 1, 2000, 5])
elif self.model == "PowerLaw":
p0 = np.array([ self.alpha, self.scale, self.av_host])
sigs = np.array([2, 3, 2])
elif self.model == "PowerLaw_BlackBody":
p0 = np.array([ self.alpha, self.scale, self.initT1, self.initR1])
sigs = np.array([2, 3, 2000, 2])
elif self.model == "Disk":
p0 = np.array([ self.Mstar, self.Rstar, self.logMacc, self.R_out])
sigs = np.array([0.1, 0.01, 1, 0.1])
print ("Initialized with p0", p0, " and sigmas ", sigs)
else:
print ("-------------------CRITICAL ERROR!----------------------")
print ("-------------------UNKNOWN model! %s----------------------"%self.model)
print ("-------------------CRITICAL ERROR!----------------------")
sys.exit()
ndim = len(p0)
        # ensemble MCMC
p0s = emcee.utils.sample_ball(p0, sigs, self.nwalkers)
# initialize the ball of initial conditions
#Supports the threads=X argument for parallelization
sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self._logposterior,\
args=(xs, ys, errs), threads=10)
pos, lnprob, state = sampler.run_mcmc(p0s, self.burnin)
print ("Burning phase finished")
sampler.reset()
pos, lnprob, state = sampler.run_mcmc(pos, self.niterations)
print ('Acceptance ratio', sampler.acceptance_fraction)
self.sampler = sampler
print ("MCMC main phase finished")
self._fill_output()
self._save_output()
def plot_corner_posteriors(self, savefile=None):
'''
Plots the corner plot of the MCMC results.
'''
if self.model == "BlackBody2":
labels=["T1", "R1", "T2", "R2"]
elif self.model.startswith("BlackBody"):
labels=["T1", "R1", "Av", "T2", "R2"]
elif self.model == "PowerLaw":
labels=["alpha", "scale", "Av"]
elif self.model == "PowerLaw_BlackBody":
labels = ["alpha", "scale", "T", "R"]
elif self.model == "Disk":
labels = ["Mstar", "Rstar", "logMacc", "R_out"]
ndim = len(self.sampler.flatchain[0,:])
chain = self.sampler
samples = chain.flatchain
samples = samples[:,0:ndim]
plt.figure(figsize=(8,8))
fig = corner.corner(samples, labels=labels[0:ndim], quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
fig.suptitle("MJD: %.2f"%self.mjd)
name = self._get_save_path(savefile, "mcmc_posteriors")
plt.savefig(name)
plt.close("all")
plt.figure(figsize=(8,ndim*3))
for n in range(ndim):
plt.subplot(ndim,1,n+1)
chain = self.sampler.chain[:,:,n]
nwalk, nit = chain.shape
for i in np.arange(nwalk):
plt.plot(chain[i], lw=0.1)
plt.ylabel(labels[n])
plt.xlabel("Iteration")
name_walkers = self._get_save_path(savefile, "mcmc_walkers")
plt.tight_layout()
plt.savefig(name_walkers)
plt.close("all")
def plot_fit(self, lambdaFlambda=False):
'''
Plots the best fit model to the data.
'''
lam = np.linspace( np.min(self.wls) -1500 , np.max(self.wls) + 1500, 1000)
plt.clf()
plt.figure(figsize=(8,6))
mask_lims = self.fluxerrs<0
if lambdaFlambda:
factor_obs=self.wls
else:
factor_obs=np.ones_like(self.wls)
plt.errorbar(self.wls[~mask_lims], self.fluxes[~mask_lims]*factor_obs[~mask_lims], yerr=self.fluxerrs[~mask_lims]*factor_obs[~mask_lims], marker="o", color="b", lw=0, label="Measurements")
plt.errorbar(self.wls[mask_lims], self.fluxes[mask_lims]*factor_obs[mask_lims], yerr=self.fluxes[mask_lims]*0.2*factor_obs[mask_lims], fmt="o", color="b", uplims=True)
for i in range(len(self.wls)):
plt.text(self.wls[i], self.fluxes[i]*1.01*factor_obs[i], self.bands[i], alpha=.4, fontsize=8)
if self.model == "BlackBody":
fluxbb = self._model(lam, (self.T, self.R))
if lambdaFlambda:
factor = lam
else:
factor = np.ones_like(lam)
plt.plot(lam, fluxbb*factor, "k-", label="BB fit")
plt.title("T: %d K R:%d R$_{\odot}$ Lumiosity %.2e L$_{\odot}$"%(self.T, self.R, self.L))
elif self.model == "BlackBody_Av":
fluxbb = self._model(lam, (self.T, self.R))
fluxbb_red = self._model_av_r(lam, (self.T, self.R, self.Av))
plt.plot(lam, fluxbb, "k-", label="BB fit")
plt.plot(lam, fluxbb_red, "red", label="BB fit + reddening")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f"%(np.round(self.T,0), np.round(self.R,0), np.round(self.L,1), self.Av))
elif self.model == "BlackBody2_Av":
fluxbb_red = self._model2_av(lam, (self.T, self.R, self.Av))
fluxbb_secondary_red = self._model2_av(lam, (self.Tsec, self.Rsec, self.Av))
fluxbb_with_seconday = self._model2_av(lam, (self.T, self.R, self.Av, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_red, "k-", label="BB1 fit + reddening")
plt.plot(lam, fluxbb_secondary_red, "k--", label="BB2 fit + reddening")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %.1f K R:%.1f R$_{\odot}$ Lumiosity %.1e L$_{\odot}$ Av: %.2f\n T2: %.1f R2: %.1f"%(self.T, \
self.R, self.L, self.Av, self.Tsec, self.Rsec))
elif self.model == "BlackBody2":
fluxbb_primary = self._model(lam, (self.T, self.R))
fluxbb_secondary = self._model(lam, (self.Tsec, self.Rsec))
fluxbb_with_seconday = self._model2_r(lam, (self.T, self.R, self.Tsec, self.Rsec))
plt.plot(lam, fluxbb_primary, "k-", label="BB1 fit")
plt.plot(lam, fluxbb_secondary, "k--", label="BB2 fit")
plt.plot(lam, fluxbb_with_seconday, "green", label="BB1 + BB2")
plt.title("T: %d K R:%d R$_{\odot}$ T2: %d R2: %d"%( self.T, \
self.R, self.Tsec, self.Rsec))
elif self.model == "PowerLaw":
flux = self._model_powerlaw(lam, (self.alpha, self.scale, self.Av))
plt.plot(lam, flux, "k-", label="PowerLaw + reddening")
plt.title("$\\alpha$: %.1f Av: %.2f"%(self.alpha, self.Av))
elif self.model == "PowerLaw_BlackBody":
flux = self._model_powerlaw_bb(lam, (self.alpha, self.scale, self.T, self.R))
flux_pw = self._model_powerlaw(lam, (self.alpha, self.scale, 0))
flux_bb = self._model(lam, (self.T, self.R))
plt.plot(lam, flux, "k-", label="PowerLaw + BlackBody")
plt.plot(lam, flux_pw, "b--", label="PowerLaw")
plt.plot(lam, flux_bb, "g:", label="BlackBody")
plt.title("$\\alpha$: %.1f scale: %.2e T: %.1f R:%.1f"%(self.alpha, self.scale, self.T, self.R))
elif self.model == "Disk":
fluxdisk = self._model_accretion_disk(lam, (self.Mstar, self.Rstar, self.logMacc, self.R_out))
plt.plot(lam, fluxdisk, "k-", label="Disk fit")
plt.title("M:%.3f M$_{\\odot}$ R:%.3f R$_{\odot}$ M$_{acc}$:%.2f R_out: %.2f"%(self.Mstar, self.Rstar, self.logMacc, self.R_out))
ymin, ymax = plt.ylim()
#plt.ylim(np.max([ymin, np.min(self.fluxes)*0.01]), ymax)
plt.xlabel("Wavelength [$\\AA$]")
if (lambdaFlambda):
plt.ylabel("$\\lambda F_{\\lambda}$ [erg/s]")
plt.ylim(ymin=np.min(self.fluxes*factor_obs) * 0.1)
else:
plt.ylabel("$F_{\\lambda}$ [erg/s/$\\AA$]")
plt.ylim(ymin=np.min(self.fluxes) * 0.1)
plt.yscale("log")
plt.legend()
name = self._get_save_path(None, "mcmc_best_fit_model")
plt.savefig(name)
plt.close("all")
def write_fit_params(self):
'''
Write the best fit parameters of the model to the standard output.
'''
if self.model.startswith("BlackBody"):
#Prints the best parameters
print ('''
Temperature: \t %.3f -%.3f +%.3f K
Radius: \t\t %.2e -%.2e +%.2e R$_{\odot}$
Luminosity: \t %.3e -%.3e +%.3e L$_{\odot}$'''%(\
self.T, self.Terr1, self.Terr2, \
self.R, self.Rerr1, self.Rerr2, \
self.L, self.Lerr1, self.Lerr2))
if self.model == "BlackBody_Av":
print (" Av: \t\t\t %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
if self.model == "BlackBody2":
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.2e -%.2e +%.2e R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
print (" Luminosity2 %.3e -%.3e +%.3e L$_{\odot}$"%(self.Lsec, self.Lsecerr1, self.Lsecerr2))
if self.model == "BlackBody2_Av":
print (" Av: %.1f -%.1f +%.1f mag"%(self.Av, self.Averr1, self.Averr2))
print (" Temperature2: %.1f -%.1f +%.1f K"%(self.Tsec, self.Tsecerr1, self.Tsecerr2))
print (" Radius2: %.1f -%.1f +%.1f R$_{\odot}$"%(self.Rsec, self.Rsecerr1, self.Rsecerr2))
if (self.model == "PowerLaw"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale : %.2e -%.2e +%.2e
Av %.2f -%.2f +%.2f'''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2, \
self.Av, self.Averr1, self.Averr2))
if (self.model == "PowerLaw_BlackBody"):
print ('''
alpha: %.2f -%.2f +%.2f
Scale (R): %.2e -%.2e +%.2e
T %.2f -%.2f +%.2f
R %.2f -%.2f +%.2f '''%(\
self.alpha, self.alphaerr1, self.alphaerr2, \
self.scale, self.scaleerr1, self.scaleerr2,\
self.T, self.Terr1, self.Terr2,\
self.R, self.Rerr1, self.Rerr2 ))
if (self.model == "Disk"):
print ('''
Mstar: %.3f$_{-%.3f}^{+%.3f}$
Rstar (10^8 cm): %.3f -%.3f +%.3f
logMacc %.3f$_{-%.3f}^{+%.3f}$
R_out %.3f$_{-%.3f}^{+%.3f}$ '''%(\
self.Mstar, self.Mstarerr1, self.Mstarerr2, \
self.Rstar*(u.Rsun.to(u.cm))/1e8, self.Rstarerr1*(u.Rsun.to(u.cm))/1e8, self.Rstarerr2*(u.Rsun.to(u.cm))/1e8,\
self.logMacc, self.logMaccerr1, self.logMaccerr2,\
self.R_out, self.R_outerr1, self.R_outerr2 ))
| mit | -9,118,717,144,658,973,000 | 39.41373 | 196 | 0.504743 | false |
disco-framework/disco | priv/general/components/gui/gui.py | 1 | 18070 | #!/usr/bin/python
import sys
from PyQt4 import QtCore, QtGui
from ui_mainview import Ui_MainWindow
import json
from jsonreader import JsonReader
##################################################
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# status bar
self.labelProblemSpec = QtGui.QLabel()
self.labelProblemTime = QtGui.QLabel()
self.labelCurrentRound = QtGui.QLabel()
self.labelWorkerInput = QtGui.QLabel()
self.ui.statusbar.addWidget(self.labelProblemSpec, 1)
self.ui.statusbar.addWidget(self.labelProblemTime, 1)
self.ui.statusbar.addWidget(self.labelCurrentRound, 1)
self.ui.statusbar.addWidget(self.labelWorkerInput, 1)
# set menu shortcuts
self.ui.actionLoadGameState.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+O")))
self.ui.actionSaveGameState.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+S")))
self.ui.actionQuit.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+Q")))
self.ui.actionStartRound.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+R")))
self.ui.actionAddScores.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+A")))
self.ui.actionKillAllWorkers.setShortcut(QtGui.QKeySequence(self.tr("Ctrl+K")))
self.DataCollector = JsonReader(self)
self.connect(self.DataCollector, QtCore.SIGNAL("received_data"), self.received)
self.connect(self.DataCollector, QtCore.SIGNAL("worker_updated"), self.update_worker)
self.connect(self.DataCollector, QtCore.SIGNAL("round_started"), self.start_round)
self.connect(self.DataCollector, QtCore.SIGNAL("round_ended"), self.end_round)
self.connect(self.DataCollector, QtCore.SIGNAL("worker_input_changed"), self.update_worker_input)
self.connect(self.DataCollector, QtCore.SIGNAL("problem_chosen"), self.choose_problem)
self.connect(self.DataCollector, QtCore.SIGNAL("all_data"), self.update_all)
self.connect(self.DataCollector, QtCore.SIGNAL("save_game_state_reply"), self.save_game_state_reply)
self.connect(self.DataCollector, QtCore.SIGNAL("load_game_state_reply"), self.load_game_state_reply)
self.DataCollector.start()
self.problemAnswerTime = 0
self.roundTimerRemaining = 0
self.roundTimer = QtCore.QTimer()
QtCore.QObject.connect(self.roundTimer, QtCore.SIGNAL("timeout()"), self.roundTimer_tick)
# file menu
QtCore.QObject.connect(self.ui.actionLoadGameState, QtCore.SIGNAL("triggered()"), self.btnLoadGameState_clicked)
QtCore.QObject.connect(self.ui.actionSaveGameState, QtCore.SIGNAL("triggered()"), self.btnSaveGameState_clicked)
QtCore.QObject.connect(self.ui.actionReloadAllData, QtCore.SIGNAL("triggered()"), self.btnReloadAllData_clicked)
QtCore.QObject.connect(self.ui.actionQuit, QtCore.SIGNAL("triggered()"), self.btnQuit_clicked)
# round menu
QtCore.QObject.connect(self.ui.actionStartRound, QtCore.SIGNAL("triggered()"), self.btnStartRound_clicked)
QtCore.QObject.connect(self.ui.actionAddScores, QtCore.SIGNAL("triggered()"), self.btnAddScores_clicked)
QtCore.QObject.connect(self.ui.actionKillAllWorkers, QtCore.SIGNAL("triggered()"), self.btnKillAllWorkers_clicked)
# worker tab
self.ui.tableWorker.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.ui.tableWorker.customContextMenuRequested.connect(self.tableWorker_requestContextMenu)
# io tab
QtCore.QObject.connect(self.ui.btnSend, QtCore.SIGNAL("clicked()"), self.btnSend_clicked)
QtCore.QObject.connect(self.ui.edtSend, QtCore.SIGNAL("returnPressed()"), self.btnSend_clicked)
# worker table header
thh = self.ui.tableWorker.horizontalHeader()
thh.setVisible(True)
thh.resizeSection(0, 50) # ranking group
thh.resizeSection(1, 60) # id
thh.resizeSection(2, 170) # name
thh.resizeSection(3, 230) # proposition
thh.resizeSection(4, 100) # points
thh.resizeSection(5, 50) # processed points
thh.resizeSection(6, 100) # problem points (accumulated over all rounds on this problem)
thh.setSortIndicator(1, QtCore.Qt.AscendingOrder)
tvh = self.ui.tableWorker.verticalHeader()
tvh.setVisible(True)
tvh.setResizeMode(QtGui.QHeaderView.Fixed)
self.reset_problem_list([])
self.worker_blocked = {}
def closeEvent(self, e):
self.send(json.dumps({'action': 'quit program'}))
self.DataCollector.terminate() # TODO: "This function is dangerous and its use is discouraged"
self.DataCollector.wait()
e.accept()
app.exit()
###############################
## main menu / buttons ##
###############################
## file menu
def btnLoadGameState_clicked(self):
fileName = str(QtGui.QFileDialog.getOpenFileName())
if fileName != "":
self.send(json.dumps({'action': 'load game state', 'file path': fileName}))
def btnSaveGameState_clicked(self):
fileName = str(QtGui.QFileDialog.getSaveFileName())
if fileName != "":
self.send(json.dumps({'action': 'save game state', 'file path': fileName}))
def btnReloadAllData_clicked(self):
self.send(json.dumps({'action': 'get all data'}))
def btnQuit_clicked(self):
self.close()
## problems menu
def btnChooseProblem_clicked(self, idx, action, oldChecked):
action.setChecked(oldChecked) # undo auto check
self.send(json.dumps({'action': 'choose problem', 'problem idx': idx}))
## round menu
def btnStartRound_clicked(self):
self.send(json.dumps({'action': 'start round'}))
def btnAddScores_clicked(self):
self.send(json.dumps({'action': 'add scores'}))
self.ui.actionAddScores.setEnabled(False)
def btnKillAllWorkers_clicked(self):
self.send(json.dumps({'action': 'kill all workers'}))
## worker tab
def tableWorker_requestContextMenu(self, position):
workerId = str(self.ui.tableWorker.item(self.ui.tableWorker.currentRow(), 1).text())
# create menu
menu = QtGui.QMenu()
actApply = menu.addAction("&Apply proposition")
actBlock = None
actUnblock = None
if self.worker_blocked[workerId]:
actUnblock = menu.addAction("Un&block worker '" + workerId + "'")
else:
actBlock = menu.addAction("&Block worker '" + workerId + "'")
# execute menu synchronously
action = menu.exec_(self.ui.tableWorker.viewport().mapToGlobal(position))
if action != None:
if action == actApply:
if QtGui.QMessageBox.information(self, "Apply proposition", "Really apply proposition from " + workerId + "?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No) == QtGui.QMessageBox.Yes:
self.send(json.dumps({'action': 'apply proposition', 'worker id': workerId}))
elif action == actBlock:
self.send(json.dumps({'action': 'block worker', 'worker id': workerId}))
elif action == actUnblock:
self.send(json.dumps({'action': 'unblock worker', 'worker id': workerId}))
## io tab
def btnSend_clicked(self):
msg = self.ui.edtSend.text()
self.send(msg)
self.ui.edtSend.clear()
#######################
## Round timer ##
#######################
def roundTimer_tick(self):
self.roundTimerRemaining -= self.roundTimer.interval()
if self.roundTimerRemaining <= 0:
self.roundTimer.stop()
self.roundTimerRemaining = 0
self.labelProblemTime.setText("Answer time remaining\n " +
str(self.roundTimerRemaining/1000) + "s")
#######################
## JSON events ##
#######################
def update_worker(self, id, proposition, caption, score, processedScore, problemScore, blocked, working):
row = self.get_worker_table_row(id)
if proposition == None:
proposition = ""
if row != None:
self.update_worker_by_row(row, id, proposition, caption, score, processedScore, problemScore, blocked, working)
def start_round(self, round):
self.ui.actionStartRound.setEnabled(False)
self.ui.menuProblems.setEnabled(False)
self.ui.actionAddScores.setEnabled(False)
self.labelCurrentRound.setText("Round (running)\n " + str(round))
self.roundTimerRemaining = self.problemAnswerTime
self.roundTimer.start(100)
def end_round(self, round):
self.ui.actionStartRound.setEnabled(True)
self.ui.menuProblems.setEnabled(True)
self.ui.actionAddScores.setEnabled(True)
self.labelCurrentRound.setText("Round\n " + str(round))
self.roundTimerRemaining = 0
self.roundTimer_tick()
def update_worker_input(self, workerInput):
def format_wi_line(line): return shorten_string(28, line)
wiString = "\n".join(list(map(format_wi_line, workerInput)))
self.labelWorkerInput.setText("Worker input for next round:\n" + wiString)
def choose_problem(self, problemIdx):
self.roundTimer.stop()
self.reset_problem_list(self.problemList, problemIdx)
probDesc, probSpec, answerTime, startState = self.problemList[problemIdx]
self.labelProblemSpec.setText("Problem\n " + probDesc)
self.labelProblemTime.setText("Answer time\n " + str(answerTime/1000.0) + "s")
self.problemAnswerTime = answerTime
self.labelCurrentRound.setText("")
def update_all(self, running, workerList, problemList, problemIdx, round, workerInput, problemState):
self.clear_worker_table()
for id, name, group, proposition, caption, score, processedScore, problemScore, blocked, working in workerList:
self.add_worker(id, name, group, proposition, caption, score, processedScore, problemScore, blocked, working)
self.update_worker_input(workerInput)
if running:
self.start_round(round)
else:
self.end_round(round)
self.problemList = problemList
self.choose_problem(problemIdx)
def save_game_state_reply(self, result):
if result == "ok":
msg = "Game state successfully saved."
QtGui.QMessageBox.information(self, "Game state saved", msg, QtGui.QMessageBox.Ok)
else:
if result == "enoent" : msg = "No such file or directory!"
elif result == "enotdir": msg = "Not a directory!"
elif result == "enospc" : msg = "No space left on device!"
elif result == "eacces" : msg = "Permission denied!"
elif result == "eisdir" : msg = "Illegal operation on a directory!"
else : msg = "Unknown error: " + result
QtGui.QMessageBox.warning(self, "Error saving game state", msg, QtGui.QMessageBox.Ok)
def load_game_state_reply(self, result):
if result == "ok":
msg = "Game state successfully loaded."
QtGui.QMessageBox.information(self, "Game state loaded", msg, QtGui.QMessageBox.Ok)
else:
if result == "eformat": msg = "Invalid file format!"
elif result == "enoent" : msg = "No such file or directory!"
elif result == "enotdir": msg = "Not a directory!"
elif result == "eacces" : msg = "Permission denied!"
elif result == "eisdir" : msg = "Illegal operation on a directory!"
else : msg = "Unknown error: " + result
QtGui.QMessageBox.warning(self, "Error loading game state", msg, QtGui.QMessageBox.Ok)
#############################
## private functions ##
#############################
def send(self, msg):
self.ui.txtRecv.appendHtml("<span style='font-weight:bold;color:red'>send:</span> "
+ escape_html(msg).rstrip("\n").replace("\n","<br />"))
print(msg)
sys.stdout.flush()
def received(self, msg):
self.ui.txtRecv.appendHtml("<span style='font-weight:bold;color:blue'>recv:</span> "
+ escape_html(msg).rstrip("\n").replace("\n","<br />"))
def get_worker_table_row(self, id):
for row in range(0, self.ui.tableWorker.rowCount()):
if self.ui.tableWorker.item(row, 1).text() == id:
return row
return None
def clear_worker_table(self):
self.worker_blocked = {}
self.ui.tableWorker.clearContents()
self.ui.tableWorker.setRowCount(0)
def add_worker(self, id, name, group, proposition, propCaption, score, processedScore, problemScore, blocked, working):
if proposition == None:
proposition = ""
self.worker_blocked[id] = blocked != "no"
row = self.ui.tableWorker.rowCount()
self.ui.tableWorker.setRowCount(row + 1)
self.ui.tableWorker.setSortingEnabled(False)
item = QtGui.QTableWidgetItem()
item.setText(group)
self.ui.tableWorker.setItem(row, 0, item)
item = QtGui.QTableWidgetItem()
item.setText(id)
self.ui.tableWorker.setItem(row, 1, item)
item = QtGui.QTableWidgetItem()
item.setText(name)
self.ui.tableWorker.setItem(row, 2, item)
item = QtGui.QTableWidgetItem()
self.ui.tableWorker.setItem(row, 3, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 4, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 5, item)
item = CustomTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
self.ui.tableWorker.setItem(row, 6, item)
self.update_worker_by_row(row, id, proposition, propCaption, score, processedScore, problemScore, blocked, working)
self.ui.tableWorker.setSortingEnabled(True)
def update_worker_by_row(self, row, id, proposition, propCaption, score, processedScore, problemScore, blocked, working):
isBlocked = blocked != "no"
blockedIdx = blocked["idx"] if "idx" in blocked else 0
self.worker_blocked[id] = isBlocked
self.ui.tableWorker.setSortingEnabled(False)
brush = QtGui.QBrush(QtGui.QColor(190, 190, 190))
if self.worker_blocked[id]:
brush.setStyle(QtCore.Qt.SolidPattern)
else:
brush.setStyle(QtCore.Qt.NoBrush)
self.ui.tableWorker.item(row, 0).setBackground(brush)
self.ui.tableWorker.item(row, 1).setBackground(brush)
self.ui.tableWorker.item(row, 2).setBackground(brush)
item = self.ui.tableWorker.item(row, 3)
item.setText(propCaption)
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 4)
item.setText(str(score))
item.setCustomSortData(isBlocked, {False: int(score), True: blockedIdx}[isBlocked])
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 5)
item.setText(str(processedScore))
item.setCustomSortData(isBlocked, {False: int(processedScore), True: blockedIdx}[isBlocked])
item.setBackground(brush)
item = self.ui.tableWorker.item(row, 6)
item.setText(str(problemScore))
item.setCustomSortData(isBlocked, {False: int(problemScore), True: blockedIdx}[isBlocked])
item.setBackground(brush)
if self.ui.tableWorker.cellWidget(row, 2) == None:
if working:
self.ui.tableWorker.setCellWidget(row, 2, WorkingWidget(self))
else:
if not working:
self.ui.tableWorker.removeCellWidget(row, 2)
self.ui.tableWorker.setSortingEnabled(True)
def reset_problem_list(self, lst, checkedIdx=None):
self.problemList = lst
self.ui.menuProblems.clear()
if lst == []:
action = QtGui.QAction(self)
action.setText("--- no problems ---")
action.setEnabled(False)
self.ui.menuProblems.addAction(action)
else:
for idx, (description, spec, answerTime, state) in enumerate(lst):
action = QtGui.QAction(self)
action.setText(description + "\t" + str(answerTime/1000.0) + "s")
action.setCheckable(True)
if checkedIdx == idx:
action.setChecked(True)
QtCore.QObject.connect(action, QtCore.SIGNAL("triggered()"),
lambda i=idx, a=action, chk=(checkedIdx==idx):
self.btnChooseProblem_clicked(i, a, chk))
self.ui.menuProblems.addAction(action)
##################################################
class WorkingWidget(QtGui.QLabel):
def __init__(self, parent=None):
super(WorkingWidget, self).__init__(parent)
self.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter)
movie = QtGui.QMovie("./gears.gif")
self.setMovie(movie)
movie.start()
##################################################
class CustomTableWidgetItem(QtGui.QTableWidgetItem):
def __init__(self):
# call custom constructor with item type 'UserType'
QtGui.QTableWidgetItem.__init__(self, QtGui.QTableWidgetItem.UserType)
self.blocked = False
self.sortKey = 0
def setCustomSortData(self, blocked, sortKey):
self.blocked = blocked
self.sortKey = sortKey
# override the 'less than' operator
def __lt__(self, other):
if self.blocked == other.blocked:
return self.sortKey > other.sortKey
else:
return self.blocked < other.blocked
##################################################
def shorten_string(chars, string):
return (string[:(chars-3)] + '...') if len(string) > chars else string
def escape_html(str):
    return str.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
##################################################
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
win = MainWindow()
win.show()
sys.exit(app.exec_())
| apache-2.0 | -6,686,924,166,606,951,000 | 38.714286 | 124 | 0.639236 | false |
au9ustine/elrond | elrond/aws/s3.py | 1 | 3432 | import os
import sys
import json
import threading
import boto3
from boto3.s3.transfer import S3Transfer
from elrond.crypto import get_file_digest
DEFAULT_CHUNK_SIZE = 64 * 1024 * 1024
ELROND_S3_SINGLETON_CLIENT = None
ELROND_S3_SUPPORTED_REGIONS = [
'EU',
'eu-west-1',
'us-west-1',
'us-west-2',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'ap-northeast-1',
'sa-east-1',
'cn-north-1',
'eu-central-1'
]
def analyse(file_path):
res_st = os.stat(file_path, follow_symlinks=True)
return {
'mode': res_st.st_mode,
'atime': res_st.st_atime,
'mtime': res_st.st_mtime,
'ctime': res_st.st_ctime,
'size': res_st.st_size,
'digest': {
'algorithm': 'sha256',
'value': get_file_digest(file_path)
}
}
def update_metadata(file_path, metadata):
os.chmod(file_path, metadata['mode'])
os.utime(file_path, (metadata['atime'], metadata['mtime']))
def metadata2str(metadata):
return json.dumps(metadata,separators=(',', ':'))
def str2metadata(metadata_str):
return json.loads(metadata_str)
def chunk(stream, chuck_size=DEFAULT_CHUNK_SIZE):
for block in iter(lambda:stream.read(chuck_size),b''):
yield block
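# Illustrative usage sketch (not part of the original source): stream a local file in
# DEFAULT_CHUNK_SIZE blocks, e.g. to feed a multipart upload.  The path and consumer
# below are hypothetical.
#
#   with open('/tmp/example.bin', 'rb') as stream:
#       for block in chunk(stream):
#           process(block)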
def get_client():
global ELROND_S3_SINGLETON_CLIENT
if ELROND_S3_SINGLETON_CLIENT is None:
ELROND_S3_SINGLETON_CLIENT = boto3.client('s3')
return ELROND_S3_SINGLETON_CLIENT
def get_buckets():
client = get_client()
return [bucket['Name'] for bucket in client.list_buckets()['Buckets']]
def get_bucket(bucket_name):
client = get_client()
if bucket_name in get_buckets():
location = client.get_bucket_location(
Bucket=bucket_name
)['LocationConstraint']
return (bucket_name, location)
else:
location = os.environ['AWS_DEFAULT_REGION']
assert location in ELROND_S3_SUPPORTED_REGIONS
res = client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': location
}
)
return (bucket_name, location)
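# Illustrative usage sketch (not part of the original source).  The bucket name below is
# hypothetical; AWS credentials and AWS_DEFAULT_REGION (one of ELROND_S3_SUPPORTED_REGIONS)
# must be configured in the environment.
#
#   name, location = get_bucket('my-elrond-backups')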
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
def upload(bucket_name, file_path, key_name):
file_metadata = analyse(file_path)
    # Use multipart upload for files larger than 100 MiB.  (The original only assigned
    # multipart_mode inside the if-branch, leaving it undefined for smaller files.)
    multipart_mode = file_metadata['size'] > 100 * 1024 * 1024
client = get_client()
if multipart_mode:
pass
else:
transfer = S3Transfer(client)
transfer.upload_file(
file_path, bucket_name, key_name,
extra_args={
'ACL': 'private',
'Metadata': metadata2str(file_metadata),
'ContentType': 'application/octet-stream'
},
callback=ProgressPercentage(file_path))
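# Illustrative usage sketch (not part of the original source); bucket, file path and key
# below are hypothetical.
#
#   upload('my-elrond-backups', '/tmp/archive.tar.gz', 'backups/archive.tar.gz')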
| mit | 4,340,258,530,075,880,400 | 27.363636 | 74 | 0.586247 | false |
cerrno/neurokernel | examples/timing/run_gpu_slow.py | 2 | 1898 | #!/usr/bin/env python
"""
Run timing test (GPU) scaled over number of ports.
"""
import csv
import glob
import multiprocessing as mp
import os
import re
import subprocess
import sys
import numpy as np
from neurokernel.tools.misc import get_pids_open
try:
from subprocess import DEVNULL
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
out_file = sys.argv[1]
script_name = 'timing_demo_gpu_slow.py'
trials = 3
lpus = 2
def check_and_print_output(*args):
for i in xrange(5):
# CUDA < 7.0 doesn't properly clean up IPC-related files; since
# these can cause problems, we manually remove them before launching
# each job:
ipc_files = glob.glob('/dev/shm/cuda.shm*')
for ipc_file in ipc_files:
# Only remove files that are not being held open by any processes:
if not get_pids_open(ipc_file):
try:
os.remove(ipc_file)
except:
pass
try:
out = subprocess.check_output(*args, env=os.environ, stderr=DEVNULL)
except Exception as e:
out = e.output
if 'error' not in out:
break
print out,
return out
pool = mp.Pool(1)
results = []
for spikes in np.linspace(50, 15000, 25, dtype=int):
for i in xrange(trials):
r = pool.apply_async(check_and_print_output,
[['srun', '-n', '1', '-c', str(lpus+2),
'-p', 'huxley',
'--gres=gpu:%s' % lpus,
'python', script_name,
'-u', str(lpus), '-s', str(spikes),
'-g', '0', '-m', '50']])
results.append(r)
f = open(out_file, 'w', 0)
w = csv.writer(f)
for r in results:
w.writerow(r.get().strip('[]\n\"').split(', '))
f.close()
| bsd-3-clause | 5,892,850,443,696,471,000 | 26.507246 | 80 | 0.53372 | false |
hsoft/pluginbuilder | pluginbuilder/util.py | 1 | 12467 | import os, sys, zipfile, time
from modulegraph.find_modules import PY_SUFFIXES
from modulegraph.modulegraph import os_listdir
import macholib.util
def os_path_islink(path):
"""
os.path.islink with zipfile support.
Luckily zipfiles cannot contain symlink, therefore the implementation is
trivial.
"""
return os.path.islink(path)
def os_readlink(path):
"""
os.readlink with zipfile support.
Luckily zipfiles cannot contain symlink, therefore the implementation is
trivial.
"""
return os.readlink(path)
def os_path_isdir(path):
"""
os.path.isdir that understands zipfiles.
Assumes that you're checking a path the is the result of os_listdir and
might give false positives otherwise.
"""
while path.endswith('/') and path != '/':
path = path[:-1]
zf, zp = path_to_zip(path)
if zf is None:
return os.path.isdir(zp)
else:
zip = zipfile.ZipFile(zf)
try:
info = zip.getinfo(zp)
except KeyError:
return True
else:
# Not quite true, you can store information about directories in
# zipfiles, but those have a lash at the end of the filename
return False
def copy_resource(source, destination, dry_run=0):
"""
Copy a resource file into the application bundle
"""
if os.path.isdir(source):
# XXX: This is wrong, need to call ourselves recursively
if not dry_run:
if not os.path.exists(destination):
os.mkdir(destination)
for fn in os_listdir(source):
copy_resource(os.path.join(source, fn),
os.path.join(destination, fn), dry_run=dry_run)
else:
copy_file_data(source, destination, dry_run=dry_run)
def copy_file_data(source, destination, dry_run=0):
zf, zp = path_to_zip(source)
if zf is None:
data = open(zp,'rb').read()
else:
data = get_zip_data(zf, zp)
if not dry_run:
fp = open(destination, 'wb')
fp.write(data)
fp.close()
def get_zip_data(path_to_zip, path_in_zip):
zf = zipfile.ZipFile(path_to_zip)
return zf.read(path_in_zip)
def path_to_zip(path):
"""
Returns (pathtozip, pathinzip). If path isn't in a zipfile pathtozip
will be None
"""
orig_path = path
from distutils.errors import DistutilsFileError
if os.path.exists(path):
return (None, path)
else:
rest = ''
while not os.path.exists(path):
path, r = os.path.split(path)
if not path:
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
rest = os.path.join(r, rest)
if not os.path.isfile(path):
# Directory really doesn't exist
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
try:
zf = zipfile.ZipFile(path)
except zipfile.BadZipfile:
raise DistutilsFileError("File doesn't exist: %s"%(orig_path,))
if rest.endswith('/'):
rest = rest[:-1]
return path, rest
def get_mtime(path, mustExist=True):
"""
Get mtime of a path, even if it is inside a zipfile
"""
try:
return os.stat(path).st_mtime
except os.error:
from distutils.errors import DistutilsFileError
try:
path, rest = path_to_zip(path)
except DistutilsFileError:
if not mustExist:
return -1
raise
zf = zipfile.ZipFile(path)
info = zf.getinfo(rest)
return time.mktime(info.date_time + (0, 0, 0))
def newer(source, target):
"""
distutils.dep_utils.newer with zipfile support
"""
msource = get_mtime(source)
mtarget = get_mtime(target, mustExist=False)
return msource > mtarget
def is_python_package(path):
"""Returns whether `path` is a python package (has a __init__.py(c|o) file).
"""
if os_path_isdir(path):
for p in os_listdir(path):
if p.startswith('__init__.') and p[8:] in {'.py', '.pyc', '.pyo'}:
return True
return False
def make_exec(path):
mask = os.umask(0)
os.umask(mask)
os.chmod(path, os.stat(path).st_mode | (0o111 & ~mask))
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def mergecopy(src, dest):
return macholib.util.mergecopy(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy):
"""Recursively merge a directory tree using mergecopy()."""
return macholib.util.mergetree(src, dst, condition=condition, copyfn=copyfn)
def move(src, dst):
return macholib.util.move(src, dst)
LOADER = """
def __load():
import imp, os, sys, os.path
ext = %r
library_path = os.environ['LIBRARYPATH']
dynload_path = os.path.join(library_path, 'lib-dynload')
ext = os.path.join(dynload_path, ext)
if os.path.exists(ext):
mod = imp.load_dynamic(__name__, ext)
else:
raise ImportError(repr(ext) + " not found")
__load()
del __load
"""
def make_loader(fn):
return LOADER % fn
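# Illustrative example (not part of the original source): make_loader('_foo.so') returns
# the LOADER template with ext = '_foo.so', i.e. a stub module body that, at import time,
# loads lib-dynload/_foo.so from the directory named by the LIBRARYPATH environment
# variable via imp.load_dynamic.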
def byte_compile(py_files, optimize=0, force=0,
target_dir=None, verbose=1, dry_run=0,
direct=None):
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
from tempfile import mktemp
from distutils.util import execute, spawn
script_name = mktemp(".py")
if verbose:
print("writing byte-compilation script '%s'" % script_name)
if not dry_run:
script = open(script_name, "w")
script.write("""
from pluginbuilder.util import byte_compile
from modulegraph.modulegraph import *
files = [
""")
for f in py_files:
script.write(repr(f) + ",\n")
script.write("]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
target_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, target_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, verbose=verbose, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
verbose=verbose, dry_run=dry_run)
else:
from py_compile import compile
from distutils.dir_util import mkpath
for mod in py_files:
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
if mod.filename == mod.identifier:
cfile = os.path.basename(mod.filename)
dfile = cfile + (__debug__ and 'c' or 'o')
else:
cfile = mod.identifier.replace('.', os.sep)
if mod.packagepath:
dfile = cfile + os.sep + '__init__.py' + (__debug__ and 'c' or 'o')
else:
dfile = cfile + '.py' + (__debug__ and 'c' or 'o')
if target_dir:
cfile = os.path.join(target_dir, dfile)
if force or newer(mod.filename, cfile):
if verbose:
print("byte-compiling %s to %s" % (mod.filename, dfile))
if not dry_run:
mkpath(os.path.dirname(cfile))
suffix = os.path.splitext(mod.filename)[1]
if suffix in ('.py', '.pyw'):
zfile, pth = path_to_zip(mod.filename)
if zfile is None:
compile(mod.filename, cfile, dfile)
else:
fn = dfile + '.py'
open(fn, 'wb').write(get_zip_data(zfile, pth))
compile(mod.filename, cfile, dfile)
os.unlink(fn)
elif suffix in PY_SUFFIXES:
# Minor problem: This will happily copy a file
# <mod>.pyo to <mod>.pyc or <mod>.pyc to
# <mod>.pyo, but it does seem to work.
copy_file_data(mod.filename, cfile)
else:
raise RuntimeError \
("Don't know how to handle %r" % mod.filename)
else:
if verbose:
print("skipping byte-compilation of %s to %s" % \
(mod.filename, dfile))
SCMDIRS = {'CVS', '.svn', '.hg', '.git'}
def skipscm(ofn):
fn = os.path.basename(ofn)
if fn in SCMDIRS:
return False
return True
def iter_platform_files(path, is_platform_file=macholib.util.is_platform_file):
"""
Iterate over all of the platform files in a directory
"""
for root, dirs, files in os.walk(path):
for fn in files:
fn = os.path.join(root, fn)
if is_platform_file(fn):
yield fn
def copy_tree(src, dst,
preserve_mode=1,
preserve_times=1,
preserve_symlinks=0,
update=0,
verbose=0,
dry_run=0,
condition=None):
"""
Copy an entire directory tree 'src' to a new location 'dst'. Both
'src' and 'dst' must be directory names. If 'src' is not a
directory, raise DistutilsFileError. If 'dst' does not exist, it is
created with 'mkpath()'. The end result of the copy is that every
file in 'src' is copied to 'dst', and directories under 'src' are
recursively copied to 'dst'. Return the list of files that were
copied or might have been copied, using their output name. The
return value is unaffected by 'update' or 'dry_run': it is simply
the list of all files under 'src', with the names changed to be
under 'dst'.
'preserve_mode' and 'preserve_times' are the same as for
'copy_file'; note that they only apply to regular files, not to
directories. If 'preserve_symlinks' is true, symlinks will be
copied as symlinks (on platforms that support them!); otherwise
(the default), the destination of the symlink will be copied.
'update' and 'verbose' are the same as for 'copy_file'.
"""
assert isinstance(src, str), repr(src)
assert isinstance(dst, str), repr(dst)
from distutils.dir_util import mkpath
from distutils.file_util import copy_file
from distutils.dep_util import newer
from distutils.errors import DistutilsFileError
from distutils import log
if condition is None:
condition = skipscm
if not dry_run and not os_path_isdir(src):
raise DistutilsFileError("cannot copy tree '%s': not a directory" % src)
try:
names = os_listdir(src)
    except os.error as exc:
        (errno, errstr) = exc.args
if dry_run:
names = []
else:
raise DistutilsFileError("error listing files in '%s': %s" % (src, errstr))
if not dry_run:
mkpath(dst)
outputs = []
for n in names:
src_name = os.path.join(src, n)
dst_name = os.path.join(dst, n)
if (condition is not None) and (not condition(src_name)):
continue
if preserve_symlinks and os_path_islink(src_name):
link_dest = os_readlink(src_name)
log.info("linking %s -> %s", dst_name, link_dest)
if not dry_run:
if update and not newer(src, dst_name):
pass
else:
if os_path_islink(dst_name):
os.remove(dst_name)
os.symlink(link_dest, dst_name)
outputs.append(dst_name)
elif os_path_isdir(src_name):
outputs.extend(
copy_tree(src_name, dst_name, preserve_mode,
preserve_times, preserve_symlinks, update,
dry_run=dry_run, condition=condition))
else:
copy_file(src_name, dst_name, preserve_mode,
preserve_times, update, dry_run=dry_run)
outputs.append(dst_name)
return outputs
| mit | 2,080,846,094,289,265,200 | 30.722646 | 87 | 0.562365 | false |
jimcarreer/hpack | setup.py | 1 | 1691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Get the version
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('hpack/__init__.py', 'r') as f:
text = f.read()
match = re.search(version_regex, text)
if match:
version = match.group(1)
else:
raise RuntimeError("No version number found!")
# Stealing this from Kenneth Reitz
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = ['hpack']
setup(
name='hpack',
version=version,
description='Pure-Python HPACK header compression',
long_description=open('README.rst').read() + '\n\n' + open('HISTORY.rst').read(),
author='Cory Benfield',
author_email='[email protected]',
url='http://hyper.rtfd.org',
packages=packages,
package_data={'': ['LICENSE', 'README.rst', 'CONTRIBUTORS.rst', 'HISTORY.rst', 'NOTICES']},
package_dir={'hpack': 'hpack'},
include_package_data=True,
license='MIT License',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| mit | -773,770,130,712,185,600 | 29.196429 | 95 | 0.608516 | false |
CliMT/climt-future | climt/_lib/rrtmg_lw/setup.py | 1 | 2948 | from setuptools import setup, Extension
from Cython.Distutils import build_ext
# This line only needed if building with NumPy in Cython file.
from numpy import get_include
from os import system
import os
# compile the fortran modules without linking
module_list = [
'parkind.f90',
'parrrtm.f90',
'rrlw_cld.f90',
'rrlw_con.f90',
'rrlw_kg01.f90',
'rrlw_kg02.f90',
'rrlw_kg03.f90',
'rrlw_kg04.f90',
'rrlw_kg05.f90',
'rrlw_kg06.f90',
'rrlw_kg07.f90',
'rrlw_kg08.f90',
'rrlw_kg09.f90',
'rrlw_kg10.f90',
'rrlw_kg11.f90',
'rrlw_kg12.f90',
'rrlw_kg13.f90',
'rrlw_kg14.f90',
'rrlw_kg15.f90',
'rrlw_kg16.f90',
'rrlw_ncpar.f90',
'rrlw_ref.f90',
'rrlw_tbl.f90',
'rrlw_vsn.f90',
'rrlw_wvn.f90']
sources_list = [
'rrtmg_lw_cldprop.f90',
'rrtmg_lw_cldprmc.f90',
'rrtmg_lw_rtrn.f90',
'rrtmg_lw_rtrnmr.f90',
'rrtmg_lw_rtrnmc.f90',
'rrtmg_lw_setcoef.f90',
'rrtmg_lw_taumol.f90',
'rrtmg_lw_rad.nomcica.f90',
'mcica_random_numbers.f90',
'rrtmg_lw_init.f90',
'mcica_subcol_gen_lw.f90',
'rrtmg_lw_rad.f90',
'rrtmg_lw_c_binder.f90']
unoptimised_sources_list = [
'rrtmg_lw_k_g.f90',
]
object_file_list = []
fc = os.getenv('FC', 'gfortran ')
fflags = os.getenv('FFLAGS', ' -fPIC -fno-range-check ')
cflags = os.getenv('CFLAGS', '-fPIC')
f_opt_flags = os.getenv('CLIMT_OPTIMIZE_FLAG', '-O3')
f_no_opt_flags = os.getenv('CLIMT_NO_OPTIMIZE_FLAG', ' -O0 ')
ldflags = os.getenv('LDFLAGS', '-lgfortran')
print('Compiling Modules')
for module in module_list:
output_file = module[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+module+' -c -o '+output_file+' '+f_opt_flags+fflags
print(compilation_command)
system(compilation_command)
print('Compiling Sources')
for source in sources_list:
output_file = source[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+source+' -c -o '+output_file+' '+f_opt_flags+fflags
print(compilation_command)
system(compilation_command)
print('Compiling k coefficient tables')
for source in unoptimised_sources_list:
output_file = source[:-3]+'o'
object_file_list.append(output_file)
compilation_command = fc+source+' -c -o '+output_file+f_no_opt_flags+fflags
print(compilation_command)
system(compilation_command)
link_args_list = object_file_list + [ldflags]
ext_modules = [
Extension( # module name:
'_rrtm_lw',
# source file:
['_rrtm_lw.pyx'],
# other compile args for gcc
extra_compile_args=[cflags, f_opt_flags, ldflags],
# other files to link to
extra_link_args=link_args_list)]
setup(name='_rrtm_lw',
cmdclass={'build_ext': build_ext},
# Needed if building with NumPy.
# This includes the NumPy headers when compiling.
include_dirs=[get_include()],
ext_modules=ext_modules)
| bsd-3-clause | -8,972,440,418,118,426,000 | 25.8 | 80 | 0.637042 | false |
hachreak/invenio-accounts | invenio_accounts/models.py | 1 | 5487 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Database models for accounts."""
from __future__ import absolute_import, print_function
from datetime import datetime
from flask import current_app, session
from flask_security import RoleMixin, UserMixin
from invenio_db import db
from sqlalchemy.orm import validates
from sqlalchemy_utils import IPAddressType, Timestamp
userrole = db.Table(
'accounts_userrole',
db.Column('user_id', db.Integer(), db.ForeignKey(
'accounts_user.id', name='fk_accounts_userrole_user_id')),
db.Column('role_id', db.Integer(), db.ForeignKey(
'accounts_role.id', name='fk_accounts_userrole_role_id')),
)
"""Relationship between users and roles."""
class Role(db.Model, RoleMixin):
"""Role data model."""
__tablename__ = "accounts_role"
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
"""Role name."""
description = db.Column(db.String(255))
"""Role description."""
def __str__(self):
"""Return the name and description of the role."""
return '{0.name} - {0.description}'.format(self)
class User(db.Model, UserMixin):
"""User data model."""
__tablename__ = "accounts_user"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), unique=True)
"""User email."""
password = db.Column(db.String(255))
"""User password."""
active = db.Column(db.Boolean(name='active'))
"""Flag to say if the user is active or not ."""
confirmed_at = db.Column(db.DateTime)
"""When the user confirmed the email address."""
last_login_at = db.Column(db.DateTime)
"""When the user logged-in for the last time."""
current_login_at = db.Column(db.DateTime)
"""When user logged into the current session."""
last_login_ip = db.Column(IPAddressType, nullable=True)
"""Last user IP address."""
current_login_ip = db.Column(IPAddressType, nullable=True)
"""Current user IP address."""
login_count = db.Column(db.Integer)
"""Count how many times the user logged in."""
roles = db.relationship('Role', secondary=userrole,
backref=db.backref('users', lazy='dynamic'))
"""List of the user's roles."""
@validates('last_login_ip', 'current_login_ip')
def validate_ip(self, key, value):
"""Hack untrackable IP addresses."""
# NOTE Flask-Security stores 'untrackable' value to IPAddressType
# field. This incorrect value causes ValueError on loading
# user object.
if value == 'untrackable': # pragma: no cover
value = None
return value
def __str__(self):
"""Representation."""
return 'User <id={0.id}, email={0.email}>'.format(self)
class SessionActivity(db.Model, Timestamp):
"""User Session Activity model.
Instances of this model correspond to a session belonging to a user.
"""
__tablename__ = "accounts_user_session_activity"
sid_s = db.Column(db.String(255), primary_key=True)
"""Serialized Session ID. Used as the session's key in the kv-session
store employed by `flask-kvsession`.
Named here as it is in `flask-kvsession` to avoid confusion.
"""
user_id = db.Column(db.Integer, db.ForeignKey(
User.id, name='fk_accounts_session_activity_user_id'))
"""ID of user to whom this session belongs."""
user = db.relationship(User, backref='active_sessions')
ip = db.Column(db.String(80), nullable=True)
"""IP address."""
country = db.Column(db.String(3), nullable=True)
"""Country name."""
browser = db.Column(db.String(80), nullable=True)
"""User browser."""
browser_version = db.Column(db.String(30), nullable=True)
"""Browser version."""
os = db.Column(db.String(80), nullable=True)
"""User operative system name."""
device = db.Column(db.String(80), nullable=True)
"""User device."""
@classmethod
def query_by_expired(cls):
"""Query to select all expired sessions."""
lifetime = current_app.permanent_session_lifetime
expired_moment = datetime.utcnow() - lifetime
return cls.query.filter(cls.created < expired_moment)
@classmethod
def query_by_user(cls, user_id):
"""Query to select user sessions."""
return cls.query.filter_by(user_id=user_id)
@classmethod
def is_current(cls, sid_s):
"""Check if the session is the current one."""
return session.sid_s == sid_s
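# Hypothetical maintenance sketch (added for illustration; not part of the
# original module): expired sessions could be purged periodically with, e.g.,
#   SessionActivity.query_by_expired().delete(synchronize_session=False)
#   db.session.commit()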
| gpl-2.0 | 6,014,488,001,262,512,000 | 31.087719 | 76 | 0.659194 | false |
khertan/KhtNotes | khtnotes/merge3/merge3.py | 1 | 18192 | # Copyright (C) 2005-2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#from __future__ import absolute_import
# mbp: "you know that thing where cvs gives you conflict markers?"
# s: "i hate that."
import errors
import patiencediff
import textfile
def intersect(ra, rb):
"""Given two ranges return the range where they intersect or None.
>>> intersect((0, 10), (0, 6))
(0, 6)
>>> intersect((0, 10), (5, 15))
(5, 10)
>>> intersect((0, 10), (10, 15))
>>> intersect((0, 9), (10, 15))
>>> intersect((0, 9), (7, 15))
(7, 9)
"""
# preconditions: (ra[0] <= ra[1]) and (rb[0] <= rb[1])
sa = max(ra[0], rb[0])
sb = min(ra[1], rb[1])
if sa < sb:
return sa, sb
else:
return None
def compare_range(a, astart, aend, b, bstart, bend):
"""Compare a[astart:aend] == b[bstart:bend], without slicing.
"""
if (aend - astart) != (bend - bstart):
return False
for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
if a[ia] != b[ib]:
return False
else:
return True
class Merge3(object):
"""3-way merge of texts.
Given BASE, OTHER, THIS, tries to produce a combined text
incorporating the changes from both BASE->OTHER and BASE->THIS.
All three will typically be sequences of lines."""
def __init__(self, base, a, b, is_cherrypick=False, allow_objects=False):
"""Constructor.
:param base: lines in BASE
:param a: lines in A
:param b: lines in B
:param is_cherrypick: flag indicating if this merge is a cherrypick.
When cherrypicking b => a, matches with b and base do not conflict.
:param allow_objects: if True, do not require that base, a and b are
plain Python strs. Also prevents BinaryFile from being raised.
Lines can be any sequence of comparable and hashable Python
objects.
"""
if not allow_objects:
textfile.check_text_lines(base)
textfile.check_text_lines(a)
textfile.check_text_lines(b)
self.base = base
self.a = a
self.b = b
self.is_cherrypick = is_cherrypick
def merge_lines(self,
name_a=None,
name_b=None,
name_base=None,
start_marker='<<<<<<<',
mid_marker='=======',
end_marker='>>>>>>>',
base_marker=None,
reprocess=False):
"""Return merge in cvs-like form.
"""
newline = '\n'
if len(self.a) > 0:
if self.a[0].endswith('\r\n'):
newline = '\r\n'
elif self.a[0].endswith('\r'):
newline = '\r'
if base_marker and reprocess:
raise errors.CantReprocessAndShowBase()
if name_a:
start_marker = start_marker + ' ' + name_a
if name_b:
end_marker = end_marker + ' ' + name_b
if name_base and base_marker:
base_marker = base_marker + ' ' + name_base
merge_regions = self.merge_regions()
if reprocess is True:
merge_regions = self.reprocess_merge_regions(merge_regions)
for t in merge_regions:
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield self.b[i]
elif what == 'conflict':
yield start_marker + newline
for i in range(t[3], t[4]):
yield self.a[i]
if base_marker is not None:
yield base_marker + newline
for i in range(t[1], t[2]):
yield self.base[i]
yield mid_marker + newline
for i in range(t[5], t[6]):
yield self.b[i]
yield end_marker + newline
else:
raise ValueError(what)
def merge(self):
"""Return merge"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield self.b[i]
elif what == 'conflict':
for i in range(t[3], t[4]):
yield self.a[i]
for i in range(t[5], t[6]):
yield self.b[i]
else:
raise ValueError(what)
def merge_annotated(self):
"""Return merge with conflicts, showing origin of lines.
Most useful for debugging merge.
"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
for i in range(t[1], t[2]):
yield 'u | ' + self.base[i]
elif what == 'a' or what == 'same':
for i in range(t[1], t[2]):
yield what[0] + ' | ' + self.a[i]
elif what == 'b':
for i in range(t[1], t[2]):
yield 'b | ' + self.b[i]
elif what == 'conflict':
yield '<<<<\n'
for i in range(t[3], t[4]):
yield 'A | ' + self.a[i]
yield '----\n'
for i in range(t[5], t[6]):
yield 'B | ' + self.b[i]
yield '>>>>\n'
else:
raise ValueError(what)
def merge_groups(self):
"""Yield sequence of line groups. Each one is a tuple:
'unchanged', lines
Lines unchanged from base
'a', lines
Lines taken from a
'same', lines
Lines taken from a (and equal to b)
'b', lines
Lines taken from b
'conflict', base_lines, a_lines, b_lines
Lines from base were changed to either a or b and conflict.
"""
for t in self.merge_regions():
what = t[0]
if what == 'unchanged':
yield what, self.base[t[1]:t[2]]
elif what == 'a' or what == 'same':
yield what, self.a[t[1]:t[2]]
elif what == 'b':
yield what, self.b[t[1]:t[2]]
elif what == 'conflict':
yield (what,
self.base[t[1]:t[2]],
self.a[t[3]:t[4]],
self.b[t[5]:t[6]])
else:
raise ValueError(what)
def merge_regions(self):
"""Return sequences of matching and conflicting regions.
This returns tuples, where the first value says what kind we
have:
'unchanged', start, end
Take a region of base[start:end]
'same', astart, aend
b and a are different from base but give the same result
'a', start, end
Non-clashing insertion from a[start:end]
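        'b', start, end
             Non-clashing insertion from b[start:end]
        'conflict', zstart, zend, astart, aend, bstart, bend
             Both sides changed base[zstart:zend]; a[astart:aend] and
             b[bstart:bend] hold the competing versions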
Method is as follows:
The two sequences align only on regions which match the base
        and both descendants. These are found by doing a two-way diff
of each one against the base, and then finding the
intersections between those regions. These "sync regions"
are by definition unchanged in both and easily dealt with.
The regions in between can be in any of three cases:
conflicted, or changed on only one side.
"""
# section a[0:ia] has been disposed of, etc
iz = ia = ib = 0
for zmatch, zend, amatch, aend, \
bmatch, bend in self.find_sync_regions():
matchlen = zend - zmatch
# invariants:
# matchlen >= 0
# matchlen == (aend - amatch)
# matchlen == (bend - bmatch)
len_a = amatch - ia
len_b = bmatch - ib
#len_base = zmatch - iz
# invariants:
# assert len_a >= 0
# assert len_b >= 0
# assert len_base >= 0
#print 'unmatched a=%d, b=%d' % (len_a, len_b)
if len_a or len_b:
# try to avoid actually slicing the lists
same = compare_range(self.a, ia, amatch,
self.b, ib, bmatch)
if same:
yield 'same', ia, amatch
else:
equal_a = compare_range(self.a, ia, amatch,
self.base, iz, zmatch)
equal_b = compare_range(self.b, ib, bmatch,
self.base, iz, zmatch)
if equal_a and not equal_b:
yield 'b', ib, bmatch
elif equal_b and not equal_a:
yield 'a', ia, amatch
elif not equal_a and not equal_b:
if self.is_cherrypick:
for node in self._refine_cherrypick_conflict(
iz, zmatch, ia, amatch,
ib, bmatch):
yield node
else:
yield 'conflict', \
iz, zmatch, ia, amatch, ib, bmatch
else:
raise AssertionError(
"can't handle a=b=base but unmatched")
ia = amatch
ib = bmatch
iz = zmatch
# if the same part of the base was deleted on both sides
# that's OK, we can just skip it.
if matchlen > 0:
# invariants:
# assert ia == amatch
# assert ib == bmatch
# assert iz == zmatch
yield 'unchanged', zmatch, zend
iz = zend
ia = aend
ib = bend
def _refine_cherrypick_conflict(self, zstart,
zend, astart, aend, bstart, bend):
"""When cherrypicking b => a, ignore matches with b and base."""
# Do not emit regions which match, only regions which do not match
matches = patiencediff.PatienceSequenceMatcher(None,
self.base[zstart:zend], self.b[bstart:bend]).get_matching_blocks()
last_base_idx = 0
        last_b_idx = 0
yielded_a = False
for base_idx, b_idx, match_len in matches:
#conflict_z_len = base_idx - last_base_idx
conflict_b_len = b_idx - last_b_idx
if conflict_b_len == 0: # There are no lines in b which conflict,
# so skip it
pass
else:
if yielded_a:
yield ('conflict',
zstart + last_base_idx, zstart + base_idx,
aend, aend, bstart + last_b_idx, bstart + b_idx)
else:
# The first conflict gets the a-range
yielded_a = True
yield ('conflict', zstart + last_base_idx, zstart +
base_idx,
astart, aend, bstart + last_b_idx, bstart + b_idx)
last_base_idx = base_idx + match_len
last_b_idx = b_idx + match_len
if last_base_idx != zend - zstart or last_b_idx != bend - bstart:
if yielded_a:
yield ('conflict', zstart + last_base_idx, zstart + base_idx,
aend, aend, bstart + last_b_idx, bstart + b_idx)
else:
# The first conflict gets the a-range
yielded_a = True
yield ('conflict', zstart + last_base_idx, zstart + base_idx,
astart, aend, bstart + last_b_idx, bstart + b_idx)
if not yielded_a:
yield ('conflict', zstart, zend, astart, aend, bstart, bend)
def reprocess_merge_regions(self, merge_regions):
"""Where there are conflict regions, remove the agreed lines.
Lines where both A and B have made the same changes are
eliminated.
"""
for region in merge_regions:
if region[0] != "conflict":
yield region
continue
type, iz, zmatch, ia, amatch, ib, bmatch = region
a_region = self.a[ia:amatch]
b_region = self.b[ib:bmatch]
matches = patiencediff.PatienceSequenceMatcher(
None, a_region, b_region).get_matching_blocks()
next_a = ia
next_b = ib
for region_ia, region_ib, region_len in matches[:-1]:
region_ia += ia
region_ib += ib
reg = self.mismatch_region(next_a, region_ia, next_b,
region_ib)
if reg is not None:
yield reg
yield 'same', region_ia, region_len + region_ia
next_a = region_ia + region_len
next_b = region_ib + region_len
reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
if reg is not None:
yield reg
@staticmethod
def mismatch_region(next_a, region_ia, next_b, region_ib):
if next_a < region_ia or next_b < region_ib:
return 'conflict', None, None, next_a, region_ia, next_b, region_ib
def find_sync_regions(self):
"""Return a list of sync regions,where both descendents match the base.
Generates a list of (base1, base2, a1, a2, b1, b2). There is
always a zero-length sync region at the end of all the files.
"""
ia = ib = 0
amatches = patiencediff.PatienceSequenceMatcher(
None, self.base, self.a).get_matching_blocks()
bmatches = patiencediff.PatienceSequenceMatcher(
None, self.base, self.b).get_matching_blocks()
len_a = len(amatches)
len_b = len(bmatches)
sl = []
while ia < len_a and ib < len_b:
abase, amatch, alen = amatches[ia]
bbase, bmatch, blen = bmatches[ib]
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
i = intersect((abase, abase + alen), (bbase, bbase + blen))
if i:
intbase = i[0]
intend = i[1]
intlen = intend - intbase
# found a match of base[i[0], i[1]]; this may be less than
# the region that matches in either one
# assert intlen <= alen
# assert intlen <= blen
# assert abase <= intbase
# assert bbase <= intbase
asub = amatch + (intbase - abase)
bsub = bmatch + (intbase - bbase)
aend = asub + intlen
bend = bsub + intlen
# assert self.base[intbase:intend] == self.a[asub:aend], \
# (self.base[intbase:intend], self.a[asub:aend])
# assert self.base[intbase:intend] == self.b[bsub:bend]
sl.append((intbase, intend,
asub, aend,
bsub, bend))
# advance whichever one ends first in the base text
if (abase + alen) < (bbase + blen):
ia += 1
else:
ib += 1
intbase = len(self.base)
abase = len(self.a)
bbase = len(self.b)
sl.append((intbase, intbase, abase, abase, bbase, bbase))
return sl
def find_unconflicted(self):
"""Return a list of ranges in base that are not conflicted."""
am = patiencediff.PatienceSequenceMatcher(
None, self.base, self.a).get_matching_blocks()
bm = patiencediff.PatienceSequenceMatcher(
None, self.base, self.b).get_matching_blocks()
unc = []
while am and bm:
# there is an unconflicted block at i; how long does it
# extend? until whichever one ends earlier.
a1 = am[0][0]
a2 = a1 + am[0][2]
b1 = bm[0][0]
b2 = b1 + bm[0][2]
i = intersect((a1, a2), (b1, b2))
if i:
unc.append(i)
if a2 < b2:
del am[0]
else:
del bm[0]
return unc
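# Illustrative example (added for clarity; not part of the original module):
#
#   base  = ['a\n', 'b\n', 'c\n']
#   this  = ['a\n', 'B\n', 'c\n']    # changed the second line
#   other = ['a\n', 'b\n', 'C\n']    # changed the third line
#   m3 = Merge3(base, this, other)
#   merged = list(m3.merge_lines(name_a='THIS', name_b='OTHER'))
#
# Non-overlapping edits merge cleanly (here to ['a\n', 'B\n', 'C\n']); if both
# sides had changed the same line differently, that region would instead be
# wrapped in '<<<<<<< THIS' / '=======' / '>>>>>>> OTHER' conflict markers.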
def main(argv):
# as for diff3 and meld the syntax is "MINE BASE OTHER"
a = file(argv[1], 'rt').readlines()
base = file(argv[2], 'rt').readlines()
b = file(argv[3], 'rt').readlines()
m3 = Merge3(base, a, b)
#for sr in m3.find_sync_regions():
# print sr
# sys.stdout.writelines(m3.merge_lines(name_a=argv[1], name_b=argv[3]))
sys.stdout.writelines(m3.merge())
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
| gpl-3.0 | -2,084,280,794,020,240,600 | 34.952569 | 79 | 0.484883 | false |
tschalch/pyTray | src/setup.py | 1 | 6933 | #!/usr/bin/env python
#this installer script uses InnoSetup to generate a complete Installer
from distutils.core import setup
import py2exe
import os, os.path, sys
import glob
#adding lib directory to module search path
libpath = os.path.abspath(os.path.dirname(sys.argv[0])) + "/lib"
sys.path.append(os.path.abspath(libpath))
includes = ["encodings", "encodings.latin_1",]
#options = {"py2exe": {"compressed": 1,
# "optimize": 2,
# "ascii": 1,
# "bundle_files": 1,
# "includes":includes}},
################################################################
# A program using wxPython
# The manifest will be inserted as resource into test_wx.exe. This
# gives the controls the Windows XP appearance (if run on XP ;-)
#
# Another option would be to store if in a file named
# test_wx.exe.manifest, and probably copy it with the data_files
# option.
#
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
RT_MANIFEST = 24
################################################################
# arguments for the setup() call
pyTray = dict(
script = "pytray.py",
other_resources = [(RT_MANIFEST, 1, manifest_template % dict(prog="pyTray"))],
dest_base = r"pyTray",
icon_resources = [(1,"files/images/icon.ico")])
zipfile = r"lib\shardlib"
options = {"py2exe": {"compressed": 1,
"optimize": 2}}
################################################################
import os
class InnoScript:
def __init__(self,
name,
lib_dir,
dist_dir,
windows_exe_files = [],
lib_files = [],
version = "1.0"):
self.lib_dir = lib_dir
self.dist_dir = dist_dir
if not self.dist_dir[-1] in "\\/":
self.dist_dir += "\\"
self.name = name
self.version = version
self.windows_exe_files = [self.chop(p) for p in windows_exe_files]
self.lib_files = [self.chop(p) for p in lib_files]
def chop(self, pathname):
assert pathname.startswith(self.dist_dir)
return pathname[len(self.dist_dir):]
def create(self, pathname="dist\\pytray.iss"):
self.pathname = pathname
ofi = self.file = open(pathname, "w")
print >> ofi, "; WARNING: This script has been created by py2exe. Changes to this script"
print >> ofi, "; will be overwritten the next time py2exe is run!"
print >> ofi, r"[Setup]"
print >> ofi, r"AppName=%s" % self.name
print >> ofi, r"AppVerName=%s %s" % (self.name, self.version)
print >> ofi, r"DefaultDirName={pf}\%s" % self.name
print >> ofi, r"DefaultGroupName=%s" % self.name
print >> ofi
print >> ofi, r"[Files]"
for path in self.windows_exe_files + self.lib_files:
print >> ofi, r'Source: "%s"; DestDir: "{app}\%s"; Flags: ignoreversion' % (path, os.path.dirname(path))
print >> ofi
print >> ofi, r"[Icons]"
for path in self.windows_exe_files:
print >> ofi, r'Name: "{group}\%s"; Filename: "{app}\%s"' % \
(self.name, path)
print >> ofi, 'Name: "{group}\Uninstall %s"; Filename: "{uninstallexe}"' % self.name
def compile(self):
try:
import ctypes
except ImportError:
try:
import win32api
except ImportError:
import os
os.startfile(self.pathname)
else:
print "Ok, using win32api."
win32api.ShellExecute(0, "compile",
self.pathname,
None,
None,
0)
else:
print "Cool, you have ctypes installed."
res = ctypes.windll.shell32.ShellExecuteA(0, "compile",
self.pathname,
None,
None,
0)
if res < 32:
raise RuntimeError, "ShellExecute failed, error %d" % res
################################################################
from py2exe.build_exe import py2exe
class build_installer(py2exe):
# This class first builds the exe file(s), then creates a Windows installer.
# You need InnoSetup for it.
def run(self):
# First, let py2exe do it's work.
py2exe.run(self)
lib_dir = self.lib_dir
dist_dir = self.dist_dir
# create the Installer, using the files py2exe has created.
script = InnoScript("pytray",
lib_dir,
dist_dir,
self.windows_exe_files,
self.lib_files)
print "*** creating the inno setup script***"
script.create()
print "*** compiling the inno setup script***"
script.compile()
# Note: By default the final setup.exe will be in an Output subdirectory.
################################################################
setup(
    description='Crystallization Management Software',
options = options,
# The lib directory contains everything except the executables and the python dll.
zipfile = zipfile,
windows = [pyTray],
# use out build_installer class as extended py2exe build command
cmdclass = {"py2exe": build_installer},
data_files=[(r"files", glob.glob(r"files/*.*")),
(r"files/test", glob.glob(r"files/test/*.*")),
(r"files/Dtd", glob.glob(r"files/Dtd/*.*")),
(r"files/fonts", glob.glob(r"files/fonts/*.*")),
(r"files/images", glob.glob(r"files/images/*.*")),
],
author='Thomas Schalch',
author_email='[email protected]',
packages = ["gui","dataStructures","util","test"],
)
| bsd-3-clause | 2,843,598,152,450,568,700 | 34.298429 | 116 | 0.492572 | false |
tdegeus/GooseEYE | docs/examples/clusters_dilate_periodic.py | 1 | 2926 | r'''
Plot and/or check.
Usage:
script [options]
Options:
-s, --save Save output for later check.
-c, --check Check against earlier results.
-p, --plot Plot.
-h, --help Show this help.
'''
# <snippet>
import numpy as np
import GooseEYE
# generate image
I = np.zeros((21, 21), dtype='bool')
I[4, 4] = True
I[18, 19] = True
I[19, 19] = True
I[20, 19] = True
I[19, 18] = True
I[19, 20] = True
# clusters
C = GooseEYE.Clusters(I).labels()
# dilate
CD = GooseEYE.dilate(C)
# </snippet>
if __name__ == '__main__':
import docopt
args = docopt.docopt(__doc__)
if args['--save']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'w') as data:
data['I'] = I
data['C'] = C
data['CD'] = CD
if args['--check']:
import h5py
with h5py.File('clusters_dilate_periodic.h5', 'r') as data:
assert np.all(np.equal(data['I'][...], I))
assert np.all(np.equal(data['C'][...], C))
assert np.all(np.equal(data['CD'][...], CD))
if args['--plot']:
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
# color-scheme: modify such that the background is white
# N.B. for a transparent background -> 4th column == 1.
cmap = cm.jet(range(256))
cmap[0, :3] = 1.0
cmap = mpl.colors.ListedColormap(cmap)
try:
plt.style.use(['goose', 'goose-latex'])
except:
pass
fig, axes = plt.subplots(figsize=(18, 6), nrows=1, ncols=3)
ax = axes[0]
im = ax.imshow(I, clim=(0, 1), cmap=mpl.colors.ListedColormap(cm.gray([0, 255])))
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'image')
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(im, cax=cax)
cbar.set_ticks([0, 1])
ax = axes[1]
im = ax.imshow(CD, clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 20])
ax.yaxis.set_ticks([0, 20])
ax.set_xlim([-0.5, 20.5])
ax.set_ylim([-0.5, 20.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'clusters + dilate')
ax = axes[2]
im = ax.imshow(np.tile(CD, (3, 3)), clim=(0, np.max(C) + 1), cmap=cmap)
ax.xaxis.set_ticks([0, 60])
ax.yaxis.set_ticks([0, 60])
ax.set_xlim([-0.5, 60.5])
ax.set_ylim([-0.5, 60.5])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_title (r'periodic copy')
plt.savefig('clusters_dilate_periodic.svg')
| gpl-3.0 | -889,297,396,052,922,000 | 25.125 | 89 | 0.520164 | false |
NovikovMA/python_training_mantis | test/test_project_del.py | 1 | 3996 | # -*- coding: utf-8 -*-
__author__ = 'M.Novikov'
from model.project import Project   # Mantis projects
from random import randrange        # Randomness helpers
import random                       # Randomness helpers
# Project deletion test, verified through the user interface
def test_project_del_ui(app):
    if app.project.count() == 0:                          # Make sure at least one project exists in the list
        app.project.create(Project(name="Test project",description="Description test project."))  # Add a new project
    old_projects = app.project.get_project_list()         # Project list before deletion
    index = randrange(len(old_projects))                  # Pick a random index
    app.project.delete_by_index(index)                    # Delete the project
    new_projects = app.project.get_project_list()         # Project list after deletion
    old_projects[index:index+1] = []                      # Remove the project from the reference list
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
# Project deletion test, verified through the database
def test_project_del_db(app, orm):
    if len(orm.get_project_list()) == 0:                  # Make sure at least one project exists in the list
        app.project.create(Project(name="Test project",description="Description test project."))  # Add a new project
    old_projects = orm.get_project_list()                 # Project list before deletion
    project = random.choice(old_projects)                 # Pick a random project
    app.project.delete_by_id(project.id)                  # Delete the project
    new_projects = orm.get_project_list()                 # Project list after deletion
    old_projects.remove(project)                          # Remove the project from the reference list
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
# Project deletion test via the SOAP protocol
def test_project_del_soap(app):
    if len(app.soap.get_project_list()) == 0:             # Make sure at least one project exists in the list
        app.project.create(Project(name="Test project",description="Description test project."))  # Add a new project
    old_projects = app.soap.get_project_list()            # Project list before deletion
    project = random.choice(old_projects)                 # Pick a random project
    app.project.delete_by_id(project.id)                  # Delete the project
    new_projects = app.soap.get_project_list()            # Project list after deletion
    old_projects.remove(project)                          # Remove the project from the reference list
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
| apache-2.0 | 2,116,186,601,909,424,000 | 76.714286 | 127 | 0.563725 | false |
drupdates/Slack | __init__.py | 1 | 1072 | """ Send report using Slack. """
from drupdates.settings import Settings
from drupdates.utils import Utils
from drupdates.constructors.reports import Report
import json, os
class Slack(Report):
""" Slack report plugin. """
def __init__(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
settings_file = current_dir + '/settings/default.yaml'
self.settings = Settings()
self.settings.add(settings_file)
def send_message(self, report_text):
""" Post the report to a Slack channel or DM a specific user."""
url = self.settings.get('slackURL')
user = self.settings.get('slackUser')
payload = {}
payload['text'] = report_text
payload['new-bot-name'] = user
direct = self.settings.get('slackRecipient')
channel = self.settings.get('slackChannel')
if direct:
payload['channel'] = '@' + direct
elif channel:
payload['channel'] = '#' + direct
Utils.api_call(url, 'Slack', 'post', data=json.dumps(payload))
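# Illustrative usage sketch (added for clarity; assumes the Drupdates settings
# provide slackURL, slackUser and either slackRecipient or slackChannel):
#
#   report = Slack()
#   report.send_message("Drupdates run complete")
#
# send_message() then posts the text to the configured incoming-webhook URL
# through Utils.api_call(), using the payload keys built above.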
| mit | 81,357,146,827,252,450 | 33.580645 | 72 | 0.616604 | false |
maxime-beck/compassion-modules | mobile_app_connector/mappings/compassion_project_mapping.py | 1 | 8179 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Quentin Gigon <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo.addons.message_center_compassion.mappings.base_mapping import \
OnrampMapping
class MobileProjectMapping(OnrampMapping):
ODOO_MODEL = 'compassion.project'
MAPPING_NAME = 'mobile_app_project'
CONNECT_MAPPING = {
'AddressStreet': 'street',
'AirportDistance': 'closest_airport_distance',
'AirportPreferredTransportation': 'transport_mode_to_airport',
'AirportTravelTime': 'time_to_airport',
'AllocatedSurvivalSlots': None,
'AnnualPrimarySchoolCostLocalCurrency': 'annual_primary_school_cost',
'AnnualSecondarySchoolCostLocalCurrency':
'annual_secondary_school_cost',
'AvailableForVisits': 'available_for_visits',
'AverageCoolestTemperature': 'average_coolest_temperature',
'AverageWarmestTemperature': 'average_warmest_temperature',
'ChildDevelopmentCenterName': 'child_center_original_name',
'ChildDevelopmentCenterNameLocalLanguage': (
'preferred_lang_id.name', 'res.lang.compassion'),
'ChurchMinistry': ('ministry_ids.name', 'icp.church.ministry'),
'City': 'city',
'Climate': 'community_climate',
'ClosestMajorCityEnglish': None,
'Cluster': 'cluster',
'CognitiveActivities0To5': ('cognitive_activity_babies_ids.name',
'icp.cognitive.activity'),
'CognitiveActivities12Plus': ('cognitive_activity_ados_ids.name',
'icp.cognitive.activity'),
'CognitiveActivities6To11': ('cognitive_activity_kids_ids.name',
'icp.cognitive.activity'),
'CommunityInvolvement': None,
'Community_Name': 'community_name',
'CompassionConnectEnabled': None,
'ComputersForBeneficiaryUse': 'nb_child_computers',
'ComputersForStaffUse': 'nb_staff_computers',
'CoolestMonth': 'coolest_month',
'Country': ('country_id.name', 'res.country'),
'CountryDivision': None,
'Country_Name': None,
'CulturalRitualsAndCustoms': 'cultural_rituals',
'CurrentStageInQavahProcess': None,
'Denomination': 'church_denomination',
'EconomicNeedDescription': 'economic_needs',
'EducationalNeedDescription': 'education_needs',
'ElectricalPowerAvailability': 'electrical_power',
'Facilities': ('facility_ids.name', 'icp.church.facility'),
'FacilityOwnershipStatus': 'church_ownership',
'FamilyMonthlyIncome': 'monthly_income',
'FieldOffice_Country': ('field_office_id.country',
'compassion.field.office'),
'FieldOffice_Name': ('field_office_id.name',
'compassion.field.office'),
'FieldOffice_RegionName': ('field_office_id.region',
'compassion.field.office'),
'FirstLetterWritingMonth': 'first_scheduled_letter',
'FirstPartnershipAgreementSignedDate': None,
'FloorArea': None,
'FoundationDate': 'church_foundation_date',
'GPSLatitude': 'gps_latitude',
'GPSLongitude': 'gps_longitude',
'HarvestMonths': ('harvest_month_ids.name', 'connect.month'),
'HealthContextNeeds': 'health_needs',
'HomeBasedSponsorshipBeneficiaries': None,
'HomeFloor': 'typical_floor_material',
'HomeRoof': 'typical_roof_material',
'HomeWall': 'typical_wall_material',
'HungerMonths': ('hunger_month_ids.name', 'connect.month'),
'ICPStatus': 'status',
'ICP_ID': 'icp_id',
'ICP_Name': 'local_church_name',
'ICP_NameNonLatin': 'local_church_original_name',
'ImplementedProgram': ('implemented_program_ids.name', 'icp.program'),
'InterestedGlobalPartnerName': ('interested_partner_ids.name',
'compassion.global.partner'),
'InternationalDenominationAffiliation': 'international_affiliation',
'InternetAccess': 'church_internet_access',
'IsParticipatingInQavahProcess': None,
'LastReviewedDate': 'last_reviewed_date',
'LocaleType': 'community_locale',
'MajorRevision_RevisedValues': None,
'MobileInternetAccess': ('mobile_device_ids.value',
'icp.mobile.device'),
'MonthSchoolYearBegins': 'school_year_begins',
'NumberOfActiveMembers': 'number_church_members',
'NumberOfClassrooms': 'nb_classrooms',
'NumberOfLatrines': 'nb_latrines',
'NumberOfSponsorshipBeneficiaries': None,
'NumberOfSurvivalBeneficiaries': None,
'OnSiteInternetQuality': None,
'PhysicalActivities0To5': ('physical_activity_babies_ids.name',
'icp.physical.activity'),
'PhysicalActivities12Plus': ('physical_activity_ados_ids.name',
'icp.physical.activity'),
'PhysicalActivities6To11': ('physical_activity_kids_ids.name',
'icp.physical.activity'),
'PlantingMonths': ('planting_month_ids.name', 'connect.month'),
'Population': 'community_population',
'PostalCode': 'zip_code',
'PreferredLanguage': ('preferred_lang_id.name', 'res.lang.compassion'),
'PrimaryDiet': ('primary_diet_ids.name', 'icp.diet'),
'PrimaryEthnicGroup': 'primary_ethnic_group_name',
'PrimaryLanguage': ('primary_language_id.name', 'res.lang.compassion'),
'PrimaryOccupation': ('primary_adults_occupation_ids.value',
'icp.community.occupation'),
'ProgramBreakReason': None,
'ProgramBreakStartDate': None,
'ProgramEndDate': 'program_end_date',
'ProgramStartDate': 'program_start_date',
'ProgramsOfInterest': ('interested_program_ids.name', 'icp.program'),
'ProjectActivitiesForFamilies': None, #
'RainyMonths': ('rainy_month_ids.name', 'connect.month'),
'SchoolCostPaidByICP': ('school_cost_paid_ids.value',
'icp.school.cost'),
'SecondLetterWritingMonth': 'second_scheduled_letter',
'SocialMedia': 'social_media_site',
'SocialNeedsDescription': 'social_needs',
'SocioEmotionalActivities0To5': ('socio_activity_babies_ids.value',
'icp.sociological.activity'),
'SocioEmotionalActivities12Plus': ('socio_activity_ados_ids.value',
'icp.sociological.activity'),
'SocioEmotionalActivities6To11': ('socio_activity_kids_ids.value',
'icp.sociological.activity'),
'SourceKitName': None,
'SpiritualActivities0To5': ('spiritual_activity_babies_ids.value',
'icp.spiritual.activity'),
'SpiritualActivities12Plus': ('spiritual_activity_ados_ids.value',
'icp.spiritual.activity'),
'SpiritualActivities6To11': ('spiritual_activity_kids_ids.value',
'icp.spiritual.activity'),
'Terrain': 'community_terrain',
'Territory': 'territory',
'TranslationCompletedFields': None,
'TranslationRequiredFields': None,
'TranslationStatus': None,
'TravelTimeToMedicalServices': 'time_to_medical_facility',
'UnemploymentRate': 'unemployment_rate',
'UtilitiesOnSite': ('utility_ids.name', 'icp.church.utility'),
'WarmestMonth': 'warmest_month',
'Website': 'website',
'WeeklyChildAttendance': 'weekly_child_attendance',
'icpbio': None,
'icpbioInHtml': None
}
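    # Keep only the Connect field names whose mapping value above is not None
    # (i.e. those backed by an Odoo field); each retained key maps to None.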
FIELDS_TO_SUBMIT = {k: None for k, v in CONNECT_MAPPING.iteritems() if v}
| agpl-3.0 | -3,517,541,789,825,772,000 | 50.11875 | 79 | 0.595183 | false |
RudolfCardinal/pythonlib | cardinal_pythonlib/wsgi/headers_mw.py | 1 | 4487 | #!/usr/bin/env python
# cardinal_pythonlib/headers_mw.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**WSGI middleware to add arbitrary HTTP headers.**
"""
import logging
from cardinal_pythonlib.wsgi.constants import (
TYPE_WSGI_APP,
TYPE_WSGI_APP_RESULT,
TYPE_WSGI_ENVIRON,
TYPE_WSGI_EXC_INFO,
TYPE_WSGI_RESPONSE_HEADERS,
TYPE_WSGI_START_RESPONSE,
TYPE_WSGI_START_RESP_RESULT,
TYPE_WSGI_STATUS,
)
log = logging.getLogger(__name__)
class HeaderModifyMode(object):
"""
Options for
:class:`cardinal_pythonlib.wsgi.headers_mw.AddHeadersMiddleware`.
"""
ADD = 0
ADD_IF_ABSENT = 1
class AddHeadersMiddleware(object):
"""
WSGI middleware to add arbitrary HTTP headers.
See e.g. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers for a
list of possible HTTP headers.
Note:
- HTTP headers are case-insensitive. However, the canonical form is
hyphenated camel case;
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers.
- You can specify the same HTTP header multiple times; apart from
Set-Cookie, this should have the effect of the browser treating them as
concatenated in a CSV format.
https://stackoverflow.com/questions/3096888;
https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
"""
def __init__(self,
app: TYPE_WSGI_APP,
headers: TYPE_WSGI_RESPONSE_HEADERS,
method: int = HeaderModifyMode.ADD) -> None:
"""
Args:
app:
The WSGI app to which to apply the middleware.
headers:
A list of tuples, each of the form ``(key, value)``.
"""
assert isinstance(headers, list)
for key_value_tuple in headers:
assert isinstance(key_value_tuple, tuple)
assert len(key_value_tuple) == 2
assert isinstance(key_value_tuple[0], str)
assert isinstance(key_value_tuple[1], str)
assert method in [
HeaderModifyMode.ADD,
HeaderModifyMode.ADD_IF_ABSENT,
]
self.app = app
self.headers = headers
self.method = method
def __call__(self,
environ: TYPE_WSGI_ENVIRON,
start_response: TYPE_WSGI_START_RESPONSE) \
-> TYPE_WSGI_APP_RESULT:
"""
Called every time the WSGI app is used.
"""
def add(status: TYPE_WSGI_STATUS,
headers: TYPE_WSGI_RESPONSE_HEADERS,
exc_info: TYPE_WSGI_EXC_INFO = None) \
-> TYPE_WSGI_START_RESP_RESULT:
# Add headers. If they were present already, there will be
# several versions now. See above.
return start_response(status, headers + self.headers, exc_info)
def add_if_absent(status: TYPE_WSGI_STATUS,
headers: TYPE_WSGI_RESPONSE_HEADERS,
exc_info: TYPE_WSGI_EXC_INFO = None) \
-> TYPE_WSGI_START_RESP_RESULT:
# Add headers, but not if that header was already present.
# Note case-insensitivity.
header_keys_lower = [kv[0].lower() for kv in headers]
new_headers = [x for x in self.headers
if x[0].lower() not in header_keys_lower]
return start_response(status, headers + new_headers, exc_info)
method = self.method
if method == HeaderModifyMode.ADD:
custom_start_response = add
else:
custom_start_response = add_if_absent
return self.app(environ, custom_start_response)
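# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module). It wraps a trivial WSGI app so the extra header is only added when
# the app did not set it itself; header name and port are arbitrary choices.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    def _demo_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"hello"]
    demo = AddHeadersMiddleware(
        _demo_app,
        headers=[("X-Content-Type-Options", "nosniff")],
        method=HeaderModifyMode.ADD_IF_ABSENT,
    )
    httpd = make_server("127.0.0.1", 8080, demo)
    httpd.handle_request()  # serve exactly one request, then exit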
| apache-2.0 | 8,220,256,760,036,851,000 | 32.992424 | 79 | 0.592378 | false |
simonzhangsm/voltdb | tests/sqlcoverage/config/all-config.py | 1 | 15894 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2018 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# HACK:
# This SQL coverage configuration set represents hopefully the largest
# set of statements that should always pass. Some of the template files
# generate statements that result in repeated failures due to functional
# differences between HSQL and VoltDB backends. In such cases, we work around
# these errors by generating a fixed sample query file and, after culling out
# any statements that cause mismatches, using it to replace the original
# template file name in the configuration list below.
# In this way, the sample file gets used as a trivial template file that passes
# through the generator untouched. It also has an unfortunate side-effect of
# causing any future improvements to the template to be ignored unless/until
# the sample file is manually re-generated from it and re-edited to eliminate
# mismatches.
#
# Actually, in the specific case of templates that generate random integer
# constant timestamp values, the template must be replaced by TWO separate
# generated sample files -- one generated for hsql with millisecond constants
# and one for VoltDB with microsecond constants (always 1000 X the hsql values).
# The hsql version of the sample file gets associated with the optional
# "template-hsqldb" key in the configuration. Otherwise, both hsql and VoltDB
# use the same input file associated with the "template" key.
#
# The generated sample files follow a naming convention of starting with
# "regression". The hsql variants end in "-hsql.sql".
# It is NOT advisable to try to edit these sample files directly.
# It is better to edit the original template, re-generate the sample(s),
# and re-cull the resulting "mismatches" -- being careful to cull ONLY
# mismatches that are NOT accountable to the known backend differences that
# we are working around in this way. It helps to have comments, below,
# to describe which specific issues each "regression" file is intended to
# work around.
#
# To regenerate the regression-*.sql file for a configuration, run the SQLGenerateReport.py
# tool on the report.xml file generated for that configuration, using the -f true switch,
# which will cause the successful statements to be written to stdout.
{
# from regression-config.py
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "basic.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-unions": {"schema": "union-schema.py",
"ddl": "DDL.sql",
"template": "basic-unions.sql",
"normalizer": "normalizer.py"},
"mixed-unions": {"schema": "union-schema.py",
"ddl": "DDL.sql",
"template": "mixed-unions.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-index": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "basic.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE THE TEMPLATE INPUT
"basic-strings": {"schema": "strings-schema.py",
"ddl": "strings-DDL.sql",
"template": "basic-strings.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"partial-covering": {"schema": "partial-covering-schema.py",
"ddl": "partial-covering-DDL.sql",
"template": "partial-covering.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, USE REGRESSION INPUT
"regression-basic-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "regression-basic-ints.sql",
"normalizer": "normalizer.py"},
# HSQL HAS BAD DEFAULT PRECISION
# AND VoltDB gives VOLTDB ERROR: Type DECIMAL can't be cast as FLOAT
# AND HSQLDB backend gives the likes of:
# VOLTDB ERROR: UNEXPECTED FAILURE: org.voltdb.ExpectedProcedureException:
# HSQLDB Backend DML Error (Scale of 56.11063569750000000000000000000000 is 32 and the max is 12)
# USE REGRESSION INPUT
"regression-basic-decimal": {"schema": "decimal-schema.py",
"ddl": "DDL.sql",
"template": "regression-basic-decimal.sql",
"normalizer": "normalizer.py"},
# If the ONLY problem was that HSQL HAS BAD DEFAULT PRECISION, we could use the original template input
# and FUZZY MATCHING, instead.
# Enable this test to investigate the "DECIMAL can't be cast as FLOAT" and/or "Backend DML Error" issues
# without being thrown off by HSQL HAS BAD DEFAULT PRECISION issues.
# "basic-decimal-fuzzy": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "basic-decimal.sql",
# "normalizer": "fuzzynormalizer.py"},
#
# FLOATING POINT ROUNDING ISSUES BETWEEN VOLT AND HSQL, USE FUZZY MATCHING
"basic-timestamp": {"schema": "timestamp-schema.py",
"ddl": "DDL.sql",
"template": "basic-timestamp.sql",
"normalizer": "normalizer.py"},
"advanced-timestamp": {"schema": "timestamp-schema.py",
"ddl": "DDL.sql",
"template": "advanced-timestamp.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, USE REGRESSION INPUT
"regression-basic-matview": {"schema": "matview-basic-schema.py",
"ddl": "matview-DDL.sql",
"template": "regression-basic-matview.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"advanced": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"advanced-index": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "advanced.sql",
"normalizer": "normalizer.py"},
"advanced-compoundex": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "advanced.sql",
"normalizer": "normalizer.py"},
# THESE ALL SUCCEED, USE TEMPLATE INPUT
"advanced-strings": {"schema": "strings-schema.py",
"ddl": "strings-DDL.sql",
"template": "advanced-strings.sql",
"normalizer": "normalizer.py"},
"advanced-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "advanced-ints.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, USE REGRESSION INPUT
"regression-advanced-ints-cntonly": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "regression-advanced-ints-cntonly.sql",
"normalizer": "not-a-normalizer.py"},
# ADVANCED MATERIALIZED VIEW TESTING, INCLUDING COMPLEX GROUP BY AND AGGREGATIONS.
"advanced-matview-nonjoin": {"schema": "matview-advanced-nonjoin-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-nonjoin.sql",
"normalizer": "normalizer.py"},
"advanced-matview-join": {"schema": "matview-advanced-join-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-join.sql",
"normalizer": "normalizer.py"},
# To test index count
"index-count1": {"schema": "index-count1-schema.py",
"ddl": "DDL.sql",
"template": "index-count1.sql",
"normalizer": "normalizer.py"},
# To test index scan: forward scan, reverse scan
"index-scan": {"schema": "index-scan-schema.py",
"ddl": "index-DDL.sql",
"template": "index-scan.sql",
"normalizer": "normalizer.py"},
# This suite written to test push-down of aggregates and limits in combination
# with indexes, projections and order-by.
"pushdown": {"schema": "pushdown-schema.py",
"ddl": "DDL.sql",
"template": "pushdown.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION
# AND VoltDB gives VOLTDB ERROR: Type DECIMAL can't be cast as FLOAT, so keep it disabled, for now.
# If the only problem were HSQL HAS BAD DEFAULT PRECISION, we could USE FUZZY MATCHING.
# Enable this test to investigate the "DECIMAL can't be cast as FLOAT" issue
# "advanced-decimal-fuzzy": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "advanced-decimal.sql",
# "normalizer": "fuzzynormalizer.py"},
# from config.py
"basic-compoundex": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "compound.sql",
"normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE, DISABLING
# also, the generator fails to generates statements for:
# Template "SELECT * FROM _table WHERE (_variable _cmp _value[int64]) _logic (_variable _cmp _variable)" failed to yield SQL statements
# Template "UPDATE _table SET BIG = _value[int64] WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# Template "DELETE FROM _table WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# because there are insufficient columns of the same type to satisfy all the _variables
# given how the generator works.
"basic-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "basic-ints.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION, DISABLING
# "basic-decimal": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "basic-decimal.sql",
# "normalizer": "normalizer.py"},
# Floating point rounding differences lead to deltas
# "basic-timestamp": {"schema": "timestamp-schema.py",
# "ddl": "timestamp-DDL.sql",
# "template": "basic-timestamp.sql",
# "normalizer": "normalizer.py"},
# BIGINT OVERFLOW CAUSES FAILURES IN THIS SUITE
# also, the generator fails to generate statements for:
# Template "UPDATE _table SET BIG = _value[int64] WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# Template "DELETE FROM _table WHERE (_variable _cmp _variable) _logic (_variable _cmp _value[int64])" failed to yield SQL statements
# "basic-matview": {"schema": "matview-schema.py",
# "ddl": "int-DDL.sql",
# "template": "basic-matview.sql",
# "normalizer": "normalizer.py"},
"basic-index-joins": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
"basic-compoundex-joins": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "basic-joins.sql",
"normalizer": "normalizer.py"},
# TODO: Need to scale down precision of values to keep HSQL happy even after math
"numeric-decimals": {"schema": "decimal-schema.py",
"ddl": "DDL.sql",
"template": "numeric-decimals.sql",
"normalizer": "fuzzynormalizer.py"},
"numeric-ints": {"schema": "int-schema.py",
"ddl": "int-DDL.sql",
"template": "numeric-ints.sql",
"normalizer": "normalizer.py"},
# HSQL SEEMS TO HAVE A BAD DEFAULT PRECISION, DISABLING
# "advanced-decimal": {"schema": "decimal-schema.py",
# "ddl": "DDL.sql",
# "template": "advanced-decimal.sql",
# "normalizer": "normalizer.py"},
"advanced-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "normalizer.py"},
"advanced-index-joins": {"schema": "schema.py",
"ddl": "index-DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "normalizer.py"},
"advanced-subq-joins": {"schema": "schema.py",
"ddl": "DDL.sql",
"template": "advanced-subq-joins.sql",
"normalizer": "normalizer.py"},
"advanced-subq-part-joins": {"schema": "schema.py",
"ddl": "subq-part-DDL.sql",
"template": "advanced-subq-part-joins.sql",
"normalizer": "normalizer.py"},
"advanced-compoundex-joins": {"schema": "schema.py",
"ddl": "compoundex-DDL.sql",
"template": "advanced-joins.sql",
"normalizer": "normalizer.py"},
"advanced-matview-subq-nonjoin": {"schema": "matview-advanced-nonjoin-schema.py",
"ddl": "matview-DDL.sql",
"template": "advanced-matview-subq-nonjoin.sql",
"normalizer": "normalizer.py"},
"insert-into-select": {"schema": "insert-into-select-schema.py",
"ddl": "insert-into-select-DDL.sql",
"template": "insert-into-select.sql",
"normalizer": "normalizer.py"},
}
| agpl-3.0 | 3,054,152,486,480,809,000 | 54.573427 | 152 | 0.57355 | false |
aep124/TwitterAnalyticsTools | textonly.py | 1 | 2405 | # this is a script to retrieve and process text-only data for classification
# This process includes five main tasks
# 1) getting raw tweets
# 2) applying labels (this step can be conducted at any time)
# 3) filtering those tweets (e.g., according to the CMU POS tagger)
# 4) deriving a set of features (a.k.a. word list)
# 5) writing the feature vectors to an arff file
import tools4pgs
import tools4parsing
import tools4fv
import tools4labeling
import pickle
import copy
import numpy as np
import pandas as pd
# dividing into two dataframe because tweet info is fixed, but features are flexible
# tweet info data frame columns:
# NAME DATATYPE
# twtid ....... string (of digits)
# raw ......... string
# filtered .... string
# userid ...... string (of digits)
# handle ...... string
# label ....... string
# imgurl ...... string
# tweet features data frame columns
# twtid ....... string (of digits)
# feature 1 ... TF score for word 1
# feature 2 ... TF score for word 2
# :
# feature n ... TF score for word n
# label ....... string
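# e.g. one purely hypothetical row of the tweet info dataframe could look like:
#   twtid='123', raw='Go bears!', filtered='go bears', userid='42',
#   handle='somefan', label='sports', imgurl=''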
############### (1) Get Tweets ################
# TODO: modify query handling to accommodate the column names that databases use, as well as subsets of query variables
# (this is written for robbery database)
query = 'SELECT id,text,user_id FROM tweets'
condition = "WHERE text like '%bears%'"
tools4pgs.writetwtinfo(query, condition, 'twtinfo.p')
############### (2) Apply Labels ###############
labelmap = tools4labeling.getlabelmap('labelsystem')
tools4labeling.writelabels('twtinfo.p', labelmap)
################# (3) Filter ################
keepset = tools4parsing.getkeepset('POS2keep')
tools4parsing.writefiltered('twtinfo.p', keepset)
# TODO: add functionality for reply tweets (conversations) ????????
############## (4) Derive Features ##############
wordmap = tools4fv.getwordmap('twtinfo.p')
wordlist = wordmap.keys()
# specify threshold directly :
# freq_threshold = 2
# could also specify threshold by number of words (e.g., 500):
# freq_threshold = sorted(wordmap.values())[-500]
# wordlist = [w for w in wordmap.keys() if wordmap[w] >= freq_threshold]
tools4fv.writetf('twtinfo.p','twtfeatures.p', wordlist)
tools4fv.synclabels('twtinfo.p','twtfeatures.p')
############### (5) Make ARFF File ###############
#tools4fv.writearff('twtfeatures.p')
| mit | 2,456,822,204,111,772,700 | 24.585106 | 116 | 0.642827 | false |
daicang/Leetcode-solutions | 146-lru-cache.py | 1 | 1792 | class DLNode(object):
def __init__(self):
self.key = None
self.value = None
self.prev = None
self.next = None
class LRUCache(object):
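    """LRU cache built from a dict for O(1) key lookup plus a doubly-linked list
    ordered by recency (head side = most recently used, tail side = evicted first)."""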
def __init__(self, capacity):
"""
:type capacity: int
"""
self.head = DLNode()
self.tail = DLNode()
self.capacity = capacity
self.size = 0
self.cache = {}
self.head.next = self.tail
self.tail.prev = self.head
def _move_to_head(self, node):
node.prev = self.head
node.next = self.head.next
node.prev.next = node
node.next.prev = node
def _unlink_node(self, node):
node.prev.next = node.next
node.next.prev = node.prev
def get(self, key):
"""
:type key: int
:rtype: int
"""
node = self.cache.get(key)
if node is None:
return -1
self._unlink_node(node)
self._move_to_head(node)
return node.value
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: None
"""
node = self.cache.get(key)
if node:
node.value = value
self._unlink_node(node)
self._move_to_head(node)
return
node = DLNode()
node.key = key
node.value = value
self.cache[key] = node
self._move_to_head(node)
self.size += 1
if self.size > self.capacity:
outdated = self.tail.prev
self._unlink_node(outdated)
del self.cache[outdated.key]
self.size -= 1
c = LRUCache(2)
c.put(1, 1)
c.put(2, 2)
print c.get(1)
c.put(3, 3)
print c.get(2)
c.put(4, 4)
print c.get(1)
print c.get(3)
print c.get(4)
| mit | 3,490,341,262,473,513,000 | 19.363636 | 40 | 0.497768 | false |
brettchien/LeetCode | 9_PalindromeNumber.py | 1 | 3651 | class Solution:
# @param {integer} x
# @return {boolean}
def isPalindrome(self, x):
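        # Reverse only the lower half of the digits and compare it with the upper half;
        # this avoids converting the number to a string.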
if x < 0:
return False
if x > 0 and x % 10 == 0:
return False
reverse = 0
while x > reverse:
reverse = reverse * 10 + x % 10
x /= 10
return (x == reverse) or (x == reverse / 10)
def cisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x < 100:
hi = x / 10
lo = x % 10
return hi == lo
pivot = 1
count = 0
while pivot <= x:
count += 1
pivot *= 10
digits = count / 2
first = x / (10 ** (digits + (count % 2)))
second = x % (10 ** digits)
print x, first, second
while digits >= 1:
print first, second
if digits == 1:
return first == second
lo = second % 10
hi = first / (10 ** (digits-1))
print hi, lo
if hi != lo:
return False
else:
first = first % (10 ** (digits-1))
second = second / 10
digits -= 1
def bisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x < 100:
hi = x / 10
lo = x % 10
return hi == lo
pivot = 1
count = 1
while pivot <= x:
count += 1
pivot *= 10
count -= 1
odd = (count % 2 == 1)
print x, pivot, count
while x > 0:
print x
digit = x % 10
pivot /= 100
x /= 10
hiDigit = x / pivot
print pivot, x, digit, hiDigit
if hiDigit != digit:
return False
x -= digit * pivot
if x == 0:
return True
print x
if odd:
if pivot == 10:
return True
else:
if pivot == 100:
hi = x / 10
lo = x % 10
return hi == lo
def aisPalindrome(self, x):
if x < 0:
return False
if x < 10:
return True
if x == 10:
return False
pivot = 1
count = 1
while pivot <= x:
count += 1
pivot *= 10
count -= 1
print x, pivot, count
while x > 0:
print x
digit = x % 10
pivot /= 100
x /= 10
if digit == 0 and pivot > x:
continue
if count % 2 == 0: #even numbers of digits
if pivot == 10:
return x == digit
else: # odd numbers of digits
if pivot == 1:
return True
check = x - digit * pivot
print pivot, x, digit, check
if check == 0:
return True
elif check < 0 or check >= digit * pivot:
return False
else:
x -= digit * pivot
if __name__ == "__main__":
sol = Solution()
print sol.isPalindrome(121) == True
print sol.isPalindrome(101) == True
print sol.isPalindrome(100) == False
print sol.isPalindrome(9999) == True
print sol.isPalindrome(99999) == True
print sol.isPalindrome(999999) == True
print sol.isPalindrome(1000110001) == True
print sol.isPalindrome(1000021) == False
| mit | 3,103,546,097,456,922,600 | 26.870229 | 54 | 0.398247 | false |
felipemontefuscolo/bitme | get_bitmex_candles.py | 1 | 4122 | #!/usr/bin/env python
import sys
import time
import swagger_client
from swagger_client.rest import ApiException
from utils.utils import smart_open
import argparse
import pandas as pd
MAX_NUM_CANDLES_BITMEX = 500
def print_file(file_or_stdout, api_instance, bin_size, partial, symbol, reverse, start_time, end_time):
chunks = split_in_chunks(start_time, end_time, MAX_NUM_CANDLES_BITMEX, bin_size)
with smart_open(file_or_stdout) as fh:
print("time,open,high,low,close,volume", file=fh)
num_pages = len(chunks)
for i in range(num_pages):
chunk = chunks[i]
s = chunk[0]
e = chunk[1]
count = (e - s) / pd.Timedelta(bin_size)
page = api_instance.trade_get_bucketed(
bin_size=bin_size,
partial=partial,
symbol=symbol,
count=count,
start=0.0,
reverse=reverse,
start_time=s,
end_time=e)
print("from {} to {}: {} candles downloaded".format(s, e, len(page)))
# TODO: bitmex has a bug where the high is not the highest value !!!!!
for line in reversed(page):
print(','.join([line.timestamp.strftime('%Y-%m-%dT%H:%M:%S'),
str(line.open),
str(max(line.high, line.open)),
str(min(line.low, line.open)),
str(line.close),
str(line.volume)]), file=fh)
sys.stdout.write(
"progress: completed %d out of %d pages (%.2f%%) \r" %
(i + 1, num_pages, 100 * float(i + 1) / num_pages))
sys.stdout.flush()
time.sleep(1.001)
print("")
def split_in_chunks(start: pd.Timestamp, end: pd.Timestamp, chunk_size: int, bucket_size: str):
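    """Split [start, end] into consecutive (chunk_start, chunk_end) pairs, each spanning at most chunk_size * bucket_size."""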
i = start
r = []
dt = chunk_size * pd.Timedelta(bucket_size)
while i <= end:
r += [(i, min(end, i + dt))]
i += dt
return r
def get_args(args=None, namespace=None):
parser = argparse.ArgumentParser(description="Get bitmex data")
parser.add_argument('-b', '--begin-time', type=pd.Timestamp, required=True, help="Example: '2018-04-01T00:00:01'")
parser.add_argument('-e', '--end-time', type=pd.Timestamp, required=True, help="Example: '2018-04-02T00:00:01'")
parser.add_argument('-s', '--symbol', type=str, default='XBTUSD',
help='Instrument symbol. Send a bare series (e.g. XBU) to get data for the nearest expiring'
'contract in that series. You can also send a timeframe, e.g. `XBU:monthly`. '
'Timeframes are `daily`, `weekly`, `monthly`, `quarterly`, and `biquarterly`. (optional)')
parser.add_argument('-z', '--bin-size', choices=('1m', '5m', '1h', '1d'), default='1m', type=str,
help='Time interval to bucket by')
parser.add_argument('-o', '--file-or-stdout', type=str, required=True, help='Output filename or "-" for stdout')
parser.add_argument('--partial', action='store_true', default=False, )
args = parser.parse_args(args, namespace)
return args
def main():
args = get_args()
# create an instance of the API class
configuration = swagger_client.Configuration()
configuration.host = 'https://www.bitmex.com/api/v1'
api_instance = swagger_client.TradeApi(swagger_client.ApiClient(configuration))
print("print to file " + (args.file_or_stdout if args.file_or_stdout is not '-' else 'std output'))
try:
print_file(file_or_stdout=args.file_or_stdout,
api_instance=api_instance,
bin_size=args.bin_size, partial=args.partial, symbol=args.symbol,
reverse=False,
start_time=args.begin_time, end_time=args.end_time)
except ApiException as e:
print("Exception when calling TradeApi->trade_get_bucketed: %s\n" % e)
return 0
if __name__ == "__main__":
sys.exit(main())
| mpl-2.0 | -6,723,906,315,588,768,000 | 35.157895 | 119 | 0.562591 | false |
google/makani | avionics/network/network_config.py | 1 | 19633 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for network.yaml."""
import collections
import os
import sys
import gflags
import makani
from makani.lib.python import string_util
import yaml
class NetworkConfigException(Exception):
pass
def _GetPortNumber(port, port_names):
if isinstance(port, int):
return port
else:
for p, name in port_names.iteritems():
if name == port:
return p
raise NetworkConfigException('Invalid port specification: {}.'.format(port))
def _ConvertPortNumbersInList(names_or_numbers, port_names):
return [_GetPortNumber(n, port_names) for n in names_or_numbers]
def _RangeParse(node_select):
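  """Expand a list mixing plain ints and range() argument lists into a set,
  e.g. [1, [3, 6]] -> {1, 3, 4, 5}."""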
nodes = set()
for item in node_select:
if isinstance(item, list):
nodes.update(range(*item))
else:
nodes.add(item)
return nodes
def _ValidateMessageTypes(message_types, supplied_types):
for supplied in supplied_types:
for m in message_types:
if supplied == m['name']:
break
else:
raise NetworkConfigException('Message type %s is invalid.' % supplied)
# Flatten lists of lists, and make raw elements into lists of one item; yaml
# doesn't support merging lists automatically.
def _FlattenList(l):
"""Flattens lists of lists into plain lists, recursively.
For example, [[4, 5, 6], [7, 8], []] will become [4, 5, 6, 7, 8].
Non-list elements will get wrapped in a list, so 'foo' becomes ['foo'].
None becomes [].
Args:
l: a list, or not.
Returns:
A flattened list.
"""
ret = []
if l is None:
pass
elif not isinstance(l, list):
ret = [l]
else:
for item in l:
ret.extend(_FlattenList(item))
return ret
def _FlattenNodeList(l):
"""Replaces a list of list of nodes with a list of node names."""
return [n['name'] for n in _FlattenList(l)]
class AioNode(object):
"""Wrapper class for AIO node definition in network.yaml."""
def __init__(self, config, kind, instance):
self._config = config
self._kind = kind
self._instance = instance
self._enum_value = config._aio_enum_value # pylint: disable=protected-access
config._aio_enum_value += 1 # pylint: disable=protected-access
for i, node in enumerate(kind['instances']):
if node['name'] == instance['name']:
self._index = i
break
else:
raise NetworkConfigException('Node name not found under this label name.')
if not string_util.IsSnakeCase(self.snake_name):
raise NetworkConfigException('AioNode name is not snake case: %s'
% self.snake_name)
def __hash__(self):
return self.snake_name.__hash__()
def __eq__(self, other):
return self.snake_name == other.snake_name
def __cmp__(self, other):
return cmp(self.enum_value, other.enum_value)
@property
def label_name(self):
return self._kind['label_name']
@property
def snake_name(self):
return self._instance['name']
@property
def camel_name(self):
return string_util.SnakeToCamel(self.snake_name)
@property
def enum_name(self):
return 'kAioNode' + self.camel_name
@property
def application_path(self):
return self._kind['application']
@property
def bootloader_path(self):
return self._kind.get('bootloader',
self._config._yaml['default_bootloader']) # pylint: disable=protected-access
@property
def bootloader_application_path(self):
return self._kind.get(
'bootloader_application',
self._config._yaml['default_bootloader_application']) # pylint: disable=protected-access
@property
def label_value(self):
return self._index
@property
def ip_octet(self):
return self._instance['ip']
@property
def ip(self):
return '192.168.1.%d' % self.ip_octet
@property
def enum_value(self):
return self._enum_value
@property
def tms570_node(self):
return self.snake_name in self._config._yaml['tms570s'] # pylint: disable=protected-access
@property
def q7_node(self):
return self.snake_name in self._config._yaml['q7s'] # pylint: disable=protected-access
class MessageRoute(object):
"""Wrapper class for message route definitions in network.yaml."""
def __init__(self, config, route):
self._config = config
self._route = route
required_fields = ['senders', 'receivers']
if not all(x in route for x in required_fields):
raise NetworkConfigException(
'MessageRoute missing one or more required fields: %s' %
', '.join(required_fields))
@property
def senders(self):
return frozenset(self._config.GetAioNode(name)
for name in self._route['senders'])
@property
def receivers(self):
return frozenset(self._config.GetAioNode(name)
for name in self._route['receivers'])
class MessageType(object):
"""Wrapper class for message type definition in network.yaml."""
def __init__(self, config, message, type_name, enum_value):
self._config = config
self._message = message
self._type_name = type_name.capitalize()
self._enum_value = enum_value
required_fields = ['name', 'freq', 'routes']
if not all(x in self._message for x in required_fields):
raise NetworkConfigException(
'Message %s missing one or more required fields: %s' %
(self._message.get('name', ''), ', '.join(required_fields)))
if not string_util.IsCamelCase(self.name):
raise NetworkConfigException('MessageType name is not camel case: %s'
% self.camel_name)
self._routes = [MessageRoute(config, r) for r in message['routes']]
def __hash__(self):
return self.name.__hash__()
def __eq__(self, other):
return self.name == other.name
def __cmp__(self, other):
type_name_compare = cmp(self.type_name, other.type_name)
if type_name_compare == 0:
return cmp(self.enum_value, other.enum_value)
return type_name_compare
@property
def name(self):
return self._message['name']
@property
def snake_name(self):
return string_util.CamelToSnake(self.name)
@property
def type_name(self):
return self._type_name
@property
def enum_prefix(self):
# TODO: Rename MessageType to AioMessageType.
if self.type_name == 'Aio':
return 'MessageType'
return '%sMessageType' % self.type_name
@property
def enum_name(self):
return 'k%s%s' % (self.enum_prefix, self.name)
@property
def enum_value(self):
return self._enum_value
@property
def ip(self):
if self.type_name == 'Aio':
return '239.0.0.%d' % self.enum_value
elif self.type_name == 'Eop':
return '239.0.2.%d' % self.enum_value
elif self.type_name == 'Winch':
return '239.0.1.%d' % self.enum_value
raise ValueError('Unknown message type name: ' + self.type_name)
@property
def frequency_hz(self):
return self._message['freq']
@property
def all_senders(self):
return frozenset(name for route in self._routes for name in route.senders)
@property
def all_receivers(self):
return frozenset(name for route in self._routes for name in route.receivers)
@property
def routes(self):
return self._routes
@property
def inhibit_routing(self):
return self._message.get('inhibit_routing', False)
@property
def inhibit_cvt(self):
return self._message.get('inhibit_cvt', False)
@property
def aio_message(self):
return self.type_name == 'Aio'
@property
def eop_message(self):
return self.type_name == 'Eop'
@property
def winch_message(self):
return self.type_name == 'Winch'
class NetworkConfig(object):
"""Wrapper for the network.yaml file.
Provides an interface to access information about AioNodes and MessageTypes.
"""
def _PreprocessMessageTypes(self, key, y):
"""Preprocesses a message list.
Args:
key: YAML file key for message list (e.g., aio_messages).
y: Loaded YAML file.
Returns:
The processed message list.
"""
if key in y:
for m in y[key]:
if 'routes' in m:
for r in m['routes']:
r['receivers'] = _FlattenNodeList(r['receivers'])
r['senders'] = _FlattenNodeList(r['senders'])
else:
m['routes'] = []
if 'receivers' in m and 'senders' in m:
m['routes'].append({
'receivers': _FlattenNodeList(m['receivers']),
'senders': _FlattenNodeList(m['senders'])})
m.pop('receivers')
m.pop('senders')
return y[key]
return []
def _PreprocessYamlFile(self, yaml_file):
"""Read the YAML file and prepare it for processing.
Flatten lists, generate masks, convert port names into numbers, etc.
Args:
yaml_file: Path to the network.yaml file.
Returns:
The parsed YAML file.
Raises:
NetworkConfigException: if there is overlap between C network ports and
A or B network ports.
"""
if not yaml_file:
yaml_file = os.path.join(makani.HOME, 'avionics/network/network.yaml')
with open(yaml_file, 'r') as f:
y = yaml.full_load(f)
y['tms570s'] = _FlattenNodeList(y['tms570s'])
y['q7s'] = _FlattenNodeList(y['q7s'])
all_message_types = []
all_message_types += self._PreprocessMessageTypes('aio_messages', y)
all_message_types += self._PreprocessMessageTypes('eop_messages', y)
all_message_types += self._PreprocessMessageTypes('winch_messages', y)
for switch_name, switch in y['switches'].iteritems():
if 'config' in switch:
# Note: If config is shared and port name/number assignments are ever
# allowed to differ between config users, we'll need to clone it
# before modifying it.
config = switch['config']
for l in ['network_a', 'network_b', 'network_c', 'unicast']:
if l in config:
config[l] = _RangeParse(config[l])
if set(config.get('network_c', [])) & (
set(config.get('network_a', []))
| set(config.get('network_b', []))):
raise NetworkConfigException(
'A port on %s is assigned to network C as well as A or B.'
% switch_name)
if 'port_names' in switch:
port_names = switch['port_names']
if 'trunk' in config:
trunk = config['trunk']
trunk['ports'] = _ConvertPortNumbersInList(trunk['ports'],
port_names)
if 'unicast_learning' in trunk:
trunk['unicast_learning'] = _ConvertPortNumbersInList(
trunk['unicast_learning'], port_names)
if 'network_c_transit' in trunk:
trunk['network_c_transit'] = _ConvertPortNumbersInList(
trunk['network_c_transit'], port_names)
overrides = trunk['override_message_routes']
for k, v in overrides.iteritems():
overrides[k] = _ConvertPortNumbersInList(v, port_names)
_ValidateMessageTypes(all_message_types, overrides.iterkeys())
bandwidth = {}
for port_name, port_bandwidth in config['bandwidth'].iteritems():
if port_name == 'default':
bandwidth['default'] = port_bandwidth
else:
bandwidth[_GetPortNumber(
port_name, port_names)] = port_bandwidth
config['bandwidth'] = bandwidth
config['restrict'] = {
_GetPortNumber(k, port_names): v
for k, v in config.get('restrict', {}).iteritems()}
for v in config['restrict'].itervalues():
_ValidateMessageTypes(all_message_types, v)
self._yaml = y
return y
def _GenerateAioNodes(self):
"""Generate the list of AIO nodes.
Ensure a sorted ordering of AIO nodes as found in the YAML file.
"""
self._aio_nodes = []
self._aio_nodes_by_name = {}
self._aio_nodes_by_label = collections.defaultdict(list)
self._aio_enum_value = 0
for kind in self._yaml['aio_nodes']:
for instance in kind['instances']:
node = AioNode(self, kind, instance)
self._aio_nodes.append(node)
self._aio_nodes_by_name[node.snake_name] = node
self._aio_nodes_by_label[node.label_name].append(node)
self._aio_nodes = tuple(self._aio_nodes)
def _GenerateMessages(self, type_name, y):
"""Generate the list of AIO nodes.
Args:
type_name: Message type name (e.g., 'Aio' or 'Winch').
y: A parsed YAML file.
Raises:
NetworkConfigException: if message indices are invalid.
"""
key = type_name.lower() + '_messages'
message_types = []
message_types_by_name = {}
if key in y:
static_assignments = {m['name']: m['index'] for m in y[key]
if 'index' in m}
value = 0
used_values = set(static_assignments.values())
if len(static_assignments) != len(used_values):
raise NetworkConfigException('Duplicate message indices in %s.' % key)
if used_values and (min(used_values) < 0 or max(used_values) > 255):
raise NetworkConfigException('Invalid message indices in %s.' % key)
for message in y[key]:
if message['name'] in static_assignments:
enum_value = static_assignments[message['name']]
else:
while value in used_values:
value += 1
used_values.add(value)
enum_value = value
message_type = MessageType(self, message, type_name, enum_value)
message_types.append(message_type)
message_types_by_name[message_type.name] = message_type
setattr(self, '_%s' % key, message_types)
setattr(self, '_%s_by_name' % key, message_types_by_name)
def _ValidateAioNodeIps(self):
"""Ensure that IPs are not duplicated, and that unused IPs are declared."""
used_ips = set()
unused_ips_list = _RangeParse(self._yaml.get('unused_ips', []))
unused_ips = set(unused_ips_list)
next_ip = self._yaml['next_ip']
unknown_ip = self._yaml['unknown_ip']
for node in self._aio_nodes:
ip = node.ip_octet
if ip in used_ips:
raise NetworkConfigException('IP address %d is used more than once.'
% ip)
if ip in unused_ips:
raise NetworkConfigException('IP address %d is used and unused.' % ip)
if ip >= next_ip and ip != unknown_ip:
raise NetworkConfigException('An address at or above next_ip is in '
'use.')
used_ips.add(ip)
covered_range = used_ips.union(unused_ips)
expected_range = set(range(next_ip))
expected_range.add(unknown_ip)
missed_ips = covered_range.symmetric_difference(expected_range)
if missed_ips:
raise NetworkConfigException('Address range through "next_ip" isn\'t '
'fully covered by used IPs and "unused_ips";'
' errors are %s.' % missed_ips)
def _ValidateMessageSortOrder(self, message_types):
"""Ensure that messages are unique and sorted alphabetically."""
names = [m.name for m in message_types]
sorted_names = sorted(names, key=lambda s: s.lower())
if names != sorted_names:
raise NetworkConfigException('Messages are not in alphabetical order.')
if len(set(names)) != len(names):
raise NetworkConfigException('Duplicate message entry found.')
def _ValidateAioNodeSortOrder(self):
"""Ensure that AIO nodes are unique and sorted alphabetically.
Motors are not sorted alphabetically.
Raises:
NetworkConfigException: if nodes are not in order.
"""
node_names = []
for node in self._aio_nodes:
if node.label_name != 'unknown':
node_names.append(node.enum_name)
sorted_node_names = sorted(node_names, key=lambda s: s.lower())
for a, b in zip(node_names, sorted_node_names):
if a != b and not ('Motor' in a and 'Motor' in b):
raise NetworkConfigException('Node sort order violation near "%s".' % a)
if len(set(node_names)) != len(node_names):
raise NetworkConfigException('Duplicate name AIO node entry found.')
def __init__(self, yaml_file=None):
y = self._PreprocessYamlFile(yaml_file)
self._GenerateAioNodes()
self._GenerateMessages('Aio', y)
self._GenerateMessages('Eop', y)
self._GenerateMessages('Winch', y)
self._ValidateAioNodeIps()
self._ValidateMessageSortOrder(self.aio_messages)
self._ValidateMessageSortOrder(self.eop_messages)
self._ValidateMessageSortOrder(self.winch_messages)
self._ValidateAioNodeSortOrder()
@property
def aio_nodes(self):
return self._aio_nodes
def GetAioNode(self, name):
if name.startswith('kAioNode'):
name = name[len('kAioNode'):]
if string_util.IsCamelCase(name):
name = string_util.CamelToSnake(name)
name = name.lower()
try:
return self._aio_nodes_by_name[name]
except KeyError:
raise ValueError('Invalid node name: %s' % name)
def GetAioMessageType(self, name):
if name.startswith('kMessageType'):
name = name[len('kMessageType'):]
return self._aio_messages_by_name[name]
@property
def aio_labels(self):
return sorted(self._aio_nodes_by_label.iterkeys())
def GetAioNodesByLabel(self, label):
if label not in self._aio_nodes_by_label:
raise ValueError('Invalid label: %s' % label)
return tuple(self._aio_nodes_by_label[label])
@property
def messages_by_type(self):
return {
'Aio': self.aio_messages,
'Eop': self.eop_messages,
'Winch': self.winch_messages
}
@property
def all_messages(self):
return self._aio_messages + self._eop_messages + self._winch_messages
@property
def aio_messages(self):
return self._aio_messages
@property
def eop_messages(self):
return self._eop_messages
@property
def winch_messages(self):
return self._winch_messages
# TODO: Split this section of network.yaml into its own file.
def GetSwitchChips(self):
return self._yaml['switch_chips']
# TODO: Split this section of network.yaml into its own file.
def GetSwitches(self):
return self._yaml['switches']
def ParseGenerationFlags(argv):
"""Use gflags to parse arguments specific to network.yaml code generation."""
gflags.DEFINE_string('autogen_root', makani.HOME,
'Root of the source tree for the output files.')
gflags.DEFINE_string('output_dir', '.',
'Full path to the directory for output files.')
gflags.DEFINE_string('network_file', None,
'Full path to the yaml file that describes the network.')
try:
argv = gflags.FLAGS(argv)
except gflags.FlagsError, e:
sys.stderr.write('\nError: %s\n\nUsage: %s ARGS\n%s\n'
% (e, argv[0], gflags.FLAGS))
sys.exit(1)
argv = gflags.FLAGS(argv)
return gflags.FLAGS, argv
| apache-2.0 | 6,779,570,442,275,803,000 | 30.36262 | 103 | 0.629247 | false |
Oslandia/qgis-swmm | SwmmAlgorithm.py | 1 | 21125 | # -*- coding: utf-8 -*-
"""
***************************************************************************
SwmmAlgorithm.py
---------------------
Date : December 2013
Copyright : (C) 2013 by Vincent Mora
Email : vincent dot mora dot mtl at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Vincent Mora'
__date__ = 'December 2013'
__copyright__ = '(C) 2013, Oslandia'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import re
import datetime
import codecs
import subprocess
from qgis.core import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.core.ProcessingLog import ProcessingLog
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterFile
from processing.core.parameters import Parameter
from processing.core.outputs import OutputVector
from processing.core.outputs import OutputTable
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools import dataobjects
def convert_date(d):
month = {'JAN':'01',
'FEB':'02',
'MAR':'03',
'APR':'04',
'MAY':'05',
'JUN':'06',
'JUL':'07',
'AUG':'08',
'SEP':'09',
'OCT':'10',
'NOV':'11',
'DEC':'12'}
m = re.search('^(\S+)-(\d\d)-(\d\d\d\d)$',d) # for date and time saved as timestamps
    if not m: raise RuntimeError
return m.group(3)+'-'+month[m.group(1)]+'-'+m.group(2)
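# e.g. convert_date('JAN-02-2014') returns '2014-01-02' (SWMM report date reordered to ISO format)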
class SwmmAlgorithm(GeoAlgorithm):
TITLE = 'TITLE'
OPTIONS = 'OPTIONS' # analysis options
REPORT = 'REPORT' # output reporting instructions
FILES = 'FILES' # interface file options
RAINGAGES = 'RAINGAGES' # rain gage information
HYDROGRAPHS = 'HYDROGRAPHS' # unit hydrograph data used to construct RDII inflows
EVAPORATION = 'EVAPORATION' # evaporation data
TEMPERATURE = 'TEMPERATURE' # air temperature and snow melt data
SUBCATCHMENTS = 'SUBCATCHMENTS' # basic subcatchment information
SUBAREAS = 'SUBAREAS' # subcatchment impervious/pervious sub-area data
INFILTRATION = 'INFILTRATION' # subcatchment infiltration parameters
LID_CONTROLS = 'LID_CONTROLS' # low impact development control information
LID_USAGE = 'LID_USAGE' # assignment of LID controls to subcatchments
AQUIFERS = 'AQUIFERS' # groundwater aquifer parameters
GROUNDWATER = 'GROUNDWATER' # subcatchment groundwater parameters
SNOWPACKS = 'SNOWPACKS' # subcatchment snow pack parameters
JUNCTIONS = 'JUNCTIONS' # junction node information
OUTFALLS = 'OUTFALLS' # outfall node information
DIVIDERS = 'DIVIDERS' # flow divider node information
STORAGE = 'STORAGE' # storage node information
CONDUITS = 'CONDUITS' # conduit link information
PUMPS = 'PUMPS' # pump link information
ORIFICES = 'ORIFICES' # orifice link information
WEIRS = 'WEIRS' # weir link information
OUTLETS = 'OUTLETS' # outlet link information
XSECTIONS = 'XSECTIONS' # conduit, orifice, and weir cross-section geometry
TRANSECTS = 'TRANSECTS' # transect geometry for conduits with irregular cross-sections
LOSSES = 'LOSSES' # conduit entrance/exit losses and flap valves
CONTROLS = 'CONTROLS' # rules that control pump and regulator operation
POLLUTANTS = 'POLLUTANTS' # pollutant information
LANDUSES = 'LANDUSES' # land use categories
COVERAGES = 'COVERAGES' # assignment of land uses to subcatchments
BUILDUP = 'BUILDUP' # buildup functions for pollutants and land uses
WASHOFF = 'WASHOFF' # washoff functions for pollutants and land uses
TREATMENT = 'TREATMENT' # pollutant removal functions at conveyance system nodes
INFLOWS = 'INFLOWS' # external hydrograph/pollutograph inflow at nodes
DWF = 'DWF' # baseline dry weather sanitary inflow at nodes
PATTERNS = 'PATTERNS' # periodic variation in dry weather inflow
RDII = 'RDII' # rainfall-dependent I/I information at nodes
LOADINGS = 'LOADINGS' # initial pollutant loads on subcatchments
CURVES = 'CURVES' # x-y tabular data referenced in other sections
TIMESERIES = 'TIMESERIES' # time series data referenced in other sections
NODE_OUTPUT = 'NODE_OUTPUT'
LINK_OUTPUT = 'LINK_OUTPUT'
NODE_TABLE_OUTPUT = 'NODE_TABLE_OUTPUT'
def __init__(self):
GeoAlgorithm.__init__(self)
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/swmm.png')
def helpFile(self):
return None
def commandLineName(self):
return 'swmm:simulation'
def defineCharacteristics(self):
self.name = 'Simulate flow in storm water conveyance systems'
self.group = 'Simulation'
self.addParameter(ParameterString(self.TITLE, 'Title', 'Swmm Simulation'))
self.addParameter(ParameterTable(self.OPTIONS, 'Analysis options', True))
self.addParameter(ParameterTable(self.REPORT, 'Output reporting instructions', True))
self.addParameter(ParameterTable(self.FILES, 'Interface file options', True))
self.addParameter(ParameterTable(self.RAINGAGES, 'Rain gage information', True))
self.addParameter(ParameterTable(self.HYDROGRAPHS, 'Unit hydrograph data used to construct RDII inflows', True))
self.addParameter(ParameterTable(self.EVAPORATION, 'Evaporation data', True))
self.addParameter(ParameterTable(self.TEMPERATURE, 'Air temperature and snow melt data', True))
self.addParameter(ParameterVector(self.SUBCATCHMENTS, 'Basic subcatchment information', [ParameterVector.VECTOR_TYPE_POLYGON], True))
self.addParameter(ParameterTable(self.SUBAREAS, 'Subcatchment impervious/pervious sub-area data', True))
self.addParameter(ParameterTable(self.INFILTRATION, 'Subcatchment infiltration parameters', True))
self.addParameter(ParameterTable(self.LID_CONTROLS, 'Low impact development control information', True))
self.addParameter(ParameterTable(self.LID_USAGE, 'Assignment of LID controls to subcatchments', True))
self.addParameter(ParameterTable(self.AQUIFERS, 'Groundwater aquifer parameters', True))
self.addParameter(ParameterTable(self.GROUNDWATER, 'Subcatchment groundwater parameters', True))
self.addParameter(ParameterTable(self.SNOWPACKS, 'Subcatchment snow pack parameters', True))
self.addParameter(ParameterVector(self.JUNCTIONS, 'Junction node information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterTable(self.OUTFALLS, 'Outfall node information', True))
self.addParameter(ParameterVector(self.DIVIDERS, 'Flow divider node information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterVector(self.STORAGE, 'Storage node information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterVector(self.CONDUITS, 'Conduit link information', [ParameterVector.VECTOR_TYPE_LINE], True))
self.addParameter(ParameterVector(self.PUMPS, 'Pump link information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterVector(self.ORIFICES, 'Orifice link information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterVector(self.WEIRS, 'Weir link information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterVector(self.OUTLETS, 'Outlet link information', [ParameterVector.VECTOR_TYPE_POINT], True))
self.addParameter(ParameterTable(self.XSECTIONS, 'Conduit, orifice, and weir cross-section geometry', True))
self.addParameter(ParameterTable(self.TRANSECTS, 'Transect geometry for conduits with irregular cross-sections', True))
self.addParameter(ParameterTable(self.LOSSES, 'Conduit entrance/exit losses and flap valves', True))
self.addParameter(ParameterTable(self.CONTROLS, 'Rules that control pump and regulator operation', True))
self.addParameter(ParameterTable(self.POLLUTANTS, 'Pollutant information', True))
self.addParameter(ParameterTable(self.LANDUSES, 'Land use categories', True))
self.addParameter(ParameterTable(self.COVERAGES, 'Assignment of land uses to subcatchments', True))
self.addParameter(ParameterTable(self.BUILDUP, 'Buildup functions for pollutants and land uses', True))
self.addParameter(ParameterTable(self.WASHOFF, 'Washoff functions for pollutants and land uses', True))
self.addParameter(ParameterTable(self.TREATMENT, 'Pollutant removal functions at conveyance system nodes', True))
self.addParameter(ParameterTable(self.INFLOWS, 'External hydrograph/pollutograph inflow at nodes', True))
self.addParameter(ParameterTable(self.DWF, 'Baseline dry weather sanitary inflow at nodes', True))
self.addParameter(ParameterTable(self.PATTERNS, 'Periodic variation in dry weather inflow', True))
self.addParameter(ParameterTable(self.RDII, 'Rainfall-dependent I/I information at nodes', True))
self.addParameter(ParameterTable(self.LOADINGS, 'Initial pollutant loads on subcatchments', True))
self.addParameter(ParameterTable(self.CURVES, 'x-y tabular data referenced in other sections', True))
self.addParameter(ParameterTable(self.TIMESERIES, 'Time series data referenced in other sections', True))
self.addOutput(OutputVector(self.NODE_OUTPUT, 'Node output layer'))
self.addOutput(OutputTable(self.NODE_TABLE_OUTPUT, 'Node output table'))
self.addOutput(OutputVector(self.LINK_OUTPUT, 'Link output layer'))
pass
def checkBeforeOpeningParametersDialog(self):
if not ProcessingConfig.getSetting('Swmm_CLI'):
return 'Swmm command line tool is not configured.\n\
Please configure it before running Swmm algorithms.'
layers = dataobjects.getVectorLayers()
for p in self.parameters:
for layer in layers:
if layer.name() == p.name.lower() :
self.setParameterValue(p.name, layer )
return None
def swmmTable(self, table_name):
uri = self.getParameterValue(table_name)
if not uri: return u''
layer = dataobjects.getObjectFromUri(uri)
pkidx = layer.dataProvider().pkAttributeIndexes()
fields = ""
for i,field in enumerate(layer.dataProvider().fields()):
if not i in pkidx: fields+=field.name()+"\t"
tbl =u'['+table_name+']\n'\
';'+fields+'\n'
for feature in layer.getFeatures():
for i,v in enumerate(feature):
if not i in pkidx:
if str(v) != 'NULL':
m = re.search('^(\d\d\d\d)-(\d\d)-(\d\d) (\d\d:\d\d):\d\d',str(v)) # for date and time saved as timestamps
if m:
tbl += '/'.join(m.group(2,3,1))+'\t'+m.group(4)+'\t'
else:
tbl += str(v)+'\t'
else: tbl += '\t'
tbl += '\n'
tbl += '\n'
return tbl;
def swmmKeyVal(self, table_name, simul_title):
uri = self.getParameterValue(table_name)
if not uri: return u''
layer = dataobjects.getObjectFromUri(uri)
fields = []
for i,field in enumerate(layer.dataProvider().fields()):
fields.append(field.name())
tbl =u'['+table_name+']\n'
found = False
for feature in layer.getFeatures():
if str(feature[0]) == simul_title:
for i,v in enumerate(feature):
if i and str(v) != 'NULL': tbl += fields[i].upper()+'\t'+str(v)+'\n'
elif i : tbl += '\t'
found = True
tbl += '\n'
tbl += '\n'
if not found:
raise GeoAlgorithmExecutionException(
"No simulation named '"+simul_title+"' in "+table_name)
return tbl;
def processAlgorithm(self, progress):
swmm_cli = os.path.abspath(ProcessingConfig.getSetting('Swmm_CLI'))
if not swmm_cli:
raise GeoAlgorithmExecutionException(
                'Swmm command line tool is not configured.\n\
Please configure it before running Swmm algorithms.')
folder = ProcessingConfig.getSetting(ProcessingConfig.OUTPUT_FOLDER)
filename = os.path.join(folder, 'swmm.inp')
f = codecs.open(filename,'w',encoding='utf-8')
f.write('[TITLE]\n')
f.write(self.getParameterValue(self.TITLE)+'\n\n')
f.write(self.swmmKeyVal(self.OPTIONS, self.getParameterValue(self.TITLE)))
f.write(self.swmmKeyVal(self.REPORT,self.getParameterValue(self.TITLE)))
f.write(self.swmmTable(self.FILES))
f.write(self.swmmTable(self.RAINGAGES))
f.write(self.swmmTable(self.HYDROGRAPHS))
f.write(self.swmmKeyVal(self.EVAPORATION, self.getParameterValue(self.TITLE)))
f.write(self.swmmTable(self.TEMPERATURE))
f.write(self.swmmTable(self.SUBCATCHMENTS))
f.write(self.swmmTable(self.SUBAREAS))
f.write(self.swmmTable(self.INFILTRATION))
f.write(self.swmmTable(self.LID_CONTROLS))
f.write(self.swmmTable(self.LID_USAGE))
f.write(self.swmmTable(self.AQUIFERS))
f.write(self.swmmTable(self.GROUNDWATER))
f.write(self.swmmTable(self.SNOWPACKS))
f.write(self.swmmTable(self.JUNCTIONS))
f.write(self.swmmTable(self.OUTFALLS))
f.write(self.swmmTable(self.DIVIDERS))
f.write(self.swmmTable(self.STORAGE))
f.write(self.swmmTable(self.CONDUITS))
f.write(self.swmmTable(self.PUMPS))
f.write(self.swmmTable(self.ORIFICES))
f.write(self.swmmTable(self.WEIRS))
f.write(self.swmmTable(self.OUTLETS))
f.write(self.swmmTable(self.XSECTIONS))
f.write(self.swmmTable(self.TRANSECTS))
f.write(self.swmmTable(self.LOSSES))
f.write(self.swmmTable(self.CONTROLS))
f.write(self.swmmTable(self.POLLUTANTS))
f.write(self.swmmTable(self.LANDUSES))
f.write(self.swmmTable(self.COVERAGES))
f.write(self.swmmTable(self.BUILDUP))
f.write(self.swmmTable(self.WASHOFF))
f.write(self.swmmTable(self.TREATMENT))
f.write(self.swmmTable(self.INFLOWS))
f.write(self.swmmTable(self.DWF))
f.write(self.swmmTable(self.PATTERNS))
f.write(self.swmmTable(self.RDII))
f.write(self.swmmTable(self.LOADINGS))
f.write(self.swmmTable(self.CURVES))
f.write(self.swmmTable(self.TIMESERIES))
f.close()
outfilename = os.path.join(folder, 'swmm.out')
progress.setText('running simulation')
log=""
proc = subprocess.Popen(
# this doesn't work on linux, but IMHO should
# [swmm_cli, filename, outfilename]
# this works on linux
swmm_cli+' '+filename+' '+outfilename,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False,
).stdout
for line in iter(proc.readline, ''):
log+=line
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, log)
if re.search('There are errors', log):
o = open(outfilename,'r')
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, o.read())
o.close()
raise RuntimeError('There were errors, look into logs for details')
progress.setText('postprocessing output')
# put features in a map indexed by the identifier (first column)
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.JUNCTIONS))
node_fields = QgsFields()
node_fields.append(QgsField('Node', QVariant.String))
node_fields.append(QgsField('Time', QVariant.String))
node_fields.append(QgsField('Inflow', QVariant.Double))
node_fields.append(QgsField('Flooding', QVariant.Double))
node_fields.append(QgsField('Depth', QVariant.Double))
node_fields.append(QgsField('Head', QVariant.Double))
node_feat = {}
for feat in layer.getFeatures():
if feat.geometry() and feat.geometry().exportToWkt():
node_feat[feat.attributes()[0]] = feat
node_writer = self.getOutputFromName(
self.NODE_OUTPUT).getVectorWriter(node_fields.toList(),
QGis.WKBPoint,
layer.crs())
node_table_writer = self.getOutputFromName(
self.NODE_TABLE_OUTPUT).getTableWriter(node_fields.toList())
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.CONDUITS))
link_fields = QgsFields()
link_fields.append(QgsField('Link', QVariant.String))
link_fields.append(QgsField('Time', QVariant.String))
link_fields.append(QgsField('Flow', QVariant.Double))
link_fields.append(QgsField('Velocity', QVariant.Double))
link_fields.append(QgsField('Depth', QVariant.Double))
link_fields.append(QgsField('PercentFull', QVariant.Double))
link_feat = {}
for feat in layer.getFeatures():
if feat.geometry() and feat.geometry().exportToWkt():
link_feat[feat.attributes()[0]] = feat
link_writer = self.getOutputFromName(
self.LINK_OUTPUT).getVectorWriter(link_fields.toList(),
QGis.WKBLineString,
layer.crs())
# here we create output layers
# it's a python implementation of a join
        # on the identifier field between the results and the JUNCTIONS or PIPES
# geometries
total_size = os.path.getsize(outfilename)
total_read = 0
o = codecs.open(outfilename,'r',encoding='utf-8')
# get nodes results
link_id = ''
node_id = ''
line = o.readline()
while line:
line = line.rstrip()
if (node_id or link_id) and not line:
link_id = ''
node_id = ''
if re.search('^ <<< Node ', line):
node_id = line[11:-4]
for i in range(5): line = o.readline()
line = line.rstrip()
if re.search('^ <<< Link ', line):
link_id = line[11:-4]
for i in range(5): line = o.readline()
line = line.rstrip()
if node_id:
feature = QgsFeature(node_fields)
tbl = line.split()
if len(tbl) >= 6:
feature['Node'] = node_id
feature['Time'] = convert_date(tbl[0])+' '+tbl[1]
feature['Inflow'] = tbl[2]
feature['Flooding'] = tbl[3]
feature['Depth'] = tbl[4]
feature['Head'] = tbl[5]
feat = node_feat.get(node_id, None)
if feat : feature.setGeometry(feat.geometry())
node_writer.addFeature(feature)
node_table_writer.addRecord([node_id]+tbl)
if link_id:
feature = QgsFeature(link_fields)
tbl = line.split()
if len(tbl) >= 6:
feature['Link'] = link_id
feature['Time'] = convert_date(tbl[0])+' '+tbl[1]
feature['Flow'] = tbl[2]
feature['Velocity'] = tbl[3]
feature['Depth'] = tbl[4]
feature['PercentFull'] = tbl[5]
feat = link_feat.get(link_id, None)
if feat : feature.setGeometry(feat.geometry())
link_writer.addFeature(feature)
line = o.readline()
total_read += len(line)
progress.setPercentage(int(100*total_read/total_size))
o.close()
| gpl-2.0 | 8,211,793,255,597,314,000 | 48.940898 | 141 | 0.62097 | false |
LibrePCB/LibrePCB | tests/funq/libraryeditor/conftest.py | 1 | 1156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
library = 'libraries/Populated Library.lplib'
@pytest.fixture
def library_editor(librepcb, helpers):
"""
    Fixture opening the library editor with a populated library
"""
librepcb.add_local_library_to_workspace(path=library)
with librepcb.open() as app:
# Wait until the library scan is finished
helpers.wait_for_library_scan_complete(app)
# Open library manager
app.widget('controlPanelOpenLibraryManagerButton').click()
assert app.widget('libraryManager').properties()['visible'] is True
        # Select the populated library in the library list
library_list = app.widget('libraryManagerInstalledLibrariesList')
helpers.wait_for_model_items_count(library_list, 2)
library_item = library_list.model().items().items[1]
library_list.select_item(library_item)
        # Open the library editor of the populated library
app.widget('libraryManagerLibraryInfoWidgetOpenEditorButton').click()
assert app.widget('libraryEditor').properties()['visible'] is True
# Start the actual test
yield app
| gpl-3.0 | -901,900,597,603,947,900 | 33 | 77 | 0.684256 | false |
pengkobe/leetcode | questions/Regular_Expression_Matching.py | 1 | 6170 | # -*- coding: utf-8 -*-
# Difficulty: ★★★
# Implement a regular expression engine that supports matching '.' and '*', where:
#  . matches any single character
#  * matches zero or more of the preceding character
# The match must cover the entire input, not just part of it; the function signature is:
# bool isMatch(const char *s, const char *p)
# For example:
# isMatch('aa', 'a') // false
# isMatch('aa', 'aa') // true
# isMatch('aaa', 'aa') // false
# isMatch('aa', 'a*') // true
# isMatch('aa', '.*') // true
# isMatch('ab', '.*') // true
# isMatch('aab', 'c*a*b') // true
# Reference solution: https://github.com/barretlee/daily-algorithms/blob/master/answers/6.md
# Wrong Answer 1
# def isMatch(_str,patt):
# if not _str and not patt:
# return True;
# if not _str and not patt.replace("*",""):
# return True;
# if not _str or not patt:
# return False;
# This is inconsistent with the problem requirements
# if patt and patt[0]=="*":
# return isMatch(_str[1:],patt) or isMatch(_str,patt[1:]);
# else:
# return (_str[0]==patt[0] or patt[0] ==".") and isMatch(_str[1:],patt[1:]);
# if __name__ == '__main__':
# assert isMatch('aa', 'a') == False
# assert isMatch('aa', 'aa') == True
# assert isMatch('aaa', 'aaa') == True
# assert isMatch('aaa', '.a') == False
# assert isMatch('aa', '.*') == True
# assert isMatch('aab', '*') == True
# assert isMatch('b', '.*.') == False
# assert isMatch('aab', 'c*a*b') == True
# Backup of submitted Solution 1
# class Solution(object):
# def isMatch(self, _str, patt):
# """
# :type s: str
# :type p: str
# :rtype: bool
# """
# if len(patt)==0:
# return len(_str)==0
# if len(patt)>1 and patt[1]=="*":
# i = 0;
# if len(_str) ==0:
# if self.isMatch(_str[0:],patt[2:]):
# return True;
# while i < len(_str):
# if i == 0 and self.isMatch(_str[0:],patt[2:]):
# return True;
# if _str[i] ==patt[0] or patt[0] ==".":
# if self.isMatch(_str[i+1:],patt[2:]):
# return True;
# else:
# break;
# i = i +1;
# return False;
# else:
# if _str and (_str[0]==patt[0] or patt[0] =="."):
# return self.isMatch(_str[1:],patt[1:]);
# else:
# return False;
# Solution 1
def isMatch2(_str,patt):
if len(patt)==0:
return len(_str)==0
if len(patt)>1 and patt[1]=="*":
i = 0;
if len(_str) ==0:
if isMatch2(_str[0:],patt[2:]):
return True;
while i < len(_str):
if i == 0 and isMatch2(_str[0:],patt[2:]):
return True;
if _str[i] == patt[0] or patt[0] ==".":
if isMatch2(_str[i+1:],patt[2:]):
return True;
else:
break;
i = i +1;
return False;
else:
print('else',_str[0:]);
if _str and (_str[0]==patt[0] or patt[0] =="."):
return isMatch2(_str[1:],patt[1:]);
else:
return False;
if __name__ == '__main__':
assert isMatch2('aa', 'a') == False
assert isMatch2('aa', 'aa') == True
assert isMatch2('aaa', 'aaa') == True
assert isMatch2('aaa', '.a') == False
assert isMatch2('ab', '.*') == True
assert isMatch2('aa', '.*') == True
assert isMatch2('b', '.*.') == True
assert isMatch2('aab', 'c*a*b') == True
assert isMatch2('aaba', 'ab*a*c*a') == False
assert isMatch2('a', '.*..a*') == False
assert isMatch2('a', 'ab*') == True
assert isMatch2('abcd', 'd*') == False
assert isMatch2('ab', '.*c') == False
## Solution 1 (reference version)
# def isMatch3( s, p):
# if len(p)==0:
# return len(s)==0
# if len(p)==1 or p[1]!='*':
# if len(s)==0 or (s[0]!=p[0] and p[0]!='.'):
# return False
# return isMatch3(s[1:],p[1:])
# else:
# i=-1;
# length=len(s)
# while i<length and (i<0 or p[0]=='.' or p[0]==s[i]):
# print(length,i+1,s[i+1:]);
# if isMatch3(s[i+1:],p[2:]):
# return True
# i+=1
# return False
## Dynamic programming solution
## Reasoning
# 1. Initialize everything to False; dp[i][j] records whether the first i characters of s match the first j characters of p
# 2. dp[0][0] = True: an empty string always matches an empty pattern
# 3. When s is empty, handle the "x*" case; note that, per the problem, '*' must be preceded by a non-'*' character
# 4. Fill in the DP table according to the current pattern character:
#    1. '.'
#    2. '*' (the hard part)
#    3. an ordinary character
# @return a boolean
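# Worked example (illustrative): s = "aab", p = "c*a*b"
#   initialization: dp[0][0] = True, and the '*' pre-pass sets dp[0][2] = dp[0][4] = True
#   (the empty string matches "c*" and "c*a*")
#   filling the table row by row ends with dp[3][5] = True, which matches the
#   assertion isMatch4('aab', 'c*a*b') == True below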
def isMatch4(s, p):
s_len = len(s);
p_len = len(p);
dp = [[False for j in range(p_len+1)] for i in range(s_len+1)];
dp[0][0] = True;
for i in range(2,p_len+1):
if p[i-1] == "*":
dp[0][i] = dp[0][i-2];
for i in range(1,s_len+1):
for j in range(1,p_len+1):
if p[j-1] == ".":
dp[i][j] = dp[i-1][j-1];
elif p[j-1] == "*":
                # Pitfall 1: using p[i-2]=="." (it must be p[j-2]==".")
                # Pitfall 2: dp[i-1][j-1] --> dp[i-1][j]
dp[i][j] = dp[i][j-1] or dp[i][j-2] or ((s[i-1] == p[j-2] or p[j-2]==".") and dp[i-1][j]);
else:
dp[i][j] = dp[i-1][j-1] and (s[i-1] == p[j -1]);
return dp[s_len][p_len];
if __name__ == '__main__':
assert isMatch4('aa', 'a') == False
assert isMatch4('aa', 'aa') == True
assert isMatch4('aaa', '.a') == False
assert isMatch4('ab', '.*') == True
assert isMatch4('aa', '.*') == True
assert isMatch4('b', '.*.') == True
assert isMatch4('aab', 'c*a*b') == True
assert isMatch4('aaba', 'ab*a*c*a') == False
assert isMatch4('a', '.*..a*') == False
assert isMatch4('a', 'ab*') == True
assert isMatch4('abcd', 'd*') == False
assert isMatch4('ab', '.*c') == False
assert isMatch4('abc', 'a*c') == False
    # dp[i-1][j-1] --> dp[i-1][j]; for example "aa" vs ".*"
assert isMatch4('aaa', '.*') == True
| gpl-3.0 | -772,837,806,286,783,600 | 29.136126 | 106 | 0.442669 | false |
MisanthropicBit/bibpy | examples/requirements_check.py | 1 | 1795 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Example of checking the requirements of bibtext and biblatex."""
import bibpy
from bibpy.tools import get_abspath_for
def format_requirements_check(required, optional):
s = ""
if required:
s = "required field(s) " + ", ".join(map(str, required))
if optional:
if required:
s += " and "
temp = ["/".join(map(str, opt)) for opt in optional]
s += "optional field(s) " + ", ".join(temp)
return s
if __name__ == '__main__':
filename = get_abspath_for(
__file__,
'../tests/data/biblatex_missing_requirements.bib'
)
entries = bibpy.read_file(filename, format='biblatex').entries
# Collect all results for which a requirements check failed into a list of
# pairs. There is also bibpy.requirements.check for checking individual
# entries
checked = bibpy.requirements.collect(entries, format='biblatex')
print("* Using bibpy.requirements.collect:")
for (entry, (required, optional)) in checked:
if required or optional:
# Either a missing required or optional field for this entry
print("{0}:{1} is missing {2}"
.format(entry.bibtype, entry.bibkey,
format_requirements_check(required, optional)))
# Requirements checks can also be performed on individual entries.
# Use Entry.validate(format) to throw a RequiredFieldError instead of
# returning a bool
entry = entries[2]
print()
print("* {0} for {1}:{2} = {3}".format("entry.valid('biblatex')",
entry.bibtype,
entry.bibkey,
entry.valid('biblatex')))
| mit | 8,251,867,451,367,847,000 | 31.636364 | 78 | 0.578273 | false |
syci/ingadhoc-odoo-addons | partner_views_fields/res_config.py | 1 | 1176 | # -*- coding: utf-8 -*-
from openerp import fields, models
class partner_configuration(models.TransientModel):
_inherit = 'base.config.settings'
group_ref = fields.Boolean(
"Show Reference On Partners Tree View",
implied_group='partner_views_fields.group_ref',
)
group_user_id = fields.Boolean(
"Show Commercial On Partners Tree View",
implied_group='partner_views_fields.group_user_id',
)
group_city = fields.Boolean(
"Show City On Partners Tree and Search Views",
implied_group='partner_views_fields.group_city',
)
group_state_id = fields.Boolean(
"Show State On Partners Tree and Search Views",
implied_group='partner_views_fields.group_state_id',
)
group_country_id = fields.Boolean(
"Show Country On Partners Tree and Search Views",
implied_group='partner_views_fields.group_country_id',
)
group_function = fields.Boolean(
"Show Function On Partners Tree and Search Views",
implied_group='partner_views_fields.group_function',
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,898,531,089,247,119,000 | 34.636364 | 65 | 0.654762 | false |
safwanrahman/kuma | kuma/urls.py | 1 | 2694 | from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.static import serve
from django.views.generic import RedirectView
from kuma.attachments import views as attachment_views
from kuma.core import views as core_views
from kuma.wiki.admin import purge_view
from kuma.wiki.views.legacy import mindtouch_to_kuma_redirect
admin.autodiscover()
handler403 = core_views.handler403
handler404 = core_views.handler404
handler500 = core_views.handler500
urlpatterns = [
url('', include('kuma.landing.urls')),
url(
r'^events',
RedirectView.as_view(
url='https://mozilla.org/contribute/events',
permanent=False
),
name='events'
),
]
if settings.MAINTENANCE_MODE:
urlpatterns.append(
url(
r'^admin/.*',
RedirectView.as_view(
pattern_name='maintenance_mode',
permanent=False
)
)
)
else:
urlpatterns += [
# Django admin:
url(r'^admin/wiki/document/purge/',
purge_view,
name='wiki.admin_bulk_purge'),
url(r'^admin/', include(admin.site.urls)),
]
urlpatterns += [
url(r'^search', include('kuma.search.urls')),
url(r'^docs', include('kuma.wiki.urls')),
url('', include('kuma.attachments.urls')),
url('', include('kuma.dashboards.urls')),
url('', include('kuma.users.urls')),
]
if settings.MAINTENANCE_MODE:
urlpatterns.append(
# Redirect if we try to use the "tidings" unsubscribe.
url(
r'^unsubscribe/.*',
RedirectView.as_view(
pattern_name='maintenance_mode',
permanent=False
)
)
)
else:
urlpatterns.append(
url(r'^', include('tidings.urls')),
)
urlpatterns += [
# Services and sundry.
url(r'^humans.txt$',
serve,
{'document_root': settings.HUMANSTXT_ROOT, 'path': 'humans.txt'}),
url(r'^miel$',
handler500,
name='users.honeypot'),
]
if settings.SERVE_MEDIA:
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += [
url(r'^%s/(?P<path>.*)$' % media_url,
serve,
{'document_root': settings.MEDIA_ROOT}),
]
# Legacy MindTouch redirects. These go last so that they don't mess
# with local instances' ability to serve media.
urlpatterns += [
url(r'^@api/deki/files/(?P<file_id>\d+)/=(?P<filename>.+)$',
attachment_views.mindtouch_file_redirect,
name='attachments.mindtouch_file_redirect'),
url(r'^(?P<path>.*)$',
mindtouch_to_kuma_redirect),
]
| mpl-2.0 | -7,610,912,507,910,773,000 | 25.673267 | 74 | 0.59614 | false |
carthach/essentia | test/src/unittests/rhythm/test_onsetdetection.py | 1 | 5977 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from numpy import *
from essentia_test import *
framesize = 1024
hopsize = 512
class TestOnsetDetection(TestCase):
def testZero(self):
        # Inputting zeros should yield zero onset detection values for every frame
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/techno_loop.wav'),
sampleRate = 44100)()
frames = FrameGenerator(audio, frameSize=framesize, hopSize=hopsize)
win = Windowing(type='hamming')
fft = FFT()
onset_hfc = OnsetDetection(method='hfc')
onset_complex = OnsetDetection(method='complex')
for frame in frames:
fft_frame = fft(win(frame))
mag, ph = CartesianToPolar()(fft_frame)
mag = zeros(len(mag))
self.assertEqual(onset_hfc(mag, ph), 0)
self.assertEqual(onset_complex(mag, ph), 0)
def testImpulse(self):
        # tests that an impulse yields onset detections at the correct position
audiosize = 10000
audio = zeros(audiosize)
pos = 5.5 # impulse will be in between frames 4 and 5
audio[int(floor(pos*(hopsize)))] = 1.
frames = FrameGenerator(audio, frameSize=framesize, hopSize=hopsize,
startFromZero=True)
win = Windowing(type='hamming', zeroPadding=framesize)
fft = FFT()
onset_hfc = OnsetDetection(method='hfc')
onset_complex_phase = OnsetDetection(method='complex_phase')
onset_rms = OnsetDetection(method='rms')
onset_flux = OnsetDetection(method='flux')
onset_melflux = OnsetDetection(method='melflux')
onset_complex = OnsetDetection(method='complex')
nframe = 0
for frame in frames:
mag, ph = CartesianToPolar()(fft(win(frame)))
            # 'rms' (energy flux) and 'melflux' methods will result in a non-zero value on frames 4 and 5;
            # energy flux for frame 6 is zero due to half-rectification.
            # 'flux', on the contrary, will result in a non-zero value for frame 6, as it does not half-rectify
if nframe == floor(pos)-1: # 4th frame
self.assertNotEqual(onset_complex_phase(mag,ph), 0)
self.assertNotEqual(onset_hfc(mag,ph), 0)
self.assertNotEqual(onset_rms(mag,ph), 0)
self.assertNotEqual(onset_flux(mag,ph), 0)
self.assertNotEqual(onset_melflux(mag,ph), 0)
self.assertNotEqual(onset_complex(mag,ph), 0)
elif nframe == ceil(pos)-1: # 5th frame
self.assertNotEqual(onset_complex_phase(mag,ph), 0)
self.assertNotEqual(onset_hfc(mag,ph), 0)
self.assertNotEqual(onset_rms(mag,ph), 0)
self.assertNotEqual(onset_flux(mag,ph), 0)
self.assertNotEqual(onset_melflux(mag,ph), 0)
self.assertNotEqual(onset_complex(mag,ph), 0)
elif nframe == ceil(pos): # 6th frame
self.assertEqual(onset_complex_phase(mag,ph), 0)
self.assertEqual(onset_hfc(mag,ph), 0)
self.assertEqual(onset_rms(mag,ph), 0)
self.assertNotEqual(onset_flux(mag,ph), 0)
self.assertEqual(onset_melflux(mag,ph), 0)
self.assertNotEqual(onset_complex(mag,ph), 0)
else:
self.assertEqual(onset_complex_phase(mag,ph), 0)
self.assertEqual(onset_hfc(mag,ph), 0)
self.assertEqual(onset_rms(mag,ph), 0)
self.assertEqual(onset_flux(mag,ph), 0)
self.assertEqual(onset_melflux(mag,ph), 0)
self.assertEqual(onset_complex(mag,ph), 0)
nframe += 1
def testConstantInput(self):
audio = ones(44100*5)
frames = FrameGenerator(audio, frameSize=framesize, hopSize=hopsize)
win = Windowing(type='hamming')
fft = FFT()
onset_hfc = OnsetDetection(method='hfc')
onset_complex = OnsetDetection(method='complex')
found_complex = []
found_hfc = []
for frame in frames:
fft_frame = fft(win(frame))
mag, ph = CartesianToPolar()(fft_frame)
mag = zeros(len(mag))
found_hfc += [onset_hfc(mag, ph)]
found_complex += [onset_complex(mag, ph)]
self.assertEqualVector(found_complex, zeros(len(found_complex)))
self.assertEqualVector(found_hfc, zeros(len(found_hfc)))
def testInvalidParam(self):
self.assertConfigureFails(OnsetDetection(), { 'sampleRate':-1 })
self.assertConfigureFails(OnsetDetection(), { 'method':'unknown' })
def testEmpty(self):
# Empty input should raise an exception
spectrum = []
phase = []
self.assertComputeFails(OnsetDetection(), spectrum, phase)
spectrum = ones(1024)
self.assertComputeFails(OnsetDetection(method='complex'), spectrum, phase)
def testDifferentSizes(self):
spectrum = ones(1024)
phase = ones(512)
self.assertComputeFails(OnsetDetection(method='complex'),spectrum, phase)
suite = allTests(TestOnsetDetection)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -5,361,231,174,398,711,000 | 41.091549 | 105 | 0.621047 | false |
malt1/lutris | tests/test_installer.py | 1 | 1700 | from unittest import TestCase
from lutris.installer import ScriptInterpreter, ScriptingError
class MockInterpreter(ScriptInterpreter):
""" a script interpreter mock """
script = {'runner': 'linux'}
def is_valid(self):
return True
class TestScriptInterpreter(TestCase):
def test_script_with_correct_values_is_valid(self):
script = {
'runner': 'foo',
'installer': 'bar',
'name': 'baz',
'game_slug': 'baz',
}
interpreter = ScriptInterpreter(script, None)
self.assertFalse(interpreter.errors)
self.assertTrue(interpreter.is_valid())
def test_move_requires_src_and_dst(self):
script = {
'foo': 'bar',
'installer': {},
'name': 'missing_runner',
'game_slug': 'missing-runner'
}
with self.assertRaises(ScriptingError):
interpreter = ScriptInterpreter(script, None)
interpreter._get_move_paths({})
def test_get_command_returns_a_method(self):
interpreter = MockInterpreter({}, None)
command, params = interpreter._map_command({'move': 'whatever'})
self.assertIn("bound method MockInterpreter.move", str(command))
self.assertEqual(params, "whatever")
def test_get_command_doesnt_return_private_methods(self):
""" """
interpreter = MockInterpreter({}, None)
with self.assertRaises(ScriptingError) as ex:
command, params = interpreter._map_command(
{'_substitute': 'foo'}
)
self.assertEqual(ex.exception.message,
"The command substitute does not exists")
| gpl-3.0 | -5,782,265,075,609,909,000 | 33 | 72 | 0.594118 | false |
jossgray/zyrecffi | zyrecffi/_cffi.py | 1 | 3231 | from cffi import FFI
import os, sys
ffi = FFI()
ffi.cdef('''
// zsock.h
typedef struct _zsock_t zsock_t;
// zmsg.h
typedef struct _zmsg_t zmsg_t;
int zmsg_addstr (zmsg_t* self, const char* string);
char* zmsg_popstr (zmsg_t* self);
// zyre.h
typedef struct _zyre_t zyre_t;
zyre_t* zyre_new (const char *name);
void zyre_destroy (zyre_t **self_p);
const char* zyre_uuid (zyre_t *self);
const char *zyre_name (zyre_t *self);
void zyre_set_header (zyre_t *self, const char *name, const char *format, ...);
void zyre_set_verbose (zyre_t *self);
void zyre_set_port (zyre_t *self, int port_nbr);
void zyre_set_interval (zyre_t *self, size_t interval);
void zyre_set_interface (zyre_t *self, const char *value);
int zyre_set_endpoint (zyre_t *self, const char *format, ...);
void zyre_gossip_bind (zyre_t *self, const char *format, ...);
void zyre_gossip_connect (zyre_t *self, const char *format, ...);
int zyre_start (zyre_t *self);
void zyre_stop (zyre_t *self);
int zyre_join (zyre_t *self, const char *group);
int zyre_leave (zyre_t *self, const char *group);
zmsg_t* zyre_recv (zyre_t *self);
int zyre_whisper (zyre_t *self, const char *peer, zmsg_t **msg_p);
int zyre_shout (zyre_t *self, const char *group, zmsg_t **msg_p);
int zyre_whispers (zyre_t *self, const char *peer, const char *format, ...);
int zyre_shouts (zyre_t *self, const char *group, const char *format, ...);
zsock_t* zyre_socket (zyre_t *self);
void zyre_dump (zyre_t *self);
void zyre_version (int *major, int *minor, int *patch);
void zyre_test (bool verbose);
// zhash.h
typedef struct _zhash_t zhash_t;
// zyre_event.h
typedef struct _zyre_event_t zyre_event_t;
typedef enum {
ZYRE_EVENT_ENTER = 1,
ZYRE_EVENT_JOIN = 2,
ZYRE_EVENT_LEAVE = 3,
ZYRE_EVENT_EXIT = 4,
ZYRE_EVENT_WHISPER = 5,
ZYRE_EVENT_SHOUT = 6
} zyre_event_type_t;
zyre_event_t* zyre_event_new (zyre_t *self);
void zyre_event_destroy (zyre_event_t **self_p);
zyre_event_type_t zyre_event_type (zyre_event_t *self);
char * zyre_event_sender (zyre_event_t *self);
char * zyre_event_name (zyre_event_t *self);
char * zyre_event_address (zyre_event_t *self);
char * zyre_event_header (zyre_event_t *self, char *name);
char * zyre_event_group (zyre_event_t *self);
zmsg_t * zyre_event_msg (zyre_event_t *self);
zhash_t * zyre_event_headers (zyre_event_t *self);
// zsys.h
const char * zsys_interface ();
// zsock_option.h
int zsock_fd (zsock_t *self);
// zpoller.h
typedef struct _zpoller_t zpoller_t;
zpoller_t * zpoller_new (void *reader, ...);
void zpoller_destroy (zpoller_t **self_p);
void * zpoller_wait (zpoller_t *self, int timeout);
int zpoller_add (zpoller_t *self, void *reader);
''')
os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.abspath(os.path.join(os.path.dirname(__file__)))
_zyre_lib_name, _czmq_lib_name = 'zyre', 'czmq'
if sys.platform == 'win32':
_zyre_lib_name, _czmq_lib_name = 'zyre.dll', 'czmq.dll'
zyre_lib = ffi.dlopen(_zyre_lib_name)
czmq_lib = ffi.dlopen(_czmq_lib_name)
new_int_ptr = lambda val: ffi.new('int*', val)
new_void_ptr = lambda val: ffi.new('void*', val)
c_string_to_py = lambda s: ffi.string(s) if s else None
check_null = lambda val: val if val else None | gpl-3.0 | 5,003,882,334,878,248,000 | 21.444444 | 104 | 0.665738 | false |
cwisecarver/osf.io | api/base/serializers.py | 1 | 58011 | import collections
import re
import furl
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from rest_framework import exceptions, permissions
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from rest_framework.fields import get_attribute as get_nested_attributes
from api.base import utils
from api.base.exceptions import InvalidQueryStringError
from api.base.exceptions import Conflict
from api.base.exceptions import JSONAPIException
from api.base.exceptions import TargetNotSupportedError
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.settings import BULK_SETTINGS
from api.base.utils import absolute_reverse, extend_querystring_params, get_user_auth, extend_querystring_if_key_exists
from framework.auth import core as auth_core
from osf.models import AbstractNode as Node
from website import settings
from website import util as website_utils
from website.util.sanitize import strip_html
from website.project.model import has_anonymous_link
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None):
"""
Properly handles formatting of self and related links according to JSON API.
Removes related or self link, if none.
"""
ret = {'links': {}}
if related_link:
ret['links'].update({
'related': {
'href': related_link or {},
'meta': rel_meta or {}
}
})
if self_link:
ret['links'].update({
'self': {
'href': self_link or {},
'meta': self_meta or {}
}
})
return ret
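# Illustrative example (not part of the original module): a call such as
#   format_relationship_links('/v2/nodes/abc12/children/',
#                             '/v2/nodes/abc12/relationships/children/',
#                             rel_meta={'count': 5})
# would return
#   {'links': {'related': {'href': '/v2/nodes/abc12/children/', 'meta': {'count': 5}},
#              'self': {'href': '/v2/nodes/abc12/relationships/children/', 'meta': {}}}}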
def is_anonymized(request):
if hasattr(request, '_is_anonymized'):
return request._is_anonymized
private_key = request.query_params.get('view_only', None)
request._is_anonymized = website_utils.check_private_key_for_anonymized_link(private_key)
return request._is_anonymized
class ShowIfVersion(ser.Field):
"""
Skips the field if the specified request version is not after a feature's earliest supported version,
or not before the feature's latest supported version.
"""
def __init__(self, field, min_version, max_version, **kwargs):
super(ShowIfVersion, self).__init__(**kwargs)
self.field = field
self.required = field.required
self.read_only = field.read_only
self.min_version = min_version
self.max_version = max_version
self.help_text = 'This field is deprecated as of version {}'.format(self.max_version) or kwargs.get('help_text')
def get_attribute(self, instance):
request = self.context.get('request')
if request and utils.is_deprecated(request.version, self.min_version, self.max_version):
raise SkipField
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(ShowIfVersion, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
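# Usage sketch (hypothetical field, not from the original file): a serializer can expose
# an attribute only for a range of API versions, e.g.
#   some_deprecated_field = ShowIfVersion(ser.CharField(read_only=True),
#                                         min_version='2.0', max_version='2.3')
# The wrapped field is serialized only when the request version falls within
# [min_version, max_version]; outside that range get_attribute raises SkipField.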
class HideIfRegistration(ser.Field):
"""
If node is a registration, this field will return None.
"""
def __init__(self, field, **kwargs):
super(HideIfRegistration, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_registration:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfRegistration, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfDisabled(ser.Field):
"""
If the user is disabled, returns None for attribute fields, or skips
if a RelationshipField.
"""
def __init__(self, field, **kwargs):
super(HideIfDisabled, self).__init__(**kwargs)
self.field = field
self.source = field.source
self.required = field.required
self.read_only = field.read_only
def get_attribute(self, instance):
if instance.is_disabled:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
def bind(self, field_name, parent):
super(HideIfDisabled, self).bind(field_name, parent)
self.field.bind(field_name, self)
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def to_representation(self, value):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_representation(value)
def to_esi_representation(self, value, envelope='data'):
if getattr(self.field.root, 'child', None):
self.field.parent = self.field.root.child
else:
self.field.parent = self.field.root
return self.field.to_esi_representation(value, envelope)
class HideIfWithdrawal(HideIfRegistration):
"""
If registration is withdrawn, this field will return None.
"""
def get_attribute(self, instance):
if instance.is_retracted:
if isinstance(self.field, RelationshipField):
raise SkipField
else:
return None
return self.field.get_attribute(instance)
class AllowMissing(ser.Field):
def __init__(self, field, **kwargs):
super(AllowMissing, self).__init__(**kwargs)
self.field = field
def to_representation(self, value):
return self.field.to_representation(value)
def bind(self, field_name, parent):
super(AllowMissing, self).bind(field_name, parent)
self.field.bind(field_name, self)
def get_attribute(self, instance):
"""
        Overwrite the error message to return a blank value if there is no existing value.
        This allows the display of keys that do not exist in the DB (GitHub on a new OSF account, for example).
"""
try:
return self.field.get_attribute(instance)
except SkipField:
return ''
def to_internal_value(self, data):
return self.field.to_internal_value(data)
def _url_val(val, obj, serializer, request, **kwargs):
"""Function applied by `HyperlinksField` to get the correct value in the
schema.
"""
url = None
if isinstance(val, Link): # If a Link is passed, get the url value
url = val.resolve_url(obj, request)
elif isinstance(val, basestring): # if a string is passed, it's a method of the serializer
if getattr(serializer, 'field', None):
serializer = serializer.parent
url = getattr(serializer, val)(obj) if obj is not None else None
else:
url = val
if not url and url != 0:
raise SkipField
else:
return url
class DateByVersion(ser.DateTimeField):
"""
    Custom DateTimeField that forces dates into the ISO-8601 format with timezone information for API versions 2.2 and later.
"""
def to_representation(self, value):
request = self.context.get('request')
if request:
if request.version >= '2.2':
self.format = '%Y-%m-%dT%H:%M:%S.%fZ'
else:
self.format = '%Y-%m-%dT%H:%M:%S.%f' if value.microsecond else '%Y-%m-%dT%H:%M:%S'
return super(DateByVersion, self).to_representation(value)
class IDField(ser.CharField):
"""
ID field that validates that 'id' in the request body is the same as the instance 'id' for single requests.
"""
def __init__(self, **kwargs):
kwargs['label'] = 'ID'
super(IDField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
request = self.context.get('request')
if request:
if request.method in utils.UPDATE_METHODS and not utils.is_bulk_request(request):
id_field = self.get_id(self.root.instance)
if id_field != data:
raise Conflict(detail=('The id you used in the URL, "{}", does not match the id you used in the json body\'s id field, "{}". The object "{}" exists, otherwise you\'d get a 404, so most likely you need to change the id field to match.'.format(id_field, data, id_field)))
return super(IDField, self).to_internal_value(data)
def get_id(self, obj):
return getattr(obj, self.source, '_id')
class TypeField(ser.CharField):
"""
Type field that validates that 'type' in the request body is the same as the Meta type.
Also ensures that type is write-only and required.
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
super(TypeField, self).__init__(**kwargs)
# Overrides CharField
def to_internal_value(self, data):
if isinstance(self.root, JSONAPIListSerializer):
type_ = self.root.child.Meta.type_
else:
type_ = self.root.Meta.type_
if type_ != data:
raise Conflict(detail=('This resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the resource\'s type.'.format(type_, data)))
return super(TypeField, self).to_internal_value(data)
class TargetTypeField(ser.CharField):
"""
Enforces that the related resource has the correct type
"""
def __init__(self, **kwargs):
kwargs['write_only'] = True
kwargs['required'] = True
self.target_type = kwargs.pop('target_type')
super(TargetTypeField, self).__init__(**kwargs)
def to_internal_value(self, data):
if self.target_type != data:
raise Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(self.target_type, data)))
return super(TargetTypeField, self).to_internal_value(data)
class JSONAPIListField(ser.ListField):
def to_internal_value(self, data):
if not isinstance(data, list):
self.fail('not_a_list', input_type=type(data).__name__)
return super(JSONAPIListField, self).to_internal_value(data)
class AuthorizedCharField(ser.CharField):
"""
Passes auth of the logged-in user to the object's method
defined as the field source.
Example:
content = AuthorizedCharField(source='get_content')
"""
def __init__(self, source, **kwargs):
self.source = source
super(AuthorizedCharField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
user = self.context['request'].user
auth = auth_core.Auth(user)
field_source_method = getattr(obj, self.source)
return field_source_method(auth=auth)
class AnonymizedRegexField(AuthorizedCharField):
"""
Performs a regex replace on the content of the authorized object's
source field when an anonymous view is requested.
Example:
content = AnonymizedRegexField(source='get_content', regex='\[@[^\]]*\]\([^\) ]*\)', replace='@A User')
"""
def __init__(self, source, regex, replace, **kwargs):
self.source = source
self.regex = regex
self.replace = replace
super(AnonymizedRegexField, self).__init__(source=self.source, **kwargs)
def get_attribute(self, obj):
value = super(AnonymizedRegexField, self).get_attribute(obj)
if value:
user = self.context['request'].user
auth = auth_core.Auth(user)
if 'view_only' in self.context['request'].query_params:
auth.private_key = self.context['request'].query_params['view_only']
if has_anonymous_link(obj.node, auth):
value = re.sub(self.regex, self.replace, value)
return value
class RelationshipField(ser.HyperlinkedIdentityField):
"""
RelationshipField that permits the return of both self and related links, along with optional
meta information. ::
children = RelationshipField(
related_view='nodes:node-children',
related_view_kwargs={'node_id': '<_id>'},
self_view='nodes:node-node-children-relationship',
self_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'}
)
The lookup field must be surrounded in angular brackets to find the attribute on the target. Otherwise, the lookup
field will be returned verbatim. ::
wiki_home = RelationshipField(
related_view='addon:addon-detail',
related_view_kwargs={'node_id': '<_id>', 'provider': 'wiki'},
)
    '_id' is enclosed in angular brackets, but 'wiki' is not. '_id' will be looked up on the target, but 'wiki' will not.
The serialized result would be '/nodes/abc12/addons/wiki'.
Field can handle nested attributes: ::
wiki_home = RelationshipField(
related_view='wiki:wiki-detail',
related_view_kwargs={'node_id': '<_id>', 'wiki_id': '<wiki_pages_current.home>'}
)
    Field can handle a filter_key, which operates as the source field (but
    is named differently to not interfere with HyperlinkedIdentityField's source).
    The ``filter_key`` argument defines the Mongo key (or ODM field name) to filter on
    when using the ``FilterMixin`` on a view. ::
parent = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node'
)
Field can include optional filters:
Example:
replies = RelationshipField(
self_view='nodes:node-comments',
self_view_kwargs={'node_id': '<node._id>'},
filter={'target': '<_id>'})
)
"""
json_api_link = True # serializes to a links object
def __init__(self, related_view=None, related_view_kwargs=None, self_view=None, self_view_kwargs=None,
self_meta=None, related_meta=None, always_embed=False, filter=None, filter_key=None, **kwargs):
related_view = related_view
self_view = self_view
related_kwargs = related_view_kwargs
self_kwargs = self_view_kwargs
self.views = {'related': related_view, 'self': self_view}
self.view_kwargs = {'related': related_kwargs, 'self': self_kwargs}
self.related_meta = related_meta
self.self_meta = self_meta
self.always_embed = always_embed
self.filter = filter
self.filter_key = filter_key
assert (related_view is not None or self_view is not None), 'Self or related view must be specified.'
if related_view:
assert related_kwargs is not None, 'Must provide related view kwargs.'
if not callable(related_kwargs):
assert isinstance(related_kwargs,
dict), "Related view kwargs must have format {'lookup_url_kwarg: lookup_field}."
if self_view:
assert self_kwargs is not None, 'Must provide self view kwargs.'
assert isinstance(self_kwargs, dict), "Self view kwargs must have format {'lookup_url_kwarg: lookup_field}."
view_name = related_view
if view_name:
lookup_kwargs = related_kwargs
else:
view_name = self_view
lookup_kwargs = self_kwargs
if kwargs.get('lookup_url_kwarg', None):
lookup_kwargs = kwargs.pop('lookup_url_kwarg')
super(RelationshipField, self).__init__(view_name, lookup_url_kwarg=lookup_kwargs, **kwargs)
# Allow a RelationshipField to be modified if explicitly set so
if kwargs.get('read_only') is not None:
self.read_only = kwargs['read_only']
def resolve(self, resource, field_name, request):
"""
Resolves the view when embedding.
"""
lookup_url_kwarg = self.lookup_url_kwarg
if callable(lookup_url_kwarg):
lookup_url_kwarg = lookup_url_kwarg(getattr(resource, field_name))
kwargs = {attr_name: self.lookup_attribute(resource, attr) for (attr_name, attr) in lookup_url_kwarg.items()}
kwargs.update({'version': request.parser_context['kwargs']['version']})
view = self.view_name
if callable(self.view_name):
view = view(getattr(resource, field_name))
return resolve(
reverse(
view,
kwargs=kwargs
)
)
def process_related_counts_parameters(self, params, value):
"""
Processes related_counts parameter.
Can either be a True/False value for fetching counts on all fields, or a comma-separated list for specifying
individual fields. Ensures field for which we are requesting counts is a relationship field.
"""
if utils.is_truthy(params) or utils.is_falsy(params):
return params
field_counts_requested = [val for val in params.split(',')]
countable_fields = {field for field in self.parent.fields if
getattr(self.parent.fields[field], 'json_api_link', False) or
getattr(getattr(self.parent.fields[field], 'field', None), 'json_api_link', None)}
for count_field in field_counts_requested:
# Some fields will hide relationships, e.g. HideIfWithdrawal
# Ignore related_counts for these fields
fetched_field = self.parent.fields.get(count_field)
hidden = fetched_field and isinstance(fetched_field, HideIfWithdrawal) and getattr(value, 'is_retracted', False)
if not hidden and count_field not in countable_fields:
raise InvalidQueryStringError(
detail="Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got '{0}'".format(
params),
parameter='related_counts'
)
return field_counts_requested
def get_meta_information(self, meta_data, value):
"""
For retrieving meta values, otherwise returns {}
"""
meta = {}
for key in meta_data or {}:
if key == 'count' or key == 'unread':
show_related_counts = self.context['request'].query_params.get('related_counts', False)
if self.context['request'].parser_context.get('kwargs'):
if self.context['request'].parser_context['kwargs'].get('is_embedded'):
show_related_counts = False
field_counts_requested = self.process_related_counts_parameters(show_related_counts, value)
if utils.is_truthy(show_related_counts):
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
elif utils.is_falsy(show_related_counts):
continue
elif self.field_name in field_counts_requested:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
else:
continue
elif key == 'projects_in_common':
if not get_user_auth(self.context['request']).user:
continue
if not self.context['request'].query_params.get('show_projects_in_common', False):
continue
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
else:
meta[key] = website_utils.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
return meta
def lookup_attribute(self, obj, lookup_field):
"""
        Returns the lookup field verbatim unless it is surrounded in angle brackets, in which case the corresponding attribute is looked up on the target object.
Also handles the lookup of nested attributes.
"""
bracket_check = _tpl(lookup_field)
if bracket_check:
source_attrs = bracket_check.split('.')
# If you are using a nested attribute for lookup, and you get the attribute wrong, you will not get an
# error message, you will just not see that field. This allows us to have slightly more dynamic use of
# nested attributes in relationship fields.
try:
return_val = get_nested_attributes(obj, source_attrs)
except KeyError:
return None
return return_val
return lookup_field
def kwargs_lookup(self, obj, kwargs_dict):
"""
For returning kwargs dictionary of format {"lookup_url_kwarg": lookup_value}
"""
if callable(kwargs_dict):
kwargs_dict = kwargs_dict(obj)
kwargs_retrieval = {}
for lookup_url_kwarg, lookup_field in kwargs_dict.items():
try:
lookup_value = self.lookup_attribute(obj, lookup_field)
except AttributeError as exc:
raise AssertionError(exc)
if lookup_value is None:
return None
kwargs_retrieval[lookup_url_kwarg] = lookup_value
return kwargs_retrieval
# Overrides HyperlinkedIdentityField
def get_url(self, obj, view_name, request, format):
urls = {}
for view_name, view in self.views.items():
if view is None:
urls[view_name] = {}
else:
kwargs = self.kwargs_lookup(obj, self.view_kwargs[view_name])
if kwargs is None:
urls[view_name] = {}
else:
if callable(view):
view = view(getattr(obj, self.field_name))
kwargs.update({'version': request.parser_context['kwargs']['version']})
url = self.reverse(view, kwargs=kwargs, request=request, format=format)
if self.filter:
formatted_filters = self.format_filter(obj)
if formatted_filters:
for filter in formatted_filters:
url = extend_querystring_params(
url,
{'filter[{}]'.format(filter['field_name']): filter['value']}
)
else:
url = None
url = extend_querystring_if_key_exists(url, self.context['request'], 'view_only')
urls[view_name] = url
if not urls['self'] and not urls['related']:
urls = None
return urls
def to_esi_representation(self, value, envelope='data'):
relationships = self.to_representation(value)
try:
href = relationships['links']['related']['href']
except KeyError:
raise SkipField
else:
if href and not href == '{}':
if self.always_embed:
envelope = 'data'
query_dict = dict(format=['jsonapi', ], envelope=[envelope, ])
if 'view_only' in self.parent.context['request'].query_params.keys():
query_dict.update(view_only=[self.parent.context['request'].query_params['view_only']])
esi_url = extend_querystring_params(href, query_dict)
return '<esi:include src="{}"/>'.format(esi_url)
def format_filter(self, obj):
""" Take filters specified in self.filter and format them in a way that can be easily parametrized
:param obj: RelationshipField object
:return: list of dictionaries with 'field_name' and 'value' for each filter
"""
filter_fields = self.filter.keys()
filters = []
for field_name in filter_fields:
try:
# check if serializer method passed in
serializer_method = getattr(self.parent, self.filter[field_name])
except AttributeError:
value = self.lookup_attribute(obj, self.filter[field_name])
else:
value = serializer_method(obj)
if not value:
continue
filters.append({'field_name': field_name, 'value': value})
return filters if filters else None
# Overrides HyperlinkedIdentityField
def to_representation(self, value):
request = self.context.get('request', None)
format = self.context.get('format', None)
assert request is not None, (
'`%s` requires the request in the serializer'
" context. Add `context={'request': request}` when instantiating "
'the serializer.' % self.__class__.__name__
)
# By default use whatever format is given for the current context
# unless the target is a different type to the source.
#
# Eg. Consider a HyperlinkedIdentityField pointing from a json
# representation to an html property of that representation...
#
# '/snippets/1/' should link to '/snippets/1/highlight/'
# ...but...
# '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
if format and self.format and self.format != format:
format = self.format
# Return the hyperlink, or error if incorrectly configured.
try:
url = self.get_url(value, self.view_name, request, format)
except NoReverseMatch:
msg = (
'Could not resolve URL for hyperlinked relationship using '
'view name "%s". You may have failed to include the related '
'model in your API, or incorrectly configured the '
'`lookup_field` attribute on this field.'
)
if value in ('', None):
value_string = {'': 'the empty string', None: 'None'}[value]
msg += (
' WARNING: The value of the field on the model instance '
"was %s, which may be why it didn't match any "
'entries in your URL conf.' % value_string
)
raise ImproperlyConfigured(msg % self.view_name)
if url is None:
raise SkipField
related_url = url['related']
related_meta = self.get_meta_information(self.related_meta, value)
self_url = url['self']
self_meta = self.get_meta_information(self.self_meta, value)
return format_relationship_links(related_url, self_url, related_meta, self_meta)
class FileCommentRelationshipField(RelationshipField):
def get_url(self, obj, view_name, request, format):
if obj.kind == 'folder':
raise SkipField
return super(FileCommentRelationshipField, self).get_url(obj, view_name, request, format)
class TargetField(ser.Field):
"""
Field that returns a nested dict with the url (constructed based
on the object's type), optional meta information, and link_type.
Example:
target = TargetField(link_type='related', meta={'type': 'get_target_type'})
"""
json_api_link = True # serializes to a links object
view_map = {
'node': {
'view': 'nodes:node-detail',
'lookup_kwarg': 'node_id'
},
'comment': {
'view': 'comments:comment-detail',
'lookup_kwarg': 'comment_id'
},
'nodewikipage': {
'view': None,
'lookup_kwarg': None
}
}
def __init__(self, **kwargs):
self.meta = kwargs.pop('meta', {})
self.link_type = kwargs.pop('link_type', 'url')
super(TargetField, self).__init__(read_only=True, **kwargs)
def resolve(self, resource, field_name, request):
"""
Resolves the view for target node or target comment when embedding.
"""
view_info = self.view_map.get(resource.target.referent._name, None)
if not view_info:
raise TargetNotSupportedError('{} is not a supported target type'.format(
resource.target._name
))
if not view_info['view']:
return None, None, None
embed_value = resource.target._id
return resolve(
reverse(
view_info['view'],
kwargs={
view_info['lookup_kwarg']: embed_value,
'version': request.parser_context['kwargs']['version']
}
)
)
def to_esi_representation(self, value, envelope='data'):
href = value.get_absolute_url()
if href:
esi_url = extend_querystring_params(href, dict(envelope=[envelope, ], format=['jsonapi', ]))
return '<esi:include src="{}"/>'.format(esi_url)
return self.to_representation(value)
def to_representation(self, value):
"""
        Returns a nested dictionary in the format {'links': {<self.link_type>: ...}}.
If no meta information, self.link_type is equal to a string containing link's URL. Otherwise,
the link is represented as a links object with 'href' and 'meta' members.
"""
meta = website_utils.rapply(self.meta, _url_val, obj=value, serializer=self.parent, request=self.context['request'])
return {'links': {self.link_type: {'href': value.referent.get_absolute_url(), 'meta': meta}}}
class LinksField(ser.Field):
"""Links field that resolves to a links object. Used in conjunction with `Link`.
If the object to be serialized implements `get_absolute_url`, then the return value
of that method is used for the `self` link.
Example: ::
links = LinksField({
'html': 'absolute_url',
'children': {
'related': Link('nodes:node-children', node_id='<_id>'),
'count': 'get_node_count'
},
'contributors': {
'related': Link('nodes:node-contributors', node_id='<_id>'),
'count': 'get_contrib_count'
},
'registrations': {
'related': Link('nodes:node-registrations', node_id='<_id>'),
'count': 'get_registration_count'
},
})
"""
def __init__(self, links, *args, **kwargs):
ser.Field.__init__(self, read_only=True, *args, **kwargs)
self.links = links
def get_attribute(self, obj):
# We pass the object instance onto `to_representation`,
# not just the field attribute.
return obj
def extend_absolute_url(self, obj):
return extend_querystring_if_key_exists(obj.get_absolute_url(), self.context['request'], 'view_only')
def to_representation(self, obj):
ret = {}
for name, value in self.links.iteritems():
try:
url = _url_val(value, obj=obj, serializer=self.parent, request=self.context['request'])
except SkipField:
continue
else:
ret[name] = url
if hasattr(obj, 'get_absolute_url') and 'self' not in self.links:
ret['self'] = self.extend_absolute_url(obj)
return ret
class ListDictField(ser.DictField):
def __init__(self, **kwargs):
super(ListDictField, self).__init__(**kwargs)
def to_representation(self, value):
"""
        Ensure the value of each key in the dict is a list.
"""
res = {}
for key, val in value.items():
if isinstance(self.child.to_representation(val), list):
res[six.text_type(key)] = self.child.to_representation(val)
else:
if self.child.to_representation(val):
res[six.text_type(key)] = [self.child.to_representation(val)]
else:
res[six.text_type(key)] = []
return res
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
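# Illustrative examples (not in the original source):
#   _tpl('<_id>')                      -> '_id'
#   _tpl('<wiki_pages_current.home>')  -> 'wiki_pages_current.home'
#   _tpl('wiki')                       -> None  (no angle brackets, so no lookup)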
def _get_attr_from_tpl(attr_tpl, obj):
attr_name = _tpl(str(attr_tpl))
if attr_name:
attribute_value = obj
for attr_segment in attr_name.split('.'):
attribute_value = getattr(attribute_value, attr_segment, ser.empty)
if attribute_value is not ser.empty:
return attribute_value
elif attr_name in obj:
return obj[attr_name]
else:
raise AttributeError(
'{attr_name!r} is not a valid '
'attribute of {obj!r}'.format(
attr_name=attr_name, obj=obj,
))
else:
return attr_tpl
# TODO: Make this a Field that is usable on its own?
class Link(object):
"""Link object to use in conjunction with Links field. Does reverse lookup of
    URLs given an endpoint name and attributes enclosed in `<>`. This includes
complex key strings like 'user.id'
"""
def __init__(self, endpoint, args=None, kwargs=None, query_kwargs=None, **kw):
self.endpoint = endpoint
self.kwargs = kwargs or {}
self.args = args or tuple()
self.reverse_kwargs = kw
self.query_kwargs = query_kwargs or {}
def resolve_url(self, obj, request):
kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.kwargs.items()}
kwarg_values.update({'version': request.parser_context['kwargs']['version']})
arg_values = [_get_attr_from_tpl(attr_tpl, obj) for attr_tpl in self.args]
query_kwarg_values = {key: _get_attr_from_tpl(attr_tpl, obj) for key, attr_tpl in self.query_kwargs.items()}
        # Presumably, if you are expecting a value but the value is empty, then the link is invalid.
for item in kwarg_values:
if kwarg_values[item] is None:
raise SkipField
return utils.absolute_reverse(
self.endpoint,
args=arg_values,
kwargs=kwarg_values,
query_kwargs=query_kwarg_values,
**self.reverse_kwargs
)
class WaterbutlerLink(Link):
"""Link object to use in conjunction with Links field. Builds a Waterbutler URL for files.
"""
def __init__(self, must_be_file=None, must_be_folder=None, **kwargs):
self.kwargs = kwargs
self.must_be_file = must_be_file
self.must_be_folder = must_be_folder
def resolve_url(self, obj, request):
"""Reverse URL lookup for WaterButler routes
"""
if self.must_be_folder is True and not obj.path.endswith('/'):
raise SkipField
if self.must_be_file is True and obj.path.endswith('/'):
raise SkipField
url = website_utils.waterbutler_api_url_for(obj.node._id, obj.provider, obj.path, **self.kwargs)
if not url:
raise SkipField
else:
return url
class NodeFileHyperLinkField(RelationshipField):
def __init__(self, kind=None, never_embed=False, **kws):
self.kind = kind
self.never_embed = never_embed
super(NodeFileHyperLinkField, self).__init__(**kws)
def get_url(self, obj, view_name, request, format):
if self.kind and obj.kind != self.kind:
raise SkipField
return super(NodeFileHyperLinkField, self).get_url(obj, view_name, request, format)
class JSONAPIListSerializer(ser.ListSerializer):
def to_representation(self, data):
enable_esi = self.context.get('enable_esi', False)
envelope = self.context.update({'envelope': None})
# Don't envelope when serializing collection
errors = {}
bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
if isinstance(data, collections.Mapping):
errors = data.get('errors', None)
data = data.get('data', None)
if enable_esi:
ret = [
self.child.to_esi_representation(item, envelope=None) for item in data
]
else:
ret = [
self.child.to_representation(item, envelope=envelope) for item in data
]
if errors and bulk_skip_uneditable:
ret.append({'errors': errors})
return ret
# Overrides ListSerializer which doesn't support multiple update by default
def update(self, instance, validated_data):
# avoiding circular import
from api.nodes.serializers import ContributorIDField
# if PATCH request, the child serializer's partial attribute needs to be True
if self.context['request'].method == 'PATCH':
self.child.partial = True
bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
if not bulk_skip_uneditable:
if len(instance) != len(validated_data):
raise exceptions.ValidationError({'non_field_errors': 'Could not find all objects to update.'})
id_lookup = self.child.fields['id'].source
data_mapping = {item.get(id_lookup): item for item in validated_data}
if isinstance(self.child.fields['id'], ContributorIDField):
instance_mapping = {self.child.fields['id'].get_id(item): item for item in instance}
else:
instance_mapping = {getattr(item, id_lookup): item for item in instance}
ret = {'data': []}
for resource_id, resource in instance_mapping.items():
data = data_mapping.pop(resource_id, None)
ret['data'].append(self.child.update(resource, data))
# If skip_uneditable in request, add validated_data for nodes in which the user did not have edit permissions to errors
if data_mapping and bulk_skip_uneditable:
ret.update({'errors': data_mapping.values()})
return ret
# overrides ListSerializer
def run_validation(self, data):
meta = getattr(self, 'Meta', None)
bulk_limit = getattr(meta, 'bulk_limit', BULK_SETTINGS['DEFAULT_BULK_LIMIT'])
num_items = len(data)
if num_items > bulk_limit:
raise JSONAPIException(source={'pointer': '/data'},
detail='Bulk operation limit is {}, got {}.'.format(bulk_limit, num_items))
return super(JSONAPIListSerializer, self).run_validation(data)
# overrides ListSerializer: Add HTML-sanitization similar to that used by APIv1 front-end views
def is_valid(self, clean_html=True, **kwargs):
"""
After validation, scrub HTML from validated_data prior to saving (for create and update views)
Exclude 'type' from validated_data.
"""
ret = super(JSONAPIListSerializer, self).is_valid(**kwargs)
if clean_html is True:
self._validated_data = website_utils.rapply(self.validated_data, strip_html)
for data in self._validated_data:
data.pop('type', None)
return ret
class SparseFieldsetMixin(object):
def parse_sparse_fields(self, allow_unsafe=False, **kwargs):
request = kwargs.get('context', {}).get('request', None)
if request and (allow_unsafe or request.method in permissions.SAFE_METHODS):
sparse_fieldset_query_param = 'fields[{}]'.format(self.Meta.type_)
if sparse_fieldset_query_param in request.query_params:
fieldset = request.query_params[sparse_fieldset_query_param].split(',')
for field_name in self.fields.fields.copy().keys():
if field_name in ('id', 'links', 'type'):
# MUST return these fields
continue
if field_name not in fieldset:
self.fields.pop(field_name)
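# Usage sketch (hypothetical request, not from the original file): a client asks for a
# sparse fieldset with a query string such as
#   GET /v2/nodes/?fields[nodes]=title,category
# parse_sparse_fields() then pops every other field from the serializer, while
# 'id', 'type' and 'links' are always kept, as required by the JSON API spec.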
class BaseAPISerializer(ser.Serializer, SparseFieldsetMixin):
def __init__(self, *args, **kwargs):
self.parse_sparse_fields(**kwargs)
super(BaseAPISerializer, self).__init__(*args, **kwargs)
self.model_field_names = [name if field.source == '*' else field.source
for name, field in self.fields.iteritems()]
class JSONAPISerializer(BaseAPISerializer):
"""Base serializer. Requires that a `type_` option is set on `class Meta`. Also
allows for enveloping of both single resources and collections. Looks to nest fields
according to JSON API spec. Relational fields must set json_api_link=True flag.
Self/html links must be nested under "links".
"""
# Don't serialize relationships that use these views
# when viewing thru an anonymous VOL
views_to_hide_if_anonymous = {
'users:user-detail',
'nodes:node-registrations',
}
# overrides Serializer
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls(*args, **kwargs)
return JSONAPIListSerializer(*args, **kwargs)
def invalid_embeds(self, fields, embeds):
fields_check = fields[:]
for index, field in enumerate(fields_check):
if getattr(field, 'field', None):
fields_check[index] = field.field
invalid_embeds = set(embeds.keys()) - set(
[f.field_name for f in fields_check if getattr(f, 'json_api_link', False)])
return invalid_embeds
def to_esi_representation(self, data, envelope='data'):
href = None
query_params_blacklist = ['page[size]']
href = self.get_absolute_url(data)
if href and href != '{}':
esi_url = furl.furl(href).add(args=dict(self.context['request'].query_params)).remove(
args=query_params_blacklist).remove(args=['envelope']).add(args={'envelope': envelope}).url
return '<esi:include src="{}"/>'.format(esi_url)
# failsafe, let python do it if something bad happened in the ESI construction
return super(JSONAPISerializer, self).to_representation(data)
# overrides Serializer
def to_representation(self, obj, envelope='data'):
"""Serialize to final representation.
:param obj: Object to be serialized.
:param envelope: Key for resource object.
"""
ret = {}
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
self.parse_sparse_fields(allow_unsafe=True, context=self.context)
data = {
'id': '',
'type': type_,
'attributes': {},
'relationships': {},
'embeds': {},
'links': {},
}
embeds = self.context.get('embed', {})
context_envelope = self.context.get('envelope', envelope)
if context_envelope == 'None':
context_envelope = None
enable_esi = self.context.get('enable_esi', False)
is_anonymous = is_anonymized(self.context['request'])
to_be_removed = set()
if is_anonymous and hasattr(self, 'non_anonymized_fields'):
# Drop any fields that are not specified in the `non_anonymized_fields` variable.
allowed = set(self.non_anonymized_fields)
existing = set(self.fields.keys())
to_be_removed = existing - allowed
fields = [field for field in self.fields.values() if
not field.write_only and field.field_name not in to_be_removed]
invalid_embeds = self.invalid_embeds(fields, embeds)
invalid_embeds = invalid_embeds - to_be_removed
if invalid_embeds:
raise InvalidQueryStringError(parameter='embed',
detail='The following fields are not embeddable: {}'.format(
', '.join(invalid_embeds)))
for field in fields:
try:
attribute = field.get_attribute(obj)
except SkipField:
continue
nested_field = getattr(field, 'field', None)
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
data['attributes'][field.field_name] = None
else:
try:
if hasattr(attribute, 'all'):
representation = field.to_representation(attribute.all())
else:
representation = field.to_representation(attribute)
except SkipField:
continue
if getattr(field, 'json_api_link', False) or getattr(nested_field, 'json_api_link', False):
# If embed=field_name is appended to the query string or 'always_embed' flag is True, directly embed the
# results in addition to adding a relationship link
if embeds and (field.field_name in embeds or getattr(field, 'always_embed', None)):
if enable_esi:
try:
result = field.to_esi_representation(attribute, envelope=envelope)
except SkipField:
continue
else:
try:
# If a field has an empty representation, it should not be embedded.
result = self.context['embed'][field.field_name](obj)
except SkipField:
result = None
if result:
data['embeds'][field.field_name] = result
else:
data['embeds'][field.field_name] = {'error': 'This field is not embeddable.'}
try:
if not (is_anonymous and
hasattr(field, 'view_name') and
field.view_name in self.views_to_hide_if_anonymous):
data['relationships'][field.field_name] = representation
except SkipField:
continue
elif field.field_name == 'id':
data['id'] = representation
elif field.field_name == 'links':
data['links'] = representation
else:
data['attributes'][field.field_name] = representation
if not data['relationships']:
del data['relationships']
if not data['embeds']:
del data['embeds']
if context_envelope:
ret[context_envelope] = data
if is_anonymous:
ret['meta'] = {'anonymous': True}
else:
ret = data
return ret
def get_absolute_url(self, obj):
raise NotImplementedError()
def get_absolute_html_url(self, obj):
return extend_querystring_if_key_exists(obj.absolute_url, self.context['request'], 'view_only')
# overrides Serializer: Add HTML-sanitization similar to that used by APIv1 front-end views
def is_valid(self, clean_html=True, **kwargs):
"""
After validation, scrub HTML from validated_data prior to saving (for create and update views)
Exclude 'type' and '_id' from validated_data.
"""
ret = super(JSONAPISerializer, self).is_valid(**kwargs)
if clean_html is True:
self._validated_data = self.sanitize_data()
self._validated_data.pop('type', None)
self._validated_data.pop('target_type', None)
if self.context['request'].method in utils.UPDATE_METHODS:
self._validated_data.pop('_id', None)
return ret
def sanitize_data(self):
return website_utils.rapply(self.validated_data, strip_html)
class JSONAPIRelationshipSerializer(BaseAPISerializer):
"""Base Relationship serializer. Requires that a `type_` option is set on `class Meta`.
Provides a simplified serialization of the relationship, allowing for simple update request
bodies.
"""
id = ser.CharField(required=False, allow_null=True)
type = TypeField(required=False, allow_null=True)
def to_representation(self, obj):
meta = getattr(self, 'Meta', None)
type_ = getattr(meta, 'type_', None)
assert type_ is not None, 'Must define Meta.type_'
relation_id_field = self.fields['id']
attribute = relation_id_field.get_attribute(obj)
relationship = relation_id_field.to_representation(attribute)
data = {'type': type_, 'id': relationship} if relationship else None
return data
def DevOnly(field):
"""Make a field only active in ``DEV_MODE``. ::
        experimental_field = DevOnly(CharField(required=False))
"""
return field if settings.DEV_MODE else None
class RestrictedDictSerializer(ser.Serializer):
def to_representation(self, obj):
data = {}
fields = [field for field in self.fields.values() if not field.write_only]
for field in fields:
try:
attribute = field.get_attribute(obj)
except ser.SkipField:
continue
if attribute is None:
# We skip `to_representation` for `None` values so that
# fields do not have to explicitly deal with that case.
data[field.field_name] = None
else:
data[field.field_name] = field.to_representation(attribute)
return data
def relationship_diff(current_items, new_items):
"""
    To be used in POST and PUT/PATCH relationship requests. Per the JSON API spec,
    update requests delete the relationships of the 'remove' items and add those of
    the 'add' items, while create requests only add the 'add' items.
:param current_items: The current items in the relationship
:param new_items: The items passed in the request
:return:
"""
return {
'add': {k: new_items[k] for k in (set(new_items.keys()) - set(current_items.keys()))},
'remove': {k: current_items[k] for k in (set(current_items.keys()) - set(new_items.keys()))}
}
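# Worked example (illustrative values only):
#   current = {'abc12': node_a, 'def34': node_b}
#   new = {'abc12': {'_id': 'abc12'}, 'xyz56': {'_id': 'xyz56'}}
#   relationship_diff(current, new)
#   -> {'add': {'xyz56': {'_id': 'xyz56'}}, 'remove': {'def34': node_b}}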
class AddonAccountSerializer(JSONAPISerializer):
id = ser.CharField(source='_id', read_only=True)
provider = ser.CharField(read_only=True)
profile_url = ser.CharField(required=False, read_only=True)
display_name = ser.CharField(required=False, read_only=True)
    links = LinksField({
'self': 'get_absolute_url',
})
class Meta:
type_ = 'external_accounts'
def get_absolute_url(self, obj):
kwargs = self.context['request'].parser_context['kwargs']
kwargs.update({'account_id': obj._id})
return absolute_reverse(
'users:user-external_account-detail',
kwargs=kwargs
)
class LinkedNode(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'linked_nodes'
class LinkedRegistration(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'linked_registrations'
class LinkedNodesRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=LinkedNode())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].linked_nodes_self_url
def get_related_url(self, obj):
return obj['self'].linked_nodes_related_url
class Meta:
type_ = 'linked_nodes'
def get_pointers_to_add_remove(self, pointers, new_pointers):
diff = relationship_diff(
current_items={pointer._id: pointer for pointer in pointers},
new_items={val['_id']: val for val in new_pointers}
)
nodes_to_add = []
for node_id in diff['add']:
node = Node.load(node_id)
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
nodes_to_add.append(node)
return nodes_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
# Convenience method to format instance based on view's get_object
return {'data': [
pointer for pointer in
obj.linked_nodes.filter(is_deleted=False, type='osf.node')
], 'self': obj}
def update(self, instance, validated_data):
collection = instance['self']
auth = utils.get_user_auth(self.context['request'])
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
for pointer in remove:
collection.rm_pointer(pointer, auth)
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
def create(self, validated_data):
instance = self.context['view'].get_object()
auth = utils.get_user_auth(self.context['request'])
collection = instance['self']
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
if not len(add):
raise RelationshipPostMakesNoChanges
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
class LinkedRegistrationsRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=LinkedRegistration())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].linked_registrations_self_url
def get_related_url(self, obj):
return obj['self'].linked_registrations_related_url
class Meta:
type_ = 'linked_registrations'
def get_pointers_to_add_remove(self, pointers, new_pointers):
diff = relationship_diff(
current_items={pointer._id: pointer for pointer in pointers},
new_items={val['_id']: val for val in new_pointers}
)
nodes_to_add = []
for node_id in diff['add']:
node = Node.load(node_id)
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
nodes_to_add.append(node)
return nodes_to_add, diff['remove'].values()
def make_instance_obj(self, obj):
# Convenience method to format instance based on view's get_object
return {'data': [
pointer for pointer in
obj.linked_nodes.filter(is_deleted=False, type='osf.registration')
], 'self': obj}
def update(self, instance, validated_data):
collection = instance['self']
auth = utils.get_user_auth(self.context['request'])
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
for pointer in remove:
collection.rm_pointer(pointer, auth)
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
def create(self, validated_data):
instance = self.context['view'].get_object()
auth = utils.get_user_auth(self.context['request'])
collection = instance['self']
add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
if not len(add):
raise RelationshipPostMakesNoChanges
for node in add:
collection.add_pointer(node, auth)
return self.make_instance_obj(collection)
| apache-2.0 | 3,039,951,152,254,714,000 | 37.725634 | 289 | 0.597076 | false |
yakky/djangocms-text-ckeditor | djangocms_text_ckeditor/forms.py | 1 | 3464 | # -*- coding: utf-8 -*-
from django import forms
from django.core import signing
from django.core.signing import BadSignature
from django.forms.models import ModelForm
from django.template import RequestContext
from django.utils.translation import ugettext
from cms.models import CMSPlugin
from .models import Text
from .utils import _render_cms_plugin, plugin_tags_to_id_list, plugin_to_tag
class ActionTokenValidationForm(forms.Form):
token = forms.CharField(required=True)
def get_id_from_token(self, session_id):
payload = self.cleaned_data['token']
signer = signing.Signer(salt=session_id)
try:
return signer.unsign(payload)
except BadSignature:
return False
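# Sketch of the matching token creation (an assumption about the calling view, not part
# of this form): the token is expected to be produced with the same session-scoped salt,
# e.g.
#   token = signing.Signer(salt=request.session.session_key).sign(str(plugin.pk))
# get_id_from_token() can then recover the plugin id only for the session that issued it;
# any other salt makes unsign() raise BadSignature and the method returns False.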
class RenderPluginForm(forms.Form):
plugin = forms.ModelChoiceField(
queryset=CMSPlugin.objects.none(),
required=True,
)
def __init__(self, *args, **kwargs):
self.text_plugin = kwargs.pop('text_plugin')
super(RenderPluginForm, self).__init__(*args, **kwargs)
self.fields['plugin'].queryset = self.get_child_plugins()
def get_child_plugins(self):
return self.text_plugin.get_descendants()
def render_plugin(self, request):
plugin = self.cleaned_data['plugin']
context = RequestContext(request)
context['request'] = request
rendered_content = _render_cms_plugin(plugin, context)
return plugin_to_tag(plugin, content=rendered_content, admin=True)
class DeleteOnCancelForm(forms.Form):
child_plugins = forms.ModelMultipleChoiceField(
queryset=CMSPlugin.objects.none(),
required=False,
)
def __init__(self, *args, **kwargs):
self.text_plugin = kwargs.pop('text_plugin')
super(DeleteOnCancelForm, self).__init__(*args, **kwargs)
self.fields['child_plugins'].queryset = self.get_child_plugins()
def clean(self):
children = self.cleaned_data.get('child_plugins')
if not children and self.text_plugin.get_plugin_instance()[0]:
# This check prevents users from using a cancel token
# to delete just any text plugin.
# Only non-saved text plugins can be deleted.
message = ugettext("Can't delete a saved plugin.")
raise forms.ValidationError(message, code='invalid')
return self.cleaned_data
def get_child_plugins(self):
# We use this queryset to limit the plugins
# a user can delete to only plugins that have not
# been saved in text and are descendants of the text plugin.
instance = self.text_plugin.get_plugin_instance()[0]
if instance:
# Only non-saved children can be deleted.
excluded_plugins = plugin_tags_to_id_list(instance.body)
else:
excluded_plugins = []
queryset = self.text_plugin.get_descendants()
if excluded_plugins:
queryset = queryset.exclude(pk__in=excluded_plugins)
return queryset
def delete(self):
child_plugins = self.cleaned_data.get('child_plugins')
if child_plugins:
child_plugins.delete()
else:
self.text_plugin.delete()
class TextForm(ModelForm):
body = forms.CharField()
class Meta:
model = Text
exclude = (
'page',
'position',
'placeholder',
'language',
'plugin_type',
)
| bsd-3-clause | -7,001,191,991,926,625,000 | 29.928571 | 76 | 0.633661 | false |
SanPen/GridCal | src/research/PTDF/ACPTDF_research2.py | 1 | 14022 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def SysMat(Y, Ys, pq, pvpq):
"""
Computes the system Jacobian matrix in polar coordinates
Args:
        Y: Full admittance matrix (Ybus)
        Ys: Series-element admittance matrix (Yseries)
pq: Array with the indices of the PQ buses
pvpq: Array with the indices of the PV and PQ buses
Returns:
The system Jacobian matrix
"""
A11 = -Ys.imag[np.ix_(pvpq, pvpq)]
A12 = Y.real[np.ix_(pvpq, pq)]
A21 = -Ys.real[np.ix_(pq, pvpq)]
A22 = -Y.imag[np.ix_(pq, pq)]
Asys = sp.vstack([sp.hstack([A11, A12]),
sp.hstack([A21, A22])], format="csc")
return Asys
def compute_acptdf(Ybus, Yseries, Yf, Yt, Cf, V, pq, pv, distribute_slack):
"""
Compute the AC-PTDF
:param Ybus: admittance matrix
:param Yf: Admittance matrix of the buses "from"
:param Yt: Admittance matrix of the buses "to"
:param Cf: Connectivity branch - bus "from"
:param V: voltages array
:param Ibus: array of currents
:param pq: array of pq node indices
:param pv: array of pv node indices
:return: AC-PTDF matrix (branches, buses)
"""
n = len(V)
pvpq = np.r_[pv, pq]
npq = len(pq)
# compute the Jacobian
J = SysMat(Ybus, Yseries, pq, pvpq)
if distribute_slack:
dP = np.ones((n, n)) * (-1 / (n - 1))
for i in range(n):
dP[i, i] = 1.0
else:
dP = np.eye(n, n)
    # compose the compatible array (the Q increments are considered zero)
dQ = np.zeros((npq, n))
# dQ = np.eye(n, n)[pq, :]
dS = np.r_[dP[pvpq, :], dQ]
# solve the voltage increments
dx = spsolve(J, dS)
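    # Each column of dx stacks the angle increments of the pvpq buses on top of
    # the voltage-magnitude increments of the pq buses, i.e. the sensitivity of
    # the state to a unit power injection at that column's bus (or to the
    # distributed-slack injection pattern when distribute_slack is True).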
# compute branch derivatives
If = Yf * V
E = V / np.abs(V)
Vdiag = sp.diags(V)
Vdiag_conj = sp.diags(np.conj(V))
Ediag = sp.diags(E)
Ediag_conj = sp.diags(np.conj(E))
If_diag_conj = sp.diags(np.conj(If))
Yf_conj = Yf.copy()
Yf_conj.data = np.conj(Yf_conj.data)
Yt_conj = Yt.copy()
Yt_conj.data = np.conj(Yt_conj.data)
dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
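    # dSf_dVa / dSf_dVm are the sensitivities of the "from"-side branch power to
    # the bus voltage angles and magnitudes; only their real parts are used
    # below, since the PTDF maps active-power injections to active-power flows.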
# compose the final AC-PTDF
dPf_dVa = dSf_dVa.real[:, pvpq]
dPf_dVm = dSf_dVm.real[:, pq]
PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
return PTDF
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
"""
    :param circuit: SnapshotCircuit providing the branch-bus connectivity
    :param PTDF: PTDF matrix in numpy array form
    :return: LODF matrix of shape (branches, branches)
"""
nl = circuit.nbr
# compute the connectivity matrix
Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
H = PTDF * Cft.T
# old code
# h = sp.diags(H.diagonal())
# LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
# divide each row of H by the vector 1 - H.diagonal
# LODF = H / (1 - H.diagonal())
# replace possible nan and inf
# LODF[LODF == -np.inf] = 0
# LODF[LODF == np.inf] = 0
# LODF = np.nan_to_num(LODF)
# this loop avoids the divisions by zero
# in those cases the LODF column should be zero
LODF = np.zeros((nl, nl))
div = 1 - H.diagonal()
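    # div[j] == 0 means branch j has a self-PTDF of 1, i.e. its outage would
    # island part of the grid, so its LODF column is left as zeros.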
for j in range(H.shape[1]):
if div[j] != 0:
LODF[:, j] = H[:, j] / div[j]
# replace the diagonal elements by -1
# old code
# LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
for i in range(nl):
LODF[i, i] = - 1.0
if correct_values:
i1, j1 = np.where(LODF > 1)
for i, j in zip(i1, j1):
LODF[i, j] = 1
i2, j2 = np.where(LODF < -1)
for i, j in zip(i2, j2):
LODF[i, j] = -1
return LODF
def get_branch_time_series(circuit: TimeCircuit, PTDF):
"""
    :param circuit: TimeCircuit with the bus power injections over time
    :param PTDF: PTDF matrix (branches, buses)
    :return: branch active power flows of shape (time, branches)
"""
# option 2: call the power directly
P = circuit.Sbus.real
Pbr = np.dot(PTDF, P).T * circuit.Sbase
return Pbr
def multiple_failure_old(flows, LODF, beta, delta, alpha):
"""
:param flows: array of all the pre-contingency flows
:param LODF: Line Outage Distribution Factors Matrix
:param beta: index of the first failed line
:param delta: index of the second failed line
:param alpha: index of the line where you want to see the effects
:return: post contingency flow in the line alpha
"""
# multiple contingency matrix
M = np.ones((2, 2))
M[0, 1] = -LODF[beta, delta]
M[1, 0] = -LODF[delta, beta]
# normal flows of the lines beta and delta
F = flows[[beta, delta]]
# contingency flows after failing the ines beta and delta
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines beta and delta
L = LODF[alpha, :][[beta, delta]]
dFf_alpha = np.dot(L, Ff)
return F[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
"""
From the paper:
Multiple Element Contingency Screening
IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
C. Matthew Davis and Thomas J. Overbye
:param flows: array of all the pre-contingency flows (the base flows)
:param LODF: Line Outage Distribution Factors Matrix
:param failed_idx: indices of the failed lines
:return: all post contingency flows
"""
# multiple contingency matrix
M = -LODF[np.ix_(failed_idx, failed_idx)]
for i in range(len(failed_idx)):
M[i, i] = 1.0
# normal flows of the failed lines indicated by failed_idx
F = flows[failed_idx]
# Affected flows after failing the lines indicated by failed_idx
Ff = np.linalg.solve(M, F)
# flow delta in the line alpha after the multiple contingency of the lines indicated by failed_idx
L = LODF[:, failed_idx]
dFf_alpha = np.dot(L, Ff)
# return the final contingency flow as the base flow plus the contingency flow delta
return flows + dFf_alpha
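# Illustrative check with made-up numbers (not from any test grid): for two failed
# branches carrying F = [100, 50] MW with mutual LODFs of 0.2, M becomes
# [[1, -0.2], [-0.2, 1]]; solving M @ Ff = F gives the interaction-adjusted flows
# of the outaged pair, which are then spread over the surviving branches through
# LODF[:, failed_idx].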
def get_n_minus_1_flows(circuit: MultiCircuit):
opt = PowerFlowOptions()
branches = circuit.get_branches()
m = circuit.get_branch_number()
Pmat = np.zeros((m, m)) # monitored, contingency
for c, branch in enumerate(branches):
if branch.active:
branch.active = False
pf = PowerFlowDriver(circuit, opt)
pf.run()
Pmat[:, c] = pf.results.Sbranch.real
branch.active = True
return Pmat
def check_lodf(grid: MultiCircuit):
flows_n1_nr = get_n_minus_1_flows(grid)
# assume 1 island
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0]
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=True)
LODF = make_lodf(circuit, PTDF)
Pbus = circuit.get_injections(False).real
flows_n = np.dot(PTDF, Pbus)
nl = circuit.nbr
flows_n1 = np.zeros((nl, nl))
for c in range(nl): # branch that fails (contingency)
# for m in range(nl): # branch to monitor
# flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
"""
Sigma-distances test
:param grid:
:return:
"""
nc = compile_snapshot_circuit(grid)
islands = split_into_islands(nc)
circuit = islands[0] # pick the first island
pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
pf_driver.run()
PTDF = compute_acptdf(Ybus=circuit.Ybus,
Yseries=circuit.Yseries,
Yf=circuit.Yf,
Yt=circuit.Yt,
Cf=circuit.C_branch_bus_f,
V=pf_driver.results.voltage,
pq=circuit.pq,
pv=circuit.pv,
distribute_slack=False)
print('PTDF:')
print(PTDF)
if __name__ == '__main__':
from GridCal.Engine import FileOpen
import pandas as pd
np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
# np.set_printoptions(linewidth=2000, suppress=True)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
# fname = 'helm_data1.gridcal'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
# fname = 'IEEE 14 PQ only full.gridcal'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
# fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
grid_ = FileOpen(fname).open()
test_ptdf(grid_)
name = os.path.splitext(fname.split(os.sep)[-1])[0]
method = 'ACPTDF (No Jacobian, V=Vpf)'
nc_ = compile_snapshot_circuit(grid_)
islands_ = split_into_islands(nc_)
circuit_ = islands_[0]
pf_driver_ = PowerFlowDriver(grid_, PowerFlowOptions())
pf_driver_.run()
H_ = compute_acptdf(Ybus=circuit_.Ybus,
Yseries=circuit_.Yseries,
Yf=circuit_.Yf,
Yt=circuit_.Yt,
Cf=circuit_.C_branch_bus_f,
V=pf_driver_.results.voltage,
pq=circuit_.pq,
pv=circuit_.pv,
distribute_slack=False)
LODF_ = make_lodf(circuit_, H_)
if H_.shape[0] < 50:
print('PTDF:\n', H_)
print('LODF:\n', LODF_)
flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
# in the case of the grid PGOC_6bus
flows_multiple = multiple_failure(flows=flows_n_,
LODF=LODF_,
failed_idx=[1, 5]) # failed lines 2 and 6
Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
flows_n1_df = pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
# plot N-1
fig = plt.figure(figsize=(12, 8))
title = 'N-1 with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
Pn1_nr_df.plot(ax=ax1, legend=False)
flows_n1_df.plot(ax=ax2, legend=False)
diff = Pn1_nr_df - flows_n1_df
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson N-1 flows')
ax2.set_title('PTDF N-1 flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
# ------------------------------------------------------------------------------------------------------------------
# Perform real time series
# ------------------------------------------------------------------------------------------------------------------
if grid_.time_profile is not None:
grid_.ensure_profiles_exist()
nc_ts = compile_time_circuit(grid_)
islands_ts = split_time_circuit_into_islands(nc_ts)
circuit_ts = islands_ts[0]
pf_options = PowerFlowOptions()
ts_driver = TimeSeries(grid=grid_, options=pf_options)
ts_driver.run()
Pbr_nr = ts_driver.results.Sbranch.real
df_Pbr_nr = pd.DataFrame(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# Compute the PTDF based flows
Pbr_ptdf = get_branch_time_series(circuit=circuit_ts, PTDF=H_)
df_Pbr_ptdf = pd.DataFrame(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
# plot
fig = plt.figure(figsize=(12, 8))
title = 'Flows with ' + method + ' (' + name + ')'
fig.suptitle(title)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
df_Pbr_nr.plot(ax=ax1, legend=False)
df_Pbr_ptdf.plot(ax=ax2, legend=False)
diff = df_Pbr_nr - df_Pbr_ptdf
diff.plot(ax=ax3, legend=False)
ax1.set_title('Newton-Raphson flows')
ax2.set_title('PTDF flows')
ax3.set_title('Difference')
fig.savefig(title + '.png')
plt.show()
| gpl-3.0 | 2,332,068,375,098,212,400 | 31.234483 | 120 | 0.590857 | false |
Yubico/yubikey-manager | ykman/cli/fido.py | 1 | 24461 | # Copyright (c) 2018 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from fido2.ctap import CtapError
from fido2.ctap1 import ApduError
from fido2.ctap2 import (
Ctap2,
ClientPin,
CredentialManagement,
FPBioEnrollment,
CaptureError,
)
from fido2.pcsc import CtapPcscDevice
from yubikit.core.fido import FidoConnection
from yubikit.core.smartcard import SW
from time import sleep
from .util import (
click_postpone_execution,
click_prompt,
click_force_option,
ykman_group,
prompt_timeout,
)
from .util import cli_fail
from ..fido import is_in_fips_mode, fips_reset, fips_change_pin, fips_verify_pin
from ..hid import list_ctap_devices
from ..device import is_fips_version
from ..pcsc import list_devices as list_ccid
from smartcard.Exceptions import NoCardException, CardConnectionException
from typing import Optional
import click
import logging
logger = logging.getLogger(__name__)
FIPS_PIN_MIN_LENGTH = 6
PIN_MIN_LENGTH = 4
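# The FIDO2 PIN may be as short as 4 characters, while the U2F PIN on YubiKey
# FIPS devices must be at least 6 (see the access change-pin command below).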
@ykman_group(FidoConnection)
@click.pass_context
@click_postpone_execution
def fido(ctx):
"""
Manage the FIDO applications.
Examples:
\b
Reset the FIDO (FIDO2 and U2F) applications:
$ ykman fido reset
\b
Change the FIDO2 PIN from 123456 to 654321:
$ ykman fido access change-pin --pin 123456 --new-pin 654321
"""
conn = ctx.obj["conn"]
try:
ctx.obj["ctap2"] = Ctap2(conn)
except (ValueError, CtapError) as e:
logger.info("FIDO device does not support CTAP2: %s", e)
@fido.command()
@click.pass_context
def info(ctx):
"""
Display general status of the FIDO2 application.
"""
conn = ctx.obj["conn"]
ctap2 = ctx.obj.get("ctap2")
if is_fips_version(ctx.obj["info"].version):
click.echo("FIPS Approved Mode: " + ("Yes" if is_in_fips_mode(conn) else "No"))
elif ctap2:
client_pin = ClientPin(ctap2) # N.B. All YubiKeys with CTAP2 support PIN.
if ctap2.info.options["clientPin"]:
if ctap2.info.force_pin_change:
                click.echo(
                    "NOTE: The FIDO PIN is disabled and must be changed before it can "
                    "be used!"
                )
pin_retries, power_cycle = client_pin.get_pin_retries()
if pin_retries:
click.echo(f"PIN is set, with {pin_retries} attempt(s) remaining.")
if power_cycle:
click.echo(
"PIN is temporarily blocked. "
"Remove and re-insert the YubiKey to unblock."
)
else:
click.echo("PIN is set, but has been blocked.")
else:
click.echo("PIN is not set.")
bio_enroll = ctap2.info.options.get("bioEnroll")
if bio_enroll:
uv_retries, _ = client_pin.get_uv_retries()
if uv_retries:
click.echo(
f"Fingerprints registered, with {uv_retries} attempt(s) "
"remaining."
)
else:
click.echo(
"Fingerprints registered, but blocked until PIN is verified."
)
elif bio_enroll is False:
click.echo("No fingerprints have been registered.")
always_uv = ctap2.info.options.get("alwaysUv")
if always_uv is not None:
click.echo(
"Always Require User Verification is turned "
+ ("on." if always_uv else "off.")
)
else:
click.echo("PIN is not supported.")
@fido.command("reset")
@click_force_option
@click.pass_context
def reset(ctx, force):
"""
Reset all FIDO applications.
This action will wipe all FIDO credentials, including FIDO U2F credentials,
on the YubiKey and remove the PIN code.
The reset must be triggered immediately after the YubiKey is
inserted, and requires a touch on the YubiKey.
"""
conn = ctx.obj["conn"]
if isinstance(conn, CtapPcscDevice): # NFC
readers = list_ccid(conn._name)
if not readers or readers[0].reader.name != conn._name:
logger.error(f"Multiple readers matched: {readers}")
cli_fail("Unable to isolate NFC reader.")
dev = readers[0]
logger.debug(f"use: {dev}")
is_fips = False
def prompt_re_insert():
click.echo(
"Remove and re-place your YubiKey on the NFC reader to perform the "
"reset..."
)
removed = False
while True:
sleep(0.5)
try:
with dev.open_connection(FidoConnection):
if removed:
sleep(1.0) # Wait for the device to settle
break
except CardConnectionException:
pass # Expected, ignore
except NoCardException:
removed = True
return dev.open_connection(FidoConnection)
else: # USB
n_keys = len(list_ctap_devices())
if n_keys > 1:
cli_fail("Only one YubiKey can be connected to perform a reset.")
is_fips = is_fips_version(ctx.obj["info"].version)
ctap2 = ctx.obj.get("ctap2")
if not is_fips and not ctap2:
cli_fail("This YubiKey does not support FIDO reset.")
def prompt_re_insert():
click.echo("Remove and re-insert your YubiKey to perform the reset...")
removed = False
while True:
sleep(0.5)
keys = list_ctap_devices()
if not keys:
removed = True
if removed and len(keys) == 1:
return keys[0].open_connection(FidoConnection)
if not force:
if not click.confirm(
"WARNING! This will delete all FIDO credentials, including FIDO U2F "
"credentials, and restore factory settings. Proceed?",
err=True,
):
ctx.abort()
if is_fips:
destroy_input = click_prompt(
"WARNING! This is a YubiKey FIPS device. This command will also "
"overwrite the U2F attestation key; this action cannot be undone and "
"this YubiKey will no longer be a FIPS compliant device.\n"
'To proceed, please enter the text "OVERWRITE"',
default="",
show_default=False,
)
if destroy_input != "OVERWRITE":
cli_fail("Reset aborted by user.")
conn = prompt_re_insert()
try:
with prompt_timeout():
if is_fips:
fips_reset(conn)
else:
Ctap2(conn).reset()
except CtapError as e:
logger.error("Reset failed", exc_info=e)
if e.code == CtapError.ERR.ACTION_TIMEOUT:
cli_fail(
"Reset failed. You need to touch your YubiKey to confirm the reset."
)
elif e.code in (CtapError.ERR.NOT_ALLOWED, CtapError.ERR.PIN_AUTH_BLOCKED):
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail(f"Reset failed: {e.code.name}")
except ApduError as e: # From fips_reset
logger.error("Reset failed", exc_info=e)
if e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail(
"Reset failed. Reset must be triggered within 5 seconds after the "
"YubiKey is inserted."
)
else:
cli_fail("Reset failed.")
except Exception as e:
logger.error(e)
cli_fail("Reset failed.")
def _fail_pin_error(ctx, e, other="%s"):
if e.code == CtapError.ERR.PIN_INVALID:
cli_fail("Wrong PIN.")
elif e.code == CtapError.ERR.PIN_AUTH_BLOCKED:
cli_fail(
"PIN authentication is currently blocked. "
"Remove and re-insert the YubiKey."
)
elif e.code == CtapError.ERR.PIN_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(other % e.code)
@fido.group("access")
def access():
"""
Manage the PIN for FIDO.
"""
@access.command("change-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
@click.option("-n", "--new-pin", help="A new PIN.")
@click.option(
"-u", "--u2f", is_flag=True, help="Set FIDO U2F PIN instead of FIDO2 PIN."
)
def change_pin(ctx, pin, new_pin, u2f):
"""
Set or change the PIN code.
The FIDO2 PIN must be at least 4 characters long, and supports any type
of alphanumeric characters.
On YubiKey FIPS, a PIN can be set for FIDO U2F. That PIN must be at least
6 characters long.
"""
is_fips = is_fips_version(ctx.obj["info"].version)
if is_fips and not u2f:
cli_fail("This is a YubiKey FIPS. To set the U2F PIN, pass the --u2f option.")
if u2f and not is_fips:
cli_fail(
"This is not a YubiKey FIPS, and therefore does not support a U2F PIN. "
"To set the FIDO2 PIN, remove the --u2f option."
)
if is_fips:
conn = ctx.obj["conn"]
else:
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail("PIN is not supported on this YubiKey.")
client_pin = ClientPin(ctap2)
def prompt_new_pin():
return click_prompt(
"Enter your new PIN",
default="",
hide_input=True,
show_default=False,
confirmation_prompt=True,
)
def change_pin(pin, new_pin):
if pin is not None:
_fail_if_not_valid_pin(ctx, pin, is_fips)
try:
if is_fips:
try:
# Failing this with empty current PIN does not cost a retry
fips_change_pin(conn, pin or "", new_pin)
except ApduError as e:
if e.code == SW.WRONG_LENGTH:
pin = _prompt_current_pin()
_fail_if_not_valid_pin(ctx, pin, is_fips)
fips_change_pin(conn, pin, new_pin)
else:
raise
else:
client_pin.change_pin(pin, new_pin)
except CtapError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("New PIN doesn't meet policy requirements.")
else:
_fail_pin_error(ctx, e, "Failed to change PIN: %s")
except ApduError as e:
logger.error("Failed to change PIN", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
else:
cli_fail(f"Failed to change PIN: SW={e.code:04x}")
def set_pin(new_pin):
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
try:
client_pin.set_pin(new_pin)
except CtapError as e:
logger.error("Failed to set PIN", exc_info=e)
if e.code == CtapError.ERR.PIN_POLICY_VIOLATION:
cli_fail("PIN is too long.")
else:
cli_fail(f"Failed to set PIN: {e.code}")
if not is_fips:
if ctap2.info.options.get("clientPin"):
if not pin:
pin = _prompt_current_pin()
else:
if pin:
cli_fail("There is no current PIN set. Use --new-pin to set one.")
if not new_pin:
new_pin = prompt_new_pin()
if is_fips:
_fail_if_not_valid_pin(ctx, new_pin, is_fips)
change_pin(pin, new_pin)
else:
if len(new_pin) < ctap2.info.min_pin_length:
cli_fail("New PIN is too short.")
if ctap2.info.options.get("clientPin"):
change_pin(pin, new_pin)
else:
set_pin(new_pin)
def _require_pin(ctx, pin, feature="This feature"):
ctap2 = ctx.obj.get("ctap2")
if not ctap2:
cli_fail(f"{feature} is not supported on this YubiKey.")
if not ctap2.info.options.get("clientPin"):
cli_fail(f"{feature} requires having a PIN. Set a PIN first.")
if ctap2.info.force_pin_change:
cli_fail("The FIDO PIN is blocked. Change the PIN first.")
if pin is None:
pin = _prompt_current_pin(prompt="Enter your PIN")
return pin
@access.command("verify-pin")
@click.pass_context
@click.option("-P", "--pin", help="Current PIN code.")
def verify(ctx, pin):
"""
Verify the FIDO PIN against a YubiKey.
For YubiKeys supporting FIDO2 this will reset the "retries" counter of the PIN.
For YubiKey FIPS this will unlock the session, allowing U2F registration.
"""
ctap2 = ctx.obj.get("ctap2")
if ctap2:
pin = _require_pin(ctx, pin)
client_pin = ClientPin(ctap2)
try:
# Get a PIN token to verify the PIN.
client_pin.get_pin_token(
pin, ClientPin.PERMISSION.GET_ASSERTION, "ykman.example.com"
)
except CtapError as e:
logger.error("PIN verification failed", exc_info=e)
cli_fail(f"Error: {e}")
elif is_fips_version(ctx.obj["info"].version):
_fail_if_not_valid_pin(ctx, pin, True)
try:
fips_verify_pin(ctx.obj["conn"], pin)
except ApduError as e:
logger.error("PIN verification failed", exc_info=e)
if e.code == SW.VERIFY_FAIL_NO_RETRY:
cli_fail("Wrong PIN.")
elif e.code == SW.AUTH_METHOD_BLOCKED:
cli_fail("PIN is blocked.")
elif e.code == SW.COMMAND_NOT_ALLOWED:
cli_fail("PIN is not set.")
else:
cli_fail(f"PIN verification failed: {e.code.name}")
else:
cli_fail("This YubiKey does not support a FIDO PIN.")
click.echo("PIN verified.")
def _prompt_current_pin(prompt="Enter your current PIN"):
return click_prompt(prompt, default="", hide_input=True, show_default=False)
def _fail_if_not_valid_pin(ctx, pin=None, is_fips=False):
min_length = FIPS_PIN_MIN_LENGTH if is_fips else PIN_MIN_LENGTH
if not pin or len(pin) < min_length:
ctx.fail(f"PIN must be over {min_length} characters long")
def _gen_creds(credman):
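    # Yield (rp_id, credential_id, user_id, user_name) for every discoverable
    # credential stored on the key.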
data = credman.get_metadata()
if data.get(CredentialManagement.RESULT.EXISTING_CRED_COUNT) == 0:
return # No credentials
for rp in credman.enumerate_rps():
for cred in credman.enumerate_creds(rp[CredentialManagement.RESULT.RP_ID_HASH]):
yield (
rp[CredentialManagement.RESULT.RP]["id"],
cred[CredentialManagement.RESULT.CREDENTIAL_ID],
cred[CredentialManagement.RESULT.USER]["id"],
cred[CredentialManagement.RESULT.USER]["name"],
)
def _format_cred(rp_id, user_id, user_name):
return f"{rp_id} {user_id.hex()} {user_name}"
@fido.group("credentials")
def creds():
"""
Manage discoverable (resident) credentials.
This command lets you manage credentials stored on your YubiKey.
Credential management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
List credentials (providing PIN via argument):
$ ykman fido credentials list --pin 123456
\b
Delete a credential by user name (PIN will be prompted for):
$ ykman fido credentials delete example_user
"""
def _init_credman(ctx, pin):
pin = _require_pin(ctx, pin, "Credential Management")
ctap2 = ctx.obj.get("ctap2")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.CREDENTIAL_MGMT)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return CredentialManagement(ctap2, client_pin.protocol, token)
@creds.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def creds_list(ctx, pin):
"""
List credentials.
"""
creds = _init_credman(ctx, pin)
for (rp_id, _, user_id, user_name) in _gen_creds(creds):
click.echo(_format_cred(rp_id, user_id, user_name))
@creds.command("delete")
@click.pass_context
@click.argument("query")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def creds_delete(ctx, query, pin, force):
"""
Delete a credential.
\b
    QUERY       A unique substring match of a credential's RP ID, user ID (hex) or name,
or credential ID.
"""
credman = _init_credman(ctx, pin)
hits = [
(rp_id, cred_id, user_id, user_name)
for (rp_id, cred_id, user_id, user_name) in _gen_creds(credman)
if query.lower() in user_name.lower()
or query.lower() in rp_id.lower()
or user_id.hex().startswith(query.lower())
or query.lower() in _format_cred(rp_id, user_id, user_name)
]
if len(hits) == 0:
cli_fail("No matches, nothing to be done.")
elif len(hits) == 1:
(rp_id, cred_id, user_id, user_name) = hits[0]
if force or click.confirm(
f"Delete credential {_format_cred(rp_id, user_id, user_name)}?"
):
try:
credman.delete_cred(cred_id)
except CtapError as e:
logger.error("Failed to delete resident credential", exc_info=e)
cli_fail("Failed to delete resident credential.")
else:
cli_fail("Multiple matches, make the query more specific.")
@fido.group("fingerprints")
def bio():
"""
Manage fingerprints.
Requires a YubiKey with fingerprint sensor.
Fingerprint management is only available when a FIDO PIN is set on the YubiKey.
\b
Examples:
\b
Register a new fingerprint (providing PIN via argument):
$ ykman fido fingerprints add "Left thumb" --pin 123456
\b
List already stored fingerprints (providing PIN via argument):
$ ykman fido fingerprints list --pin 123456
\b
Delete a stored fingerprint with ID "f691" (PIN will be prompted for):
$ ykman fido fingerprints delete f691
"""
def _init_bio(ctx, pin):
ctap2 = ctx.obj.get("ctap2")
if not ctap2 or "bioEnroll" not in ctap2.info.options:
cli_fail("Biometrics is not supported on this YubiKey.")
pin = _require_pin(ctx, pin, "Biometrics")
client_pin = ClientPin(ctap2)
try:
token = client_pin.get_pin_token(pin, ClientPin.PERMISSION.BIO_ENROLL)
except CtapError as e:
logger.error("Ctap error", exc_info=e)
_fail_pin_error(ctx, e, "PIN error: %s")
return FPBioEnrollment(ctap2, client_pin.protocol, token)
def _format_fp(template_id, name):
return f"{template_id.hex()}{f' ({name})' if name else ''}"
@bio.command("list")
@click.pass_context
@click.option("-P", "--pin", help="PIN code.")
def bio_list(ctx, pin):
"""
    List registered fingerprints.
Lists fingerprints by ID and (if available) label.
"""
bio = _init_bio(ctx, pin)
for t_id, name in bio.enumerate_enrollments().items():
click.echo(f"ID: {_format_fp(t_id, name)}")
@bio.command("add")
@click.pass_context
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_enroll(ctx, name, pin):
"""
Add a new fingerprint.
\b
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name.encode()) > 15:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enroller = bio.enroll()
template_id = None
while template_id is None:
click.echo("Place your finger against the sensor now...")
try:
template_id = enroller.capture()
remaining = enroller.remaining
if remaining:
click.echo(f"{remaining} more scans needed.")
except CaptureError as e:
logger.error(f"Capture error: {e.code}")
click.echo("Capture failed. Re-center your finger, and try again.")
except CtapError as e:
logger.error("Failed to add fingerprint template", exc_info=e)
if e.code == CtapError.ERR.FP_DATABASE_FULL:
cli_fail(
"Fingerprint storage full. "
"Remove some fingerprints before adding new ones."
)
elif e.code == CtapError.ERR.USER_ACTION_TIMEOUT:
cli_fail("Failed to add fingerprint due to user inactivity.")
cli_fail(f"Failed to add fingerprint: {e.code.name}")
click.echo("Capture complete.")
bio.set_name(template_id, name)
@bio.command("rename")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.argument("name")
@click.option("-P", "--pin", help="PIN code.")
def bio_rename(ctx, template_id, name, pin):
"""
Set the label for a fingerprint.
\b
ID The ID of the fingerprint to rename (as shown in "list").
NAME A short readable name for the fingerprint (eg. "Left thumb").
"""
if len(name) >= 16:
ctx.fail("Fingerprint name must be a maximum of 15 characters")
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
key = bytes.fromhex(template_id)
if key not in enrollments:
cli_fail(f"No fingerprint matching ID={template_id}.")
bio.set_name(key, name)
@bio.command("delete")
@click.pass_context
@click.argument("template_id", metavar="ID")
@click.option("-P", "--pin", help="PIN code.")
@click.option("-f", "--force", is_flag=True, help="Confirm deletion without prompting")
def bio_delete(ctx, template_id, pin, force):
"""
Delete a fingerprint.
Delete a fingerprint from the YubiKey by its ID, which can be seen by running the
"list" subcommand.
"""
bio = _init_bio(ctx, pin)
enrollments = bio.enumerate_enrollments()
try:
key: Optional[bytes] = bytes.fromhex(template_id)
except ValueError:
key = None
if key not in enrollments:
# Match using template_id as NAME
matches = [k for k in enrollments if enrollments[k] == template_id]
if len(matches) == 0:
cli_fail(f"No fingerprint matching ID={template_id}")
elif len(matches) > 1:
cli_fail(
f"Multiple matches for NAME={template_id}. "
"Delete by template ID instead."
)
key = matches[0]
name = enrollments[key]
if force or click.confirm(f"Delete fingerprint {_format_fp(key, name)}?"):
try:
bio.remove_enrollment(key)
except CtapError as e:
logger.error("Failed to delete fingerprint template", exc_info=e)
cli_fail(f"Failed to delete fingerprint: {e.code.name}")
| bsd-2-clause | -6,043,912,827,052,858,000 | 32.010796 | 88 | 0.589346 | false |
kobotoolbox/kobocat | onadata/apps/logger/tests/test_briefcase_client.py | 1 | 6934 | # coding: utf-8
import os.path
from io import StringIO, BytesIO
from urllib.parse import urljoin
import requests
from django.contrib.auth import authenticate
from django.core.files.storage import get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.urls import reverse
from django.test import RequestFactory
from django_digest.test import Client as DigestClient
from httmock import urlmatch, HTTMock
from onadata.apps.logger.models import Instance, XForm
from onadata.apps.logger.views import formList, download_xform, xformsManifest
from onadata.apps.main.models import MetaData
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.main.views import profile, download_media_data
from onadata.libs.utils.briefcase_client import BriefcaseClient
from onadata.libs.utils.storage import delete_user_storage
storage = get_storage_class()()
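# The two @urlmatch handlers below stand in for an ODK Briefcase server: requests
# against "testserver" are answered by calling the local Django views directly
# (formList, form.xml, manifests, media) or, for instance downloads, by replaying
# the request through a digest-authenticated test client.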
@urlmatch(netloc=r'(.*\.)?testserver$')
def form_list_xml(url, request, **kwargs):
response = requests.Response()
factory = RequestFactory()
req = factory.get(url.path)
req.user = authenticate(username='bob', password='bob')
req.user.profile.require_auth = False
req.user.profile.save()
id_string = 'transportation_2011_07_25'
if url.path.endswith('formList'):
res = formList(req, username='bob')
elif url.path.endswith('form.xml'):
res = download_xform(req, username='bob', id_string=id_string)
elif url.path.find('xformsManifest') > -1:
res = xformsManifest(req, username='bob', id_string=id_string)
elif url.path.find('formid-media') > -1:
data_id = url.path[url.path.rfind('/') + 1:]
res = download_media_data(
req, username='bob', id_string=id_string, data_id=data_id)
response._content = get_streaming_content(res)
else:
res = formList(req, username='bob')
response.status_code = 200
if not response._content:
response._content = res.content
return response
def get_streaming_content(res):
tmp = BytesIO()
for chunk in res.streaming_content:
tmp.write(chunk)
content = tmp.getvalue()
tmp.close()
return content
@urlmatch(netloc=r'(.*\.)?testserver$')
def instances_xml(url, request, **kwargs):
response = requests.Response()
client = DigestClient()
client.set_authorization('bob', 'bob', 'Digest')
res = client.get('%s?%s' % (url.path, url.query))
if res.status_code == 302:
res = client.get(res['Location'])
response.encoding = res.get('content-type')
response._content = get_streaming_content(res)
else:
response._content = res.content
response.status_code = 200
return response
class TestBriefcaseClient(TestBase):
def setUp(self):
TestBase.setUp(self)
self._publish_transportation_form()
self._submit_transport_instance_w_attachment()
src = os.path.join(self.this_directory, "fixtures",
"transportation", "screenshot.png")
uf = UploadedFile(file=open(src, 'rb'), content_type='image/png')
count = MetaData.objects.count()
MetaData.media_upload(self.xform, uf)
self.assertEqual(MetaData.objects.count(), count + 1)
url = urljoin(
self.base_url,
reverse(profile, kwargs={'username': self.user.username})
)
self._logout()
self._create_user_and_login('deno', 'deno')
self.bc = BriefcaseClient(
username='bob', password='bob',
url=url,
user=self.user
)
def test_download_xform_xml(self):
"""
Download xform via briefcase api
"""
with HTTMock(form_list_xml):
self.bc.download_xforms()
is_local = storage.__class__.__name__ == 'FileSystemStorage'
forms_folder_path = os.path.join('deno',
'briefcase',
'forms',
self.xform.id_string)
forms_path = os.path.join(forms_folder_path,
'%s.xml' % self.xform.id_string)
form_media_path = os.path.join(forms_folder_path, 'form-media')
media_path = os.path.join(form_media_path, 'screenshot.png')
if is_local:
does_root_folder_exist = storage.exists(forms_folder_path)
does_media_folder_exist = storage.exists(form_media_path)
else:
# `django-storage.exists()` does not work with folders on AWS
sub_folders, files = storage.listdir(forms_folder_path)
does_root_folder_exist = bool(sub_folders or files)
does_media_folder_exist = 'form-media' in sub_folders
self.assertTrue(does_root_folder_exist)
self.assertTrue(storage.exists(forms_path))
self.assertTrue(does_media_folder_exist)
self.assertTrue(storage.exists(media_path))
"""
Download instance xml
"""
with HTTMock(instances_xml):
self.bc.download_instances(self.xform.id_string)
instance_folder_path = os.path.join(forms_folder_path, 'instances')
if is_local:
does_instances_folder_exist = storage.exists(instance_folder_path)
else:
sub_folders, _ = storage.listdir(forms_folder_path)
does_instances_folder_exist = 'instances' in sub_folders
self.assertTrue(does_instances_folder_exist)
instance = Instance.objects.all()[0]
instance_path = os.path.join(
instance_folder_path, 'uuid%s' % instance.uuid, 'submission.xml')
self.assertTrue(storage.exists(instance_path))
media_file = "1335783522563.jpg"
media_path = os.path.join(
instance_folder_path, 'uuid%s' % instance.uuid, media_file)
self.assertTrue(storage.exists(media_path))
def test_push(self):
with HTTMock(form_list_xml):
self.bc.download_xforms()
with HTTMock(instances_xml):
self.bc.download_instances(self.xform.id_string)
XForm.objects.all().delete()
xforms = XForm.objects.filter(
user=self.user, id_string=self.xform.id_string)
self.assertTrue(xforms.count() == 0)
instances = Instance.objects.filter(
xform__user=self.user, xform__id_string=self.xform.id_string)
self.assertTrue(instances.count() == 0)
self.bc.push()
xforms = XForm.objects.filter(
user=self.user, id_string=self.xform.id_string)
self.assertTrue(xforms.count() == 1)
instances = Instance.objects.filter(
xform__user=self.user, xform__id_string=self.xform.id_string)
self.assertTrue(instances.count() == 1)
def tearDown(self):
# remove media files
for username in ['bob', 'deno']:
delete_user_storage(username)
| bsd-2-clause | 5,599,805,507,777,540,000 | 37.098901 | 78 | 0.62792 | false |
mmlab/eice | EiCGraphAlgo/core/typeahead.py | 1 | 4270 | '''
Created on 17-sep.-2012
@author: ldevocht
'''
import urllib.parse, lxml.objectify, logging, configparser, re, ujson, requests
from core.resourceretriever import Resourceretriever
from core import resourceretriever, config_search
config = resourceretriever.config
mappings = resourceretriever.mappings
logger = logging.getLogger('pathFinder')
lookup_server = config.get('services', 'lookup_index')
#lookup_solr = Solr(lookup_server)
class TypeAhead:
def __init__(self):
self.session = requests.session()
def dbPediaPrefix(self, prefix):
server = config.get('services', 'lookup')
gateway = '{0}/api/search.asmx/PrefixSearch?MaxHits=7&QueryString={1}'.format(server,prefix)
requestUrl = urllib.parse.quote(gateway, ':/=?<>"*&')
logger.debug('Request %s' % requestUrl)
#rq = grequests.get(requestUrl)
#response = grequests.map([rq])
#raw_output = response[0].content
#raw_output = urllib.request.urlopen(requestUrl,timeout=2).read()
#s = requests.Session()
#s.headers.update({'Connection': 'close'})
r = self.session.get(requestUrl)
#(s.headers)
#print(r.headers)
raw_output = r.content
root = lxml.objectify.fromstring(raw_output)
results = list()
if hasattr(root, 'Result'):
logger.debug('Found %s results' % len(root.Result))
for result in root.Result:
if prefix.lower() in result.Label[0].text.lower() and hasattr(result.Classes, 'Class'):
klasses = result.Classes.Class
if hasattr(klasses, 'Label'):
klasse = klasses
else:
klasse = klasses[0]
item = dict()
item['label'] = result.Label[0].text
                    item['category'] = klasse.Label.text.capitalize()
                    item['uri'] = result.URI[0].text
logger.debug('Fetching local hits for %s' % len(item['uri']))
local_hits = Resourceretriever().getResource(item['uri'].strip("<>"),False)
if local_hits:
logger.debug('Found %s hits' % len(local_hits))
n_hits = 0
if local_hits:
for triple in local_hits:
if local_hits[triple][1] not in config_search.blacklist:
n_hits += 1
if n_hits > 8:
results.append(item)
else:
logger.debug('Found nothing for prefix %s' % prefix)
return results
def prefix(self, prefix,lookup_server=lookup_server):
results = list()
if len(prefix) > 2:
logger.debug('looking up %s on dbpedia lookup' % prefix)
results += self.dbPediaPrefix(prefix)
logger.debug('looking up %s on local index' % prefix)
if config.has_option('services','lookup_index'):
#query={'q':'lookup:"{0}*"'.format(re.escape(prefix).lower()),'fl':'url label type','timeAllowed':'100','rows':'7'}
#response = lookup_solr.search(**query)
query = '%sselect?q=lookup:"%s*"&fl=url label type&wt=json' % (lookup_server,re.escape(prefix).lower())
rsp = self.session.get(query)
#response = grequests.map([rq])
response = ujson.decode(rsp.content)['response']
if len(response['docs']) > 0:
for doc in response['docs']:
item = dict()
                    item['category'] = doc['type'].split(' ')[0].rsplit('/')[-1].rsplit('#')[-1].strip('<>".')
if item['category'] == 'Agent':
item['category'] = 'Author'
                    item['uri'] = doc['url']
                    item['label'] = (doc['label'].split('.')[0].split('"^^')[0]).strip('\" <>.')
results.append(item)
logger.debug('done finding matches for %s' % prefix)
return results
#print(TypeAhead().prefix('Selver'))
#print(TypeAhead().dbPediaPrefix('Selver')) | agpl-3.0 | -1,328,228,545,254,396,400 | 45.423913 | 131 | 0.52623 | false |
gaeun/open-event-orga-server | app/api/helpers/utils.py | 1 | 7209 | import json
from hashlib import md5
from flask import request
from flask.ext.restplus import Resource as RestplusResource
from flask_restplus import Model, fields, reqparse
from app.helpers.data import update_version
from app.models.event import Event as EventModel
from .error_docs import (
notfound_error_model,
notauthorized_error_model,
validation_error_model,
invalidservice_error_model,
)
from .helpers import get_object_list, get_object_or_404, get_object_in_event, \
create_model, validate_payload, delete_model, update_model, \
handle_extra_payload, get_paginated_list, fix_attribute_names
DEFAULT_PAGE_START = 1
DEFAULT_PAGE_LIMIT = 20
POST_RESPONSES = {
400: ('Validation error', validation_error_model),
401: ('Authentication failure', notauthorized_error_model),
404: ('Event does not exist', notfound_error_model),
201: 'Resource created successfully'
}
PUT_RESPONSES = {
400: ('Validation Error', validation_error_model),
401: ('Authentication failure', notauthorized_error_model),
404: ('Object/Event not found', notfound_error_model)
}
SERVICE_RESPONSES = {
404: ('Service not found', notfound_error_model),
400: ('Service does not belong to event', invalidservice_error_model),
}
# Parameters for a paginated response
PAGE_PARAMS = {
'start': {
'description': 'Serial number to start from',
'type': int,
'default': DEFAULT_PAGE_START
},
'limit': {
'description': 'Limit on the number of results',
'type': int,
'default': DEFAULT_PAGE_LIMIT
},
}
# ETag Header (required=False by default)
ETAG_HEADER_DEFN = [
'If-None-Match', 'ETag saved by client for cached resource'
]
# Base Api Model for a paginated response
PAGINATED_MODEL = Model('PaginatedModel', {
'start': fields.Integer,
'limit': fields.Integer,
'count': fields.Integer,
'next': fields.String,
'previous': fields.String
})
# Custom Resource Class
class Resource(RestplusResource):
def dispatch_request(self, *args, **kwargs):
resp = super(Resource, self).dispatch_request(*args, **kwargs)
# ETag checking.
if request.method == 'GET':
old_etag = request.headers.get('If-None-Match', '')
# Generate hash
data = json.dumps(resp)
new_etag = md5(data).hexdigest()
if new_etag == old_etag:
# Resource has not changed
return '', 304
else:
# Resource has changed, send new ETag value
return resp, 200, {'ETag': new_etag}
elif request.method == 'POST':
# Grab just the response data
# Exclude status code and headers
resp_data = resp[0]
data = json.dumps(resp_data)
etag = md5(data).hexdigest()
# Add ETag to response headers
resp[2].update({'ETag': etag})
return resp
# Base class for Paginated Resource
class PaginatedResourceBase():
"""
Paginated Resource Helper class
This includes basic properties used in the class
"""
parser = reqparse.RequestParser()
parser.add_argument('start', type=int, default=DEFAULT_PAGE_START)
parser.add_argument('limit', type=int, default=DEFAULT_PAGE_LIMIT)
# DAO for Models
class BaseDAO:
"""
DAO for a basic independent model
"""
version_key = None
is_importing = False # temp key to set to True when an import operation is underway
def __init__(self, model, post_api_model=None, put_api_model=None):
self.model = model
self.post_api_model = post_api_model
self.put_api_model = put_api_model if put_api_model else post_api_model
def get(self, id_):
return get_object_or_404(self.model, id_)
def list(self, **kwargs):
return get_object_list(self.model, **kwargs)
def paginated_list(self, url=None, args={}, **kwargs):
return get_paginated_list(self.model, url=url, args=args, **kwargs)
def create(self, data, validate=True):
if validate:
data = self.validate(data, self.post_api_model)
item = create_model(self.model, data)
self.update_version(item.id)
return item
def update(self, id_, data, validate=True):
if validate:
data = self.validate_put(data, self.put_api_model)
item = update_model(self.model, id_, data)
self.update_version(id_)
return item
def delete(self, id_):
item = delete_model(self.model, id_)
self.update_version(id_)
return item
def validate(self, data, model=None, check_required=True):
if not model:
model = self.post_api_model
if model:
data = handle_extra_payload(data, model)
validate_payload(data, model, check_required=check_required)
data = fix_attribute_names(data, model)
return data
def validate_put(self, data, model=None):
"""
Abstraction over validate with check_required set to False
"""
return self.validate(data, model=model, check_required=False)
def update_version(self, event_id):
"""
Update version of the component of the event
"""
if self.version_key:
update_version(event_id, False, self.version_key)
# Helper functions
def _del(self, data, fields):
"""
Safe delete fields from payload
"""
data_copy = data.copy()
for field in fields:
if field in data:
del data_copy[field]
return data_copy
# DAO for Service Models
class ServiceDAO(BaseDAO):
"""
Data Access Object for service models like microlocations,
speakers and so.
"""
def get(self, event_id, sid):
return get_object_in_event(self.model, sid, event_id)
def list(self, event_id, **kwargs):
# Check if an event with `event_id` exists
get_object_or_404(EventModel, event_id)
return get_object_list(self.model, event_id=event_id, **kwargs)
def paginated_list(self, url=None, args={}, **kwargs):
return get_paginated_list(self.model, url=url, args=args, **kwargs)
def create(self, event_id, data, url, validate=True):
if validate:
data = self.validate(data)
item = create_model(self.model, data, event_id=event_id)
self.update_version(event_id)
# Return created resource with a 201 status code and its Location
# (url) in the header.
resource_location = url + '/' + str(item.id)
return item, 201, {'Location': resource_location}
def update(self, event_id, service_id, data, validate=True):
if validate:
data = self.validate_put(data)
item = update_model(self.model, service_id, data, event_id)
self.update_version(event_id)
return item
def delete(self, event_id, service_id):
item = delete_model(self.model, service_id, event_id=event_id)
self.update_version(event_id)
return item
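# Minimal usage sketch (hypothetical model and API-model names, for illustration
# only): a concrete DAO is created per resource and used by the API endpoints,
# e.g.
#   track_dao = ServiceDAO(TrackModel, post_api_model=TRACK_POST_MODEL)
#   track, status, headers = track_dao.create(event_id, payload, request.url)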
# store task results in case of testing
# state and info
TASK_RESULTS = {}
| gpl-3.0 | 3,571,529,669,562,688,000 | 30.207792 | 88 | 0.625468 | false |
wradlib/wradlib | wradlib/tests/test_adjust.py | 1 | 7916 | #!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import numpy as np
import pytest
from wradlib import adjust
class Data:
# Arguments to be used throughout all test classes
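    # (a 4x4 grid of 16 "radar" pixels with two identical data columns, plus 4
    # gauge observations inside that grid; the MFB/None/GageOnly tests further
    # down override these with a smaller two-point toy case)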
raw_x, raw_y = np.meshgrid(np.arange(4).astype("f4"), np.arange(4).astype("f4"))
raw_coords = np.vstack((raw_x.ravel(), raw_y.ravel())).T
obs_coords = np.array([[1.0, 1.0], [2.0, 1.0], [1.0, 3.5], [3.5, 3.0]])
raw = np.array(
[
[
1.0,
2.0,
1.0,
0.0,
1.0,
2.0,
1.0,
2.0,
1.0,
0.0,
0.0,
3.0,
4.0,
0.0,
4.0,
0.0,
],
[
1.0,
2.0,
1.0,
0.0,
1.0,
2.0,
1.0,
2.0,
1.0,
0.0,
0.0,
3.0,
4.0,
0.0,
4.0,
0.0,
],
]
).T
obs = np.array([[2.0, 3, 0.0, 4.0], [2.0, 3, 0.0, 4.0]]).T
nnear_raws = 2
mingages = 3
class TestAdjustBase(Data):
def test___init__(self):
pass
def test__checkip(self):
pass
def test__check_shape(self):
pass
def test___call__(self):
pass
def test__get_valid_pairs(self):
pass
def test_xvalidate(self):
pass
class TestAdjustAddTest(Data):
def test_AdjustAdd_1(self):
adj = adjust.AdjustAdd(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.62818784, 1.62818784],
[2.75926679, 2.75926679],
[2.09428144, 2.09428144],
[1.1466651, 1.1466651],
[1.51948941, 1.51948941],
[2.5, 2.5],
[2.5, 2.5],
[3.27498305, 3.27498305],
[1.11382822, 1.11382822],
[0.33900645, 0.33900645],
[0.89999998, 0.89999998],
[4.52409637, 4.52409637],
[3.08139533, 3.08139533],
[0.0, 0.0],
[3.99180328, 3.99180328],
[2.16913891, 2.16913891],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMultiplyTest(Data):
def test_AdjustMultiply_1(self):
adj = adjust.AdjustMultiply(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.44937706, 1.44937706],
[3.04539442, 3.04539442],
[1.74463618, 1.74463618],
[0.0, 0.0],
[1.37804615, 1.37804615],
[2.66666675, 2.66666675],
[2.0, 2.0],
[3.74106812, 3.74106812],
[1.17057478, 1.17057478],
[0.0, 0.0],
[0.0, 0.0],
[6.14457822, 6.14457822],
[2.43439031, 2.43439031],
[0.0, 0.0],
[4.60765028, 4.60765028],
[0.0, 0.0],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMixed(Data):
def test_AdjustMixed_1(self):
adj = adjust.AdjustMixed(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array(
[
[1.51427719, 1.51427719],
[2.95735525, 2.95735525],
[1.85710269, 1.85710269],
[0.36806121, 0.36806121],
[1.43181512, 1.43181512],
[2.61538471, 2.61538471],
[2.15384617, 2.15384617],
[3.59765723, 3.59765723],
[1.18370627, 1.18370627],
[0.15027952, 0.15027952],
[0.30825174, 0.30825174],
[5.63558862, 5.63558862],
[2.49066845, 2.49066845],
[-0.29200733, -0.29200733],
[4.31646909, 4.31646909],
[0.67854041, 0.67854041],
]
)
assert np.allclose(res, shouldbe)
# test in case only one dataset is passed
res = adj(self.obs[:, 0], self.raw[:, 0])
assert np.allclose(res, shouldbe[:, 0])
class TestAdjustMFB(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_AdjustMFB_1(self):
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=self.mfb_args,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([4.0, 4.0])
assert np.allclose(res, shouldbe)
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=dict(method="median"),
)
adj(self.obs, self.raw)
adj = adjust.AdjustMFB(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
mfb_args=dict(method="linregr", minslope=1.0, minr="0.7", maxp=0.5),
)
adj(self.obs, self.raw)
class TestAdjustNone(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_AdjustNone_1(self):
adj = adjust.AdjustNone(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([2.0, 2.0])
assert np.allclose(res, shouldbe)
class TestGageOnly(Data):
raw_coords = np.array([[0.0, 0.0], [1.0, 1.0]])
obs_coords = np.array([[0.5, 0.5], [1.5, 1.5]])
raw = np.array([2.0, 2.0])
obs = np.array([4.0, 4.0])
mingages = 0
mfb_args = dict(method="mean")
def test_GageOnly_1(self):
adj = adjust.GageOnly(
self.obs_coords,
self.raw_coords,
nnear_raws=self.nnear_raws,
mingages=self.mingages,
)
res = adj(self.obs, self.raw)
shouldbe = np.array([4.0, 4.0])
assert np.allclose(res, shouldbe)
class TestAdjustHelper:
def test__get_neighbours_ix(self):
pass
def test__get_statfunc(self):
adjust._get_statfunc("median")
adjust._get_statfunc("best")
with pytest.raises(NameError):
adjust._get_statfunc("wradlib")
def test_best(self):
x = 7.5
y = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 7.7, 8.0, 8.0, 8.0, 8.0])
assert adjust.best(x, y) == 7.7
| mit | 893,179,589,196,588,400 | 27.47482 | 84 | 0.456544 | false |
forkbong/qutebrowser | qutebrowser/misc/backendproblem.py | 1 | 17440 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2017-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Dialogs shown when there was a problem with a backend choice."""
import os
import sys
import functools
import html
import enum
import shutil
import argparse
import dataclasses
from typing import Any, List, Sequence, Tuple, Optional
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QDialog, QPushButton, QHBoxLayout, QVBoxLayout, QLabel,
QMessageBox, QWidget)
from PyQt5.QtNetwork import QSslSocket
from qutebrowser.config import config, configfiles
from qutebrowser.utils import (usertypes, version, qtutils, log, utils,
standarddir)
from qutebrowser.misc import objects, msgbox, savemanager, quitter
class _Result(enum.IntEnum):
"""The result code returned by the backend problem dialog."""
quit = QDialog.Accepted + 1
restart = QDialog.Accepted + 2
restart_webkit = QDialog.Accepted + 3
restart_webengine = QDialog.Accepted + 4
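    # The offsets from QDialog.Accepted keep these custom codes from colliding
    # with the stock QDialog.Accepted / QDialog.Rejected values returned by exec().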
@dataclasses.dataclass
class _Button:
"""A button passed to BackendProblemDialog."""
text: str
setting: str
value: Any
default: bool = False
def _other_backend(backend: usertypes.Backend) -> Tuple[usertypes.Backend, str]:
"""Get the other backend enum/setting for a given backend."""
other_backend = {
usertypes.Backend.QtWebKit: usertypes.Backend.QtWebEngine,
usertypes.Backend.QtWebEngine: usertypes.Backend.QtWebKit,
}[backend]
other_setting = other_backend.name.lower()[2:]
return (other_backend, other_setting)
def _error_text(because: str, text: str, backend: usertypes.Backend) -> str:
"""Get an error text for the given information."""
other_backend, other_setting = _other_backend(backend)
if other_backend == usertypes.Backend.QtWebKit:
warning = ("<i>Note that QtWebKit hasn't been updated since "
"July 2017 (including security updates).</i>")
suffix = " (not recommended)"
else:
warning = ""
suffix = ""
return ("<b>Failed to start with the {backend} backend!</b>"
"<p>qutebrowser tried to start with the {backend} backend but "
"failed because {because}.</p>{text}"
"<p><b>Forcing the {other_backend.name} backend{suffix}</b></p>"
"<p>This forces usage of the {other_backend.name} backend by "
"setting the <i>backend = '{other_setting}'</i> option "
"(if you have a <i>config.py</i> file, you'll need to set "
"this manually). {warning}</p>".format(
backend=backend.name, because=because, text=text,
other_backend=other_backend, other_setting=other_setting,
warning=warning, suffix=suffix))
class _Dialog(QDialog):
"""A dialog which gets shown if there are issues with the backend."""
def __init__(self, *, because: str,
text: str,
backend: usertypes.Backend,
buttons: Sequence[_Button] = None,
parent: QWidget = None) -> None:
super().__init__(parent)
vbox = QVBoxLayout(self)
other_backend, other_setting = _other_backend(backend)
text = _error_text(because, text, backend)
label = QLabel(text)
label.setWordWrap(True)
label.setTextFormat(Qt.RichText)
vbox.addWidget(label)
hbox = QHBoxLayout()
buttons = [] if buttons is None else buttons
quit_button = QPushButton("Quit")
quit_button.clicked.connect(lambda: self.done(_Result.quit))
hbox.addWidget(quit_button)
backend_text = "Force {} backend".format(other_backend.name)
if other_backend == usertypes.Backend.QtWebKit:
backend_text += ' (not recommended)'
backend_button = QPushButton(backend_text)
backend_button.clicked.connect(functools.partial(
self._change_setting, 'backend', other_setting))
hbox.addWidget(backend_button)
for button in buttons:
btn = QPushButton(button.text)
btn.setDefault(button.default)
btn.clicked.connect(functools.partial(
self._change_setting, button.setting, button.value))
hbox.addWidget(btn)
vbox.addLayout(hbox)
def _change_setting(self, setting: str, value: str) -> None:
"""Change the given setting and restart."""
config.instance.set_obj(setting, value, save_yaml=True)
if setting == 'backend' and value == 'webkit':
self.done(_Result.restart_webkit)
elif setting == 'backend' and value == 'webengine':
self.done(_Result.restart_webengine)
else:
self.done(_Result.restart)
@dataclasses.dataclass
class _BackendImports:
"""Whether backend modules could be imported."""
webkit_error: Optional[str] = None
webengine_error: Optional[str] = None
class _BackendProblemChecker:
"""Check for various backend-specific issues."""
def __init__(self, *,
no_err_windows: bool,
save_manager: savemanager.SaveManager) -> None:
self._save_manager = save_manager
self._no_err_windows = no_err_windows
def _show_dialog(self, *args: Any, **kwargs: Any) -> None:
"""Show a dialog for a backend problem."""
if self._no_err_windows:
text = _error_text(*args, **kwargs)
print(text, file=sys.stderr)
sys.exit(usertypes.Exit.err_init)
dialog = _Dialog(*args, **kwargs)
status = dialog.exec()
self._save_manager.save_all(is_exit=True)
if status in [_Result.quit, QDialog.Rejected]:
pass
elif status == _Result.restart_webkit:
quitter.instance.restart(override_args={'backend': 'webkit'})
elif status == _Result.restart_webengine:
quitter.instance.restart(override_args={'backend': 'webengine'})
elif status == _Result.restart:
quitter.instance.restart()
else:
raise utils.Unreachable(status)
sys.exit(usertypes.Exit.err_init)
def _nvidia_shader_workaround(self) -> None:
"""Work around QOpenGLShaderProgram issues.
See https://bugs.launchpad.net/ubuntu/+source/python-qt4/+bug/941826
"""
self._assert_backend(usertypes.Backend.QtWebEngine)
utils.libgl_workaround()
def _xwayland_options(self) -> Tuple[str, List[_Button]]:
"""Get buttons/text for a possible XWayland solution."""
buttons = []
text = "<p>You can work around this in one of the following ways:</p>"
if 'DISPLAY' in os.environ:
# XWayland is available, but QT_QPA_PLATFORM=wayland is set
buttons.append(
_Button("Force XWayland", 'qt.force_platform', 'xcb'))
text += ("<p><b>Force Qt to use XWayland</b></p>"
"<p>This allows you to use the newer QtWebEngine backend "
"(based on Chromium). "
"This sets the <i>qt.force_platform = 'xcb'</i> option "
"(if you have a <i>config.py</i> file, you'll need to "
"set this manually).</p>")
else:
text += ("<p><b>Set up XWayland</b></p>"
"<p>This allows you to use the newer QtWebEngine backend "
"(based on Chromium). ")
return text, buttons
def _handle_wayland_webgl(self) -> None:
"""On older graphic hardware, WebGL on Wayland causes segfaults.
See https://github.com/qutebrowser/qutebrowser/issues/5313
"""
self._assert_backend(usertypes.Backend.QtWebEngine)
if os.environ.get('QUTE_SKIP_WAYLAND_WEBGL_CHECK'):
return
platform = objects.qapp.platformName()
if platform not in ['wayland', 'wayland-egl']:
return
# Only Qt 5.14 should be affected
if not qtutils.version_check('5.14', compiled=False):
return
if qtutils.version_check('5.15', compiled=False):
return
# Newer graphic hardware isn't affected
opengl_info = version.opengl_info()
if (opengl_info is None or
opengl_info.gles or
opengl_info.version is None or
opengl_info.version >= (4, 3)):
return
# If WebGL is turned off, we're fine
if not config.val.content.webgl:
return
text, buttons = self._xwayland_options()
buttons.append(_Button("Turn off WebGL (recommended)",
'content.webgl',
False))
text += ("<p><b>Disable WebGL (recommended)</b></p>"
"This sets the <i>content.webgl = False</i> option "
"(if you have a <i>config.py</i> file, you'll need to "
"set this manually).</p>")
self._show_dialog(backend=usertypes.Backend.QtWebEngine,
because=("of frequent crashes with Qt 5.14 on "
"Wayland with older graphics hardware"),
text=text,
buttons=buttons)
def _try_import_backends(self) -> _BackendImports:
"""Check whether backends can be imported and return BackendImports."""
# pylint: disable=unused-import
results = _BackendImports()
try:
from PyQt5 import QtWebKit
from PyQt5.QtWebKit import qWebKitVersion
from PyQt5 import QtWebKitWidgets
except (ImportError, ValueError) as e:
results.webkit_error = str(e)
else:
if not qtutils.is_new_qtwebkit():
results.webkit_error = "Unsupported legacy QtWebKit found"
try:
from PyQt5 import QtWebEngineWidgets
except (ImportError, ValueError) as e:
results.webengine_error = str(e)
return results
def _handle_ssl_support(self, fatal: bool = False) -> None:
"""Check for full SSL availability.
If "fatal" is given, show an error and exit.
"""
if QSslSocket.supportsSsl():
return
if qtutils.version_check('5.12.4'):
version_text = ("If you use OpenSSL 1.0 with a PyQt package from "
"PyPI (e.g. on Ubuntu 16.04), you will need to "
"build OpenSSL 1.1 from sources and set "
"LD_LIBRARY_PATH accordingly.")
else:
version_text = ("If you use OpenSSL 1.1 with a PyQt package from "
"PyPI (e.g. on Archlinux or Debian Stretch), you "
"need to set LD_LIBRARY_PATH to the path of "
"OpenSSL 1.0 or use Qt >= 5.12.4.")
text = ("Could not initialize QtNetwork SSL support. {} This only "
"affects downloads and :adblock-update.".format(version_text))
if fatal:
errbox = msgbox.msgbox(parent=None,
title="SSL error",
text="Could not initialize SSL support.",
icon=QMessageBox.Critical,
plain_text=False)
errbox.exec()
sys.exit(usertypes.Exit.err_init)
assert not fatal
log.init.warning(text)
def _check_backend_modules(self) -> None:
"""Check for the modules needed for QtWebKit/QtWebEngine."""
imports = self._try_import_backends()
if not imports.webkit_error and not imports.webengine_error:
return
elif imports.webkit_error and imports.webengine_error:
text = ("<p>qutebrowser needs QtWebKit or QtWebEngine, but "
"neither could be imported!</p>"
"<p>The errors encountered were:<ul>"
"<li><b>QtWebKit:</b> {webkit_error}"
"<li><b>QtWebEngine:</b> {webengine_error}"
"</ul></p>".format(
webkit_error=html.escape(imports.webkit_error),
webengine_error=html.escape(imports.webengine_error)))
errbox = msgbox.msgbox(parent=None,
title="No backend library found!",
text=text,
icon=QMessageBox.Critical,
plain_text=False)
errbox.exec()
sys.exit(usertypes.Exit.err_init)
elif objects.backend == usertypes.Backend.QtWebKit:
if not imports.webkit_error:
return
self._show_dialog(
backend=usertypes.Backend.QtWebKit,
because="QtWebKit could not be imported",
text="<p><b>The error encountered was:</b><br/>{}</p>".format(
html.escape(imports.webkit_error))
)
elif objects.backend == usertypes.Backend.QtWebEngine:
if not imports.webengine_error:
return
self._show_dialog(
backend=usertypes.Backend.QtWebEngine,
because="QtWebEngine could not be imported",
text="<p><b>The error encountered was:</b><br/>{}</p>".format(
html.escape(imports.webengine_error))
)
raise utils.Unreachable
def _handle_cache_nuking(self) -> None:
"""Nuke the QtWebEngine cache if the Qt version changed.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-72532
"""
if not configfiles.state.qt_version_changed:
return
# Only nuke the cache in cases where we know there are problems.
# It seems these issues started with Qt 5.12.
# They should be fixed with Qt 5.12.5:
# https://codereview.qt-project.org/c/qt/qtwebengine-chromium/+/265408
if qtutils.version_check('5.12.5', compiled=False):
return
log.init.info("Qt version changed, nuking QtWebEngine cache")
cache_dir = os.path.join(standarddir.cache(), 'webengine')
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
def _handle_serviceworker_nuking(self) -> None:
"""Nuke the service workers directory if the Qt version changed.
WORKAROUND for:
https://bugreports.qt.io/browse/QTBUG-72532
https://bugreports.qt.io/browse/QTBUG-82105
"""
if ('serviceworker_workaround' not in configfiles.state['general'] and
qtutils.version_check('5.14', compiled=False)):
# Nuke the service worker directory once for every install with Qt
# 5.14, given that it seems to cause a variety of segfaults.
configfiles.state['general']['serviceworker_workaround'] = '514'
affected = True
else:
# Otherwise, just nuke it when the Qt version changed.
affected = configfiles.state.qt_version_changed
if not affected:
return
service_worker_dir = os.path.join(standarddir.data(), 'webengine',
'Service Worker')
bak_dir = service_worker_dir + '-bak'
if not os.path.exists(service_worker_dir):
return
log.init.info("Qt version changed, removing service workers")
# Keep one backup around - we're not 100% sure what persistent data
# could be in there, but this folder can grow to ~300 MB.
if os.path.exists(bak_dir):
shutil.rmtree(bak_dir)
shutil.move(service_worker_dir, bak_dir)
def _assert_backend(self, backend: usertypes.Backend) -> None:
assert objects.backend == backend, objects.backend
def check(self) -> None:
"""Run all checks."""
self._check_backend_modules()
if objects.backend == usertypes.Backend.QtWebEngine:
self._handle_ssl_support()
self._nvidia_shader_workaround()
self._handle_wayland_webgl()
self._handle_cache_nuking()
self._handle_serviceworker_nuking()
else:
self._assert_backend(usertypes.Backend.QtWebKit)
self._handle_ssl_support(fatal=True)
def init(*, args: argparse.Namespace,
save_manager: savemanager.SaveManager) -> None:
"""Run all checks."""
checker = _BackendProblemChecker(no_err_windows=args.no_err_windows,
save_manager=save_manager)
checker.check()
| gpl-3.0 | -8,716,777,097,984,474,000 | 37.669623 | 84 | 0.585722 | false |
maxivanoff/fftoolbox-app | q/fftoolbox/multipole.py | 1 | 12213 | import logging
import numpy as np
from copy import deepcopy
from numpy.linalg import norm
from scipy.special import sph_harm as Y
mult_logger = logging.getLogger('multipole')
def Rlm(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Y(m, l, theta, phi)
def Rlmc(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Ylmc(l, m, theta, phi)
def Rlms(l, m, r, theta, phi):
return r**l * np.sqrt(4 * np.pi / (2 * l + 1)) * Ylms(l, m, theta, phi)
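# Ylmc/Ylms below are the real (tesseral) spherical harmonics, i.e. the cosine- and
# sine-type combinations of the complex harmonics. Note that scipy's sph_harm(m, l,
# theta, phi) takes theta as the azimuthal angle and phi as the polar angle.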
def Ylmc(l, m, theta, phi):
#v = np.sqrt(0.5) * (np.conj(Y(m, l, theta, phi)) + Y(m, l, theta, phi))
v = np.sqrt(0.5) * (Y(-m, l, theta, phi) + (-1)**m*Y(m, l, theta, phi))
#v = np.sqrt(0.5) * ((-1)**m*Y(-m, l, theta, phi) + Y(m, l, theta, phi))
if abs(v.imag) > 0.0001: raise ValueError("Non-zero imaginary part in Ylmc")
return v.real
def Ylms(l, m, theta, phi):
#v = 1j * np.sqrt(0.5) * (np.conj(Y(m, l, theta, phi)) - Y(m, l, theta, phi))
#v = 1j * np.sqrt(0.5) * (Y(-m, l, theta, phi) - (-1)**m*Y(m, l, theta, phi))
v = 1j * np.sqrt(0.5) * (-(-1)**m*Y(-m, l, theta, phi) + Y(m, l, theta, phi))
if abs(v.imag) > 0.0001: raise ValueError("Non-zero imaginary part in Ylms")
return v.real
class GroupOfAtoms(object):
def __init__(self, name=None):
self.name = name
self.atoms = list()
self.i = -1
    def build_Pymol_rep(self, filename, vmax=1., r_sphere=0.2):
        # Write a PyMOL CGO script that draws every site as a sphere colored by its charge.
        s = 'from pymol.cgo import *\nfrom pymol import cmd\nobj = [ BEGIN, LINES, ]\n'
        for site in self.sites:
            q = self.molecule.ff.charges[site.name]
            if q is None:
                s_color = 'x = 0.0\ncolor = [COLOR, 1-x, 1-x, 1]\n'
            elif q >= 0:
                s_color = 'x = %f\ncolor = [COLOR, 1, 1-x, 1-x]\n' % (q/vmax)
            else:
                s_color = 'x = %f\ncolor = [COLOR, 1-x, 1-x, 1]\n' % (-q/vmax)
            s_sphere = 'sphere = [ SPHERE, %f, %f, %f, %f]\n' % (site.x, site.y, site.z, r_sphere)
            s = s + s_color + s_sphere + 'obj += color+sphere\n'
        s = s + 'obj.append(END)\ncmd.load_cgo(obj,"cgo01")\n'
        file = open(filename, 'w')
        file.write(s)
        file.close()
def set_sym_sites(self):
sites = {}
self.sym_sites = []
for i, name in enumerate(self.sites_names_eq):
if not name in sites:
sites[name] = i
self.sym_sites.append(sites[name])
def get_coordinates(self):
crds = np.zeros((len(self.sites), 3))
for i, s in enumerate(self.sites):
crds[i][:] = s.coordinates[:]
return crds
def get_sites(self, name):
return filter(lambda s: s.name==name, self.sites)
def get_atoms_by_element(self, element):
return filter(lambda a: a.element==element, self.atoms)
def get_atom(self, index):
return next(a for a in self.atoms if a.index==index)
@property
def atoms_names_noneq(self):
return [a.name for a in self.atoms_noneq]
@property
def atoms_names_eq(self):
return [a.name for a in self.atoms]
@property
def sites_names_noneq(self):
return [s.name for s in self.sites_noneq]
@property
def sites_names(self):
return self.sites_names_noneq
@property
def sites_names_eq(self):
return [s.name for s in self.sites]
@property
def sites(self):
sites = []
for atom in self:
sites += atom.sites
return sites
@property
def sites_noneq(self):
sites = []
for s in self.sites:
if not s.name in [ss.name for ss in sites]:
sites.append(s)
return sites
@property
def atoms_noneq(self):
atoms = []
for a in self.atoms:
if not a.name in [aa.name for aa in atoms]:
atoms.append(a)
return atoms
def __iter__(self):
return self
def next(self):
if self.i < len(self.atoms)-1:
self.i += 1
return self.atoms[self.i]
else:
self.i = -1
raise StopIteration
class Multipole(GroupOfAtoms):
"""
This is Multipole
"""
def __init__(self, name=None, origin=None):
GroupOfAtoms.__init__(self, name)
self.origin = origin
def set_multipole_matrix(self, multipoles=('cartesian', 2)):
if multipoles[0] == 'cartesian':
multipole = Cartesian(multipoles[1], self.get_coordinates(), self.sym_sites, self.origin)
elif multipoles[0] == 'spherical':
multipole = Spherical(multipoles[1], self.get_coordinates(), self.sym_sites, self.origin)
self.l = multipoles[1]
self.multipoles_names = multipole.names
self.QtoM = multipole.rotation_matrix_direct
self.QtoM_normed = np.zeros(self.QtoM.shape)
for i, u in enumerate(self.QtoM):
self.QtoM_normed[i,:] = u/np.linalg.norm(u)
self.MtoQ = multipole.rotation_matrix_inverse
def charges_to_multipoles(self, charges):
Q = np.array([])
for name in self.sites_names_noneq:
Q = np.append(Q, charges[name])
M = np.dot(self.QtoM, Q)
multipoles = {}
for multipole, m_value in zip(self.multipoles_names, M):
multipoles[multipole] = m_value
return multipoles
def multipoles_to_charges(self, multipoles):
if self.MtoQ is None:
raise ValueError('Cannot convert multipoles to charges')
M = np.array([])
for multipole in self.multipoles_names:
M = np.append(M, multipoles[multipole])
Q = np.dot(self.MtoQ, M)
charges = {}
for name, q_value in zip(self.sites_names_noneq, Q):
charges[name] = q_value
return charges
class MultipoleMatrix(object):
def __init__(self, sym_sites=None, formula=None):
# build matrix
rotation_matrix = np.zeros((len(self.names), len(sym_sites)))
for i, m_name in enumerate(self.names):
rotation_matrix[i][:] = formula.u(m_name).real
# reduce matrix
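        # np.bincount with weights sums the columns of symmetry-equivalent sites,
        # collapsing the matrix from one column per site to one per unique site.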
self.rotation_matrix_direct = np.zeros((len(self.names), max(sym_sites)+1))
for i, _ in enumerate(self.names):
self.rotation_matrix_direct[i] = np.bincount(sym_sites, weights=rotation_matrix[i])
try:
self.rotation_matrix_inverse = np.linalg.inv(self.rotation_matrix_direct)
except np.linalg.LinAlgError:
self.rotation_matrix_inverse = None
mult_logger.debug("Multipole conversion matrix is set up.\nmultipoles = %s; total number of components: %i \nQ to M matrix: %s" % (self.names, len(self.names), self.rotation_matrix_direct.shape))
class Spherical(MultipoleMatrix):
def __init__(self, l=None, coordinates=None, sym_sites=None, origin=None):
try:
self.names = []
for ll in xrange(l):
for mm in xrange(ll+1):
if mm==0:
self.names.append('%i%i' % (ll, mm))
else:
self.names.append('%i%ic' % (ll, mm))
self.names.append('%i%is' % (ll, mm))
except TypeError:
self.names = l
#cartesian to spherical (r, theta, phi) = (r, azimuth, polar)
def arctan(a,b):
if a==b==0:
return 0.
if b==0:
return (-1)*np.pi*np.sign(a)/2
else:
return np.arctan(a/b)
spherical = np.zeros(coordinates.shape)
x, y, z = coordinates[:,0], coordinates[:,1], coordinates[:,2]
#r = np.sqrt(x**2 + y**2 + z**2)
#phi = np.arccos(z/r)
#theta = np.array([])
#for xx, yy in zip(x,y):
# if yy>=0 and xx>0:
# s = 0
# if xx<=0:
# s = np.pi
# if xx>0 and yy<0:
# s = 2*np.pi
# if xx==0 and yy==0:
# s = 0
# theta = np.append(theta, arctan(yy,xx) + s)
#spherical[:,0] = r
#spherical[:,1] = theta
#spherical[:,2] = phi
xy2 = x**2 + y**2 # x2 + y2
spherical[:,0] = np.sqrt(xy2 + z**2) # r2 = x2 + y2 + z2
spherical[:,1] = np.arctan2(y, x) # theta = arctan(y/x)
spherical[:,2] = np.arctan2(np.sqrt(xy2), z) # phi = arctan(xy/z)
formula = SphericalFormulas(spherical, origin)
MultipoleMatrix.__init__(self, sym_sites, formula)
class Cartesian(MultipoleMatrix):
def __init__(self, l=None, coordinates=None, sym_sites=None, origin=None):
self.names = []
for i in xrange(l+1):
self.names += self.l_to_names(i)
formula = CartesianFormulas(coordinates, origin)
MultipoleMatrix.__init__(self, sym_sites, formula)
def l_to_names(self, l):
if l == 0: return ['charge']
if l == 1: return 'X Y Z'.split()
if l == 2: return 'XX YY ZZ XY XZ YZ'.split()
class Formulas(dict):
def __init__(self, coordinates=None, origin=None):
self.coordinates = coordinates
if origin == None:
self.origin = np.zeros(3)
else:
self.origin = origin
dict.__init__(self)
class SphericalFormulas(Formulas):
def __init__(self, coordinates=None, origin=None):
Formulas.__init__(self, coordinates, origin)
self[0] = Rlm
self['c'] = Rlmc
self['s'] = Rlms
def u(self, m_name):
l, m = [int(t) for t in m_name[:2]]
try:
x = m_name[2]
except IndexError:
x = 0
u = np.array([])
for crds in self.coordinates:
r, theta, phi = crds
u = np.append(u, self[x](l, m, r, theta, phi))
return u
class CartesianFormulas(Formulas):
def __init__(self, coordinates=None, origin=None):
Formulas.__init__(self, coordinates, origin)
self[0] = self.total_charge
self[1] = self.dipole
self[2] = self.quadrupole
self[3] = self.hexadecapole
def name_to_num(self, m_name):
def convert(a):
if a == 'X': return 0
if a == 'Y': return 1
if a == 'Z': return 2
if m_name == 'charge':
return
else:
return [convert(a) for a in m_name]
def u(self, m_name):
components = self.name_to_num(m_name)
if m_name == 'charge': c = 0
else: c = len(m_name)
u = np.array([])
for crds in self.coordinates:
u = np.append(u, self[c](crds, components))
return u
def total_charge(self, crds, components):
return 1.
def dipole(self, crds, components):
c = components[0]
return crds[c] - self.origin[c]
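    # Traceless Cartesian quadrupole moment: 3/2*a_m*a_n - 1/2*a^2*delta_mn (its trace vanishes).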
def quadrupole(self, crds, components):
a2 = np.sum(crds**2)
m, n = components
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
return 3.0 / 2.0 * am * an - 0.5 * a2 * self.delta(m,n)
def octapole(self, crds, components):
m, n, k = components
a2 = np.sum(crds**2)
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
ak = crds[k] - self.origin[k]
        return 5. / 2. * am * an * ak - 0.5 * a2 * (am * self.delta(n,k) + an * self.delta(m,k) + ak * self.delta(m,n))
def hexadecapole(self, crds, components):
        m, n, k, l = components
        a2 = np.sum(crds**2)
am = crds[m] - self.origin[m]
an = crds[n] - self.origin[n]
ak = crds[k] - self.origin[k]
al = crds[l] - self.origin[l]
return 1. / (1. * 2. * 3. * 4.) * (105. * am * an * ak * al - 15. * a2 * (am * an * self.delta(k,l) + am * ak * self.delta(n,l) + am * al * self.delta(n,k) + an * ak * self.delta(m,l) + an * al * self.delta(m,k) + ak * al * self.delta(m,n)) + 3. * a2**2 * (self.delta(m,n) * self.delta(k,l) + self.delta(m,k) * self.delta(n,l) + self.delta(m,l) * self.delta(n,k)))
def delta(self, i, j):
if i==j: return 1
else: return 0
| gpl-2.0 | 6,099,882,326,194,920,000 | 33.794872 | 372 | 0.524032 | false |
woobe/h2o | py/testdir_multi_jvm/test_rf_1ktrees_job_cancel_many_fvec.py | 1 | 2219 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_1ktrees_job_cancel_many_fvec(self):
h2o.beta_features = True
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [1000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
csvFilename = "parity_128_4_" + str(1000) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
print "kick off jobs, then cancel them"
for trial in range (1,5):
# random 0 or 1 delay
delay = random.uniform(0,1)
time.sleep(delay)
h2o.verboseprint("Trial", trial)
start = time.time()
h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, rfView=False, noPoll=True, timeoutSecs=30, retryDelaySecs=0.25)
print "RF #", trial, "started on ", csvFilename, 'took', time.time() - start, 'seconds'
### h2o_jobs.cancelAllJobs(timeoutSecs=10)
h2o.check_sandbox_for_errors()
# do one last good one
rfView = h2o_cmd.runRF(parseResult=parseResult, trees=trial, max_depth=50, timeoutSecs=600, retryDelaySecs=3)
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=trial)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -7,085,951,831,473,397,000 | 37.258621 | 141 | 0.604777 | false |
juliosmelo/soldo | utils/settings.py | 1 | 1331 | PGS_TOKEN = 'C888EE7F420841CF92D0B0063EDDFC7D'
PGS_EMAIL = '[email protected]'
# from datetime import datetime
# from datetime import date
# from datetime import timedelta
# dates = [d0]
# dates_two = list()
# def date_paginator(x, y):
# print x, y
# if pages == 1 and pages_mods == 0:
# _date = d0 + timedelta(days=30)
# date_paginator(d0, _date)
# else:
# for i in range(pages):
# _date = d0 + timedelta(days=30 * (i + 1))
# dates.append(_date)
# if pages_mods > 0 and pages_mods < 30:
# new_date = dates[-1:][0] + timedelta(days=pages_mods)
# dates.append(new_date)
# if dates:
# for i in range(len(dates) - 1):
# date_paginator(dates[i], dates[i + 1])
# class DateRangePagination:
# """docstring for DateRangePagination"""
# def __init__(self, initial_date):
# self.initial_date = datetime.strptime(initial_date, "%Y-%m-%d").date()
# self.dates = [self.initial_date]
# self.date_limit = datetime.now().date()
# def get_ranges(self):
# print self.initial_date
# def set_ranges():
# d0 = date(2008, 8, 18)
# d1 = date(2008, 11, 18)
# delta = d1 - d0
# pages = delta.days / 30
# pages_mods = delta.days % 30
# pass
# def get_days(self,):
# pass | mit | 1,225,591,867,755,594,800 | 23.666667 | 80 | 0.574005 | false |
MichaelAnckaert/Hermes | message.py | 1 | 1631 | """Message functionality for Hermes"""
from datetime import datetime
import json
__author__ = "Michael Anckaert"
__copyright__ = "Copyright 2012, Michael Anckaert"
__credits__ = ["Michael Anckaert"]
__license__ = "GPLv3"
__version__ = "0.0.1"
__maintainer__ = "Michael Anckaert"
__email__ = "[email protected]"
__status__ = "Development"
class MessageType(object):
types = {}
def __init__(self, name):
if name in MessageType.types:
print " W: Message type '{0}' already exists".format(name)
raise ValueError("Message type '{}' already exists.".format(name))
self.name = name
self.rest_enabled = False
        MessageType.types[name] = self
def enable_rest(self):
self.rest_enabled = True
def disable_rest(self):
self.rest_enabled = False
    @staticmethod
    def get_message_type(name):
        if name in MessageType.types:
            return MessageType.types[name]
        else:
            return None
class Message(object):
def __init__(self, type, content):
if MessageType.get_message_type(type):
self.type = type
self.content = content
self.id = None
self.status = "UNKNOWN"
self.received = datetime.now().strftime("%d-%m-%Y %H:%M")
self.response = None
return
print " W: Unknown message type '{0}' ".format(type)
raise ValueError("Wrong message type!")
def __str__(self):
return json.dumps({'message': {'id': self.id, 'status': self.status, 'received': self.received, 'response': self.response}})
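# Minimal usage sketch (assumes the in-memory registry above; 'order' is just an
# example type name):
# MessageType('order')
# msg = Message('order', '{"amount": 3}')
# print msg # JSON with id, status, received and response fields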
| gpl-3.0 | 2,294,136,335,230,464,300 | 27.12069 | 132 | 0.591048 | false |
DedMemez/ODS-August-2017 | suit/SuitDNA.py | 1 | 7589 | # toontown.suit.SuitDNA
from panda3d.core import Datagram, DatagramIterator, VBase4
import random
from direct.directnotify.DirectNotifyGlobal import *
from toontown.toonbase import TTLocalizer, ToontownGlobals
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
notify = directNotify.newCategory('SuitDNA')
suitHeadTypes = ['f',
'p',
'ym',
'mm',
'ds',
'hh',
'cr',
'tbc',
'bf',
'b',
'dt',
'ac',
'bs',
'sd',
'le',
'bw',
'sc',
'pp',
'tw',
'bc',
'nc',
'mb',
'ls',
'rb',
'cc',
'tm',
'nd',
'gh',
'ms',
'tf',
'm',
'mh',
'sk',
'cm',
'vp',
'db',
'kc',
'ss',
'iw',
'ru']
suitATypes = ['ym',
'hh',
'tbc',
'dt',
'bs',
'le',
'bw',
'pp',
'nc',
'rb',
'nd',
'tf',
'm',
'mh',
'vp',
'ss',
'ru']
suitBTypes = ['p',
'ds',
'b',
'ac',
'sd',
'bc',
'ls',
'tm',
'ms',
'kc',
'iw']
suitCTypes = ['f',
'mm',
'cr',
'bf',
'sc',
'tw',
'mb',
'cc',
'gh',
'sk',
'cm',
'db']
suitDepts = ['c',
'l',
'm',
's',
't']
suitDeptZones = [ToontownGlobals.BossbotHQ,
ToontownGlobals.LawbotHQ,
ToontownGlobals.CashbotHQ,
ToontownGlobals.SellbotHQ,
ToontownGlobals.TechbotHQ]
suitDeptFullnames = {'c': TTLocalizer.Bossbot,
'l': TTLocalizer.Lawbot,
'm': TTLocalizer.Cashbot,
's': TTLocalizer.Sellbot,
't': TTLocalizer.Techbot}
suitDeptFullnamesP = {'c': TTLocalizer.BossbotP,
'l': TTLocalizer.LawbotP,
'm': TTLocalizer.CashbotP,
's': TTLocalizer.SellbotP,
't': TTLocalizer.TechbotP}
suitDeptFilenames = {'c': 'boss',
'l': 'law',
'm': 'cash',
's': 'sell',
't': 'tech'}
suitDeptModelPaths = {'c': '**/CorpIcon',
0: '**/CorpIcon',
'l': '**/LegalIcon',
1: '**/LegalIcon',
'm': '**/MoneyIcon',
2: '**/MoneyIcon',
's': '**/SalesIcon',
3: '**/SalesIcon',
't': '**/TechIcon',
4: '**/TechIcon'}
corpPolyColor = VBase4(0.95, 0.75, 0.75, 1.0)
legalPolyColor = VBase4(0.75, 0.75, 0.95, 1.0)
moneyPolyColor = VBase4(0.65, 0.95, 0.85, 1.0)
salesPolyColor = VBase4(0.95, 0.75, 0.95, 1.0)
techPolyColor = VBase4(0.6, 0.48, 0.7, 1.0)
suitDeptColors = {'c': corpPolyColor,
'l': legalPolyColor,
'm': moneyPolyColor,
's': salesPolyColor,
't': techPolyColor}
suitsPerLevel = [1,
1,
1,
1,
1,
1,
1,
1]
suitsPerDept = 8
goonTypes = ['pg', 'sg', 'fg1']
def getSuitBodyType(name):
if name in suitATypes:
return 'a'
if name in suitBTypes:
return 'b'
if name in suitCTypes:
return 'c'
print 'Unknown body type for suit name: ', name
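# suitHeadTypes is ordered in blocks of suitsPerDept (8) entries per department,
# so a suit's department and level follow directly from its index in that list.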
def getSuitDept(name):
index = suitHeadTypes.index(name)
for dept in xrange(len(suitDepts)):
if index < suitsPerDept * (dept + 1):
return suitDepts[dept]
print 'Unknown dept for suit name: ', name
def getDeptFullname(dept):
return suitDeptFullnames[dept]
def getDeptFullnameP(dept):
return suitDeptFullnamesP[dept]
def getSuitDeptFullname(name):
return suitDeptFullnames[getSuitDept(name)]
def getSuitType(name):
index = suitHeadTypes.index(name)
return index % suitsPerDept + 1
def getSuitName(deptIndex, typeIndex):
return suitHeadTypes[suitsPerDept * deptIndex + typeIndex]
def getRandomSuitType(level, rng = random):
return random.randint(max(level - 4, 1), min(level, 8))
def getRandomIndexByDept(dept):
return suitsPerDept * suitDepts.index(dept) + random.randint(0, suitsPerDept - 1)
def getRandomSuitByDept(dept):
return suitHeadTypes[getRandomIndexByDept(dept)]
def getSuitsInDept(dept):
start = dept * suitsPerDept
end = start + suitsPerDept
return suitHeadTypes[start:end]
def getLevelByIndex(index):
return index % suitsPerDept + 1
class SuitDNA:
def __init__(self, str = None, type = None, dna = None, r = None, b = None, g = None):
if str != None:
self.makeFromNetString(str)
elif type != None:
if type == 's':
self.newSuit()
else:
self.type = 'u'
return
def __str__(self):
if self.type == 's':
return 'type = %s\nbody = %s, dept = %s, name = %s' % ('suit',
self.body,
self.dept,
self.name)
elif self.type == 'b':
return 'type = boss cog\ndept = %s' % self.dept
else:
return 'type undefined'
def makeNetString(self):
dg = PyDatagram()
dg.addFixedString(self.type, 1)
if self.type == 's':
dg.addFixedString(self.name, 3)
dg.addFixedString(self.dept, 1)
elif self.type == 'b':
dg.addFixedString(self.dept, 1)
elif self.type == 'u':
notify.error('undefined avatar')
else:
notify.error('unknown avatar type: ', self.type)
return dg.getMessage()
def makeFromNetString(self, string):
dg = PyDatagram(string)
dgi = PyDatagramIterator(dg)
self.type = dgi.getFixedString(1)
if self.type == 's':
self.name = dgi.getFixedString(3)
self.dept = dgi.getFixedString(1)
self.body = getSuitBodyType(self.name)
elif self.type == 'b':
self.dept = dgi.getFixedString(1)
else:
notify.error('unknown avatar type: ', self.type)
return None
def __defaultGoon(self):
self.type = 'g'
self.name = goonTypes[0]
def __defaultSuit(self):
self.type = 's'
self.name = 'ds'
self.dept = getSuitDept(self.name)
self.body = getSuitBodyType(self.name)
def newSuit(self, name = None):
if name == None:
self.__defaultSuit()
else:
self.type = 's'
self.name = name
self.dept = getSuitDept(self.name)
self.body = getSuitBodyType(self.name)
return
def newBossCog(self, dept):
self.type = 'b'
self.dept = dept
def newSuitRandom(self, level = None, dept = None):
self.type = 's'
if level == None:
level = random.choice(range(1, len(suitsPerLevel)))
elif level < 0 or level > len(suitsPerLevel):
notify.error('Invalid suit level: %d' % level)
if dept == None:
dept = random.choice(suitDepts)
self.dept = dept
index = suitDepts.index(dept)
base = index * suitsPerDept
offset = 0
if level > 1:
for i in xrange(1, level):
offset = offset + suitsPerLevel[i - 1]
bottom = base + offset
top = bottom + suitsPerLevel[level - 1]
self.name = suitHeadTypes[random.choice(range(bottom, top))]
self.body = getSuitBodyType(self.name)
return
def newGoon(self, name = None):
        if name == None:
self.__defaultGoon()
else:
self.type = 'g'
if name in goonTypes:
self.name = name
else:
notify.error('unknown goon type: ', name)
return
def getType(self):
if self.type == 's':
type = 'suit'
elif self.type == 'b':
type = 'boss'
else:
notify.error('Invalid DNA type: ', self.type)
return type | apache-2.0 | 5,145,229,808,531,475,000 | 21.501548 | 90 | 0.548557 | false |
normanjaeckel/OpenSlides | server/tests/integration/motions/test_polls.py | 1 | 53361 | from decimal import Decimal
import pytest
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from openslides.core.config import config
from openslides.motions.models import Motion, MotionOption, MotionPoll, MotionVote
from openslides.poll.models import BasePoll
from openslides.utils.auth import get_group_model
from openslides.utils.autoupdate import inform_changed_data
from tests.common_groups import GROUP_ADMIN_PK, GROUP_DEFAULT_PK, GROUP_DELEGATE_PK
from tests.count_queries import count_queries
from tests.test_case import TestCase
@pytest.mark.django_db(transaction=False)
def test_motion_poll_db_queries():
"""
Tests that only the following db queries are done:
* 1 request to get the polls,
* 1 request to get all options for all polls,
* 1 request to get all votes for all options,
* 1 request to get all users for all votes,
* 1 request to get all poll groups,
= 5 queries
"""
create_motion_polls()
assert count_queries(MotionPoll.get_elements)() == 5
@pytest.mark.django_db(transaction=False)
def test_motion_vote_db_queries():
"""
Tests that only 1 query is done when fetching MotionVotes
"""
create_motion_polls()
assert count_queries(MotionVote.get_elements)() == 1
@pytest.mark.django_db(transaction=False)
def test_motion_option_db_queries():
"""
Tests that only the following db queries are done:
* 1 request to get the options,
* 1 request to get all votes for all options,
= 2 queries
"""
create_motion_polls()
assert count_queries(MotionOption.get_elements)() == 2
def create_motion_polls():
"""
Creates 1 Motion with 5 polls with 5 options each which have 2 votes each
"""
motion = Motion.objects.create(title="test_motion_wfLrsjEHXBmPplbvQ65N")
group1 = get_group_model().objects.get(pk=1)
group2 = get_group_model().objects.get(pk=2)
for index in range(5):
poll = MotionPoll.objects.create(
motion=motion, title=f"test_title_{index}", pollmethod="YN", type="named"
)
poll.groups.add(group1)
poll.groups.add(group2)
for j in range(5):
option = MotionOption.objects.create(poll=poll)
for k in range(2):
user = get_user_model().objects.create_user(
username=f"test_username_{index}{j}{k}",
password="test_password_kbzj5L8ZtVxBllZzoW6D",
)
MotionVote.objects.create(
user=user,
option=option,
value=("Y" if k == 0 else "N"),
weight=Decimal(1),
)
poll.voted.add(user)
class CreateMotionPoll(TestCase):
"""
Tests creating polls of motions.
"""
def advancedSetUp(self):
self.motion = Motion(
title="test_title_Aiqueigh2dae9phabiqu",
text="test_text_Neekoh3zou6li5rue8iL",
)
self.motion.save()
def test_simple(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_ailai4toogh3eefaa2Vo",
"pollmethod": "YNA",
"type": "named",
"motion_id": self.motion.id,
"onehundred_percent_base": "YN",
"majority_method": "simple",
},
)
self.assertHttpStatusVerbose(response, status.HTTP_201_CREATED)
self.assertTrue(MotionPoll.objects.exists())
poll = MotionPoll.objects.get()
self.assertEqual(poll.title, "test_title_ailai4toogh3eefaa2Vo")
self.assertEqual(poll.pollmethod, "YNA")
self.assertEqual(poll.type, "named")
self.assertEqual(poll.motion.id, self.motion.id)
self.assertTrue(poll.options.exists())
def test_default_method(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_ailai4toogh3eefaa2Vo",
"type": "named",
"motion_id": self.motion.id,
"onehundred_percent_base": "YN",
"majority_method": "simple",
},
)
self.assertHttpStatusVerbose(response, status.HTTP_201_CREATED)
self.assertTrue(MotionPoll.objects.exists())
poll = MotionPoll.objects.get()
self.assertEqual(poll.pollmethod, "YNA")
def test_missing_keys(self):
complete_request_data = {
"title": "test_title_OoCh9aitaeyaeth8nom1",
"type": "named",
"motion_id": self.motion.id,
"onehundred_percent_base": "YN",
"majority_method": "simple",
}
for key in complete_request_data.keys():
request_data = {
_key: value
for _key, value in complete_request_data.items()
if _key != key
}
response = self.client.post(reverse("motionpoll-list"), request_data)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.exists())
def test_with_groups(self):
group1 = get_group_model().objects.get(pk=1)
group2 = get_group_model().objects.get(pk=2)
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_Thoo2eiphohhi1eeXoow",
"pollmethod": "YNA",
"type": "named",
"motion_id": self.motion.id,
"onehundred_percent_base": "YN",
"majority_method": "simple",
"groups_id": [1, 2],
},
)
self.assertHttpStatusVerbose(response, status.HTTP_201_CREATED)
poll = MotionPoll.objects.get()
self.assertTrue(group1 in poll.groups.all())
self.assertTrue(group2 in poll.groups.all())
def test_with_empty_groups(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_Thoo2eiphohhi1eeXoow",
"pollmethod": MotionPoll.POLLMETHOD_YNA,
"type": MotionPoll.TYPE_NAMED,
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YN,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
"groups_id": [],
},
)
self.assertHttpStatusVerbose(response, status.HTTP_201_CREATED)
poll = MotionPoll.objects.get()
self.assertFalse(poll.groups.exists())
def test_not_supported_type(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_yaiyeighoh0Iraet3Ahc",
"pollmethod": MotionPoll.POLLMETHOD_YNA,
"type": "not_existing",
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YN,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.exists())
def test_not_allowed_type(self):
setattr(settings, "ENABLE_ELECTRONIC_VOTING", False)
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_3jdWIXbKBa7ZXutf3RYf",
"pollmethod": MotionPoll.POLLMETHOD_YN,
"type": MotionPoll.TYPE_NAMED,
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YN,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.exists())
setattr(settings, "ENABLE_ELECTRONIC_VOTING", True)
def test_not_supported_pollmethod(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_SeVaiteYeiNgie5Xoov8",
"pollmethod": "not_existing",
"type": "named",
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YN,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.exists())
def test_create_with_votes(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_0X5LifVkKiSh8OPGQM8e",
"pollmethod": MotionPoll.POLLMETHOD_YN,
"type": MotionPoll.TYPE_ANALOG,
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YNA,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
"votes": {
"Y": 1,
"N": 2,
"votesvalid": "-2",
"votesinvalid": "-2",
"votescast": "-2",
},
},
)
self.assertHttpStatusVerbose(response, status.HTTP_201_CREATED)
poll = MotionPoll.objects.get()
self.assertEqual(poll.state, MotionPoll.STATE_FINISHED)
self.assertTrue(MotionVote.objects.exists())
def test_create_with_votes_publish_immediately(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_iXhJX0jmNl3Nvadsi8JO",
"pollmethod": MotionPoll.POLLMETHOD_YN,
"type": MotionPoll.TYPE_ANALOG,
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YNA,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
"votes": {
"Y": 1,
"N": 2,
"votesvalid": "-2",
"votesinvalid": "-2",
"votescast": "-2",
},
"publish_immediately": "1",
},
)
self.assertHttpStatusVerbose(response, status.HTTP_201_CREATED)
poll = MotionPoll.objects.get()
self.assertEqual(poll.state, MotionPoll.STATE_PUBLISHED)
self.assertTrue(MotionVote.objects.exists())
def test_create_with_invalid_votes(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_phSl1IALPIoDyM9uI2Kq",
"pollmethod": MotionPoll.POLLMETHOD_YN,
"type": MotionPoll.TYPE_ANALOG,
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YNA,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
"votes": {"Y": 1, "N": 2, "votesvalid": "-2", "votesinvalid": "-2"},
"publish_immediately": "1",
},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.exists())
self.assertFalse(MotionVote.objects.exists())
def test_create_with_votes_wrong_type(self):
response = self.client.post(
reverse("motionpoll-list"),
{
"title": "test_title_PgvqRIvuKuVImEpQJAMZ",
"pollmethod": MotionPoll.POLLMETHOD_YN,
"type": MotionPoll.TYPE_NAMED,
"motion_id": self.motion.id,
"onehundred_percent_base": MotionPoll.PERCENT_BASE_YNA,
"majority_method": MotionPoll.MAJORITY_SIMPLE,
"votes": {"Y": 1, "N": 2, "votesvalid": "-2", "votesinvalid": "-2"},
"publish_immediately": "1",
},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.exists())
self.assertFalse(MotionVote.objects.exists())
class UpdateMotionPoll(TestCase):
"""
Tests updating polls of motions.
"""
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.motion = Motion(
title="test_title_Aiqueigh2dae9phabiqu",
text="test_text_Neekoh3zou6li5rue8iL",
)
self.motion.save()
self.group = get_group_model().objects.get(pk=1)
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_beeFaihuNae1vej2ai8m",
pollmethod="YNA",
type="named",
onehundred_percent_base="YN",
majority_method="simple",
)
self.poll.create_options()
self.poll.groups.add(self.group)
def test_patch_title(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"title": "test_title_Aishohh1ohd0aiSut7gi"},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.title, "test_title_Aishohh1ohd0aiSut7gi")
def test_prevent_patching_motion(self):
motion = Motion(
title="test_title_phohdah8quukooHeetuz",
text="test_text_ue2yeisaech1ahBohhoo",
)
motion.save()
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]), {"motion_id": motion.id}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.motion.id, self.motion.id) # unchanged
def test_patch_pollmethod(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]), {"pollmethod": "YN"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.pollmethod, "YN")
self.assertEqual(poll.onehundred_percent_base, "YN")
def test_patch_invalid_pollmethod(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]), {"pollmethod": "invalid"}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.pollmethod, "YNA")
def test_patch_type(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]), {"type": "analog"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.type, "analog")
def test_patch_invalid_type(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]), {"type": "invalid"}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.type, "named")
def test_patch_not_allowed_type(self):
setattr(settings, "ENABLE_ELECTRONIC_VOTING", False)
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"type": BasePoll.TYPE_NAMED},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.type, BasePoll.TYPE_NAMED)
setattr(settings, "ENABLE_ELECTRONIC_VOTING", True)
def test_patch_100_percent_base(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"onehundred_percent_base": "cast"},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.onehundred_percent_base, "cast")
def test_patch_wrong_100_percent_base(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"onehundred_percent_base": "invalid"},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.onehundred_percent_base, "YN")
def test_patch_majority_method(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"majority_method": "two_thirds"},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.majority_method, "two_thirds")
def test_patch_wrong_majority_method(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"majority_method": "invalid majority method"},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.majority_method, "simple")
def test_patch_groups_to_empty(self):
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]), {"groups_id": []}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertFalse(poll.groups.exists())
def test_patch_groups(self):
group2 = get_group_model().objects.get(pk=2)
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"groups_id": [group2.id]},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.groups.count(), 1)
self.assertEqual(poll.groups.get(), group2)
def test_patch_title_started(self):
self.poll.state = 2
self.poll.save()
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"title": "test_title_1FjLGeQqsi9GgNzPp73S"},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.title, "test_title_1FjLGeQqsi9GgNzPp73S")
def test_patch_wrong_state(self):
self.poll.state = 2
self.poll.save()
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"type": BasePoll.TYPE_NAMED},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.type, BasePoll.TYPE_NAMED)
def test_patch_majority_method_state_not_created(self):
self.poll.state = 2
self.poll.save()
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"majority_method": "two_thirds"},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.majority_method, "two_thirds")
def test_patch_100_percent_base_state_not_created(self):
self.poll.state = 2
self.poll.save()
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"onehundred_percent_base": "cast"},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.onehundred_percent_base, "cast")
def test_patch_wrong_100_percent_base_state_not_created(self):
self.poll.state = 2
self.poll.pollmethod = MotionPoll.POLLMETHOD_YN
self.poll.save()
response = self.client.patch(
reverse("motionpoll-detail", args=[self.poll.pk]),
{"onehundred_percent_base": MotionPoll.PERCENT_BASE_YNA},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.onehundred_percent_base, "YN")
class VoteMotionPollAnalog(TestCase):
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.motion = Motion(
title="test_title_OoK9IeChe2Jeib9Deeji",
text="test_text_eichui1oobiSeit9aifo",
)
self.motion.save()
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_tho8PhiePh8upaex6phi",
pollmethod="YNA",
type=BasePoll.TYPE_ANALOG,
)
self.poll.create_options()
def start_poll(self):
self.poll.state = MotionPoll.STATE_STARTED
self.poll.save()
def make_admin_delegate(self):
admin = get_user_model().objects.get(username="admin")
admin.groups.add(GROUP_DELEGATE_PK)
admin.groups.remove(GROUP_ADMIN_PK)
inform_changed_data(admin)
def test_start_poll(self):
response = self.client.post(reverse("motionpoll-start", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.state, MotionPoll.STATE_STARTED)
self.assertEqual(poll.votesvalid, None)
self.assertEqual(poll.votesinvalid, None)
self.assertEqual(poll.votescast, None)
self.assertFalse(poll.get_votes().exists())
def test_stop_poll(self):
self.start_poll()
response = self.client.post(reverse("motionpoll-stop", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertEqual(self.poll.state, MotionPoll.STATE_STARTED)
def test_vote(self):
self.start_poll()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{
"data": {
"Y": "1",
"N": "2.35",
"A": "-1",
"votesvalid": "4.64",
"votesinvalid": "-2",
"votescast": "-2",
},
},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("4.64"))
self.assertEqual(poll.votesinvalid, Decimal("-2"))
self.assertEqual(poll.votescast, Decimal("-2"))
self.assertEqual(poll.get_votes().count(), 3)
self.assertEqual(poll.state, MotionPoll.STATE_FINISHED)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("1"))
self.assertEqual(option.no, Decimal("2.35"))
self.assertEqual(option.abstain, Decimal("-1"))
def test_vote_no_permissions(self):
self.start_poll()
self.make_admin_delegate()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_no_data(self):
self.start_poll()
response = self.client.post(reverse("motionpoll-vote", args=[self.poll.pk]), {})
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_missing_data(self):
self.start_poll()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": {"Y": "4", "N": "22.6"}},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_data_format(self):
self.start_poll()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": [1, 2, 5]}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_vote_data(self):
self.start_poll()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": {"Y": "some string", "N": "-2", "A": "3"}},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_state_finished(self):
self.start_poll()
self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{
"data": {
"Y": "3",
"N": "1",
"A": "5",
"votesvalid": "-2",
"votesinvalid": "1",
"votescast": "-1",
},
},
)
self.poll.state = 3
self.poll.save()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{
"data": {
"Y": "1",
"N": "2.35",
"A": "-1",
"votesvalid": "4.64",
"votesinvalid": "-2",
"votescast": "3",
},
},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("4.64"))
self.assertEqual(poll.votesinvalid, Decimal("-2"))
self.assertEqual(poll.votescast, Decimal("3"))
self.assertEqual(poll.get_votes().count(), 3)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("1"))
self.assertEqual(option.no, Decimal("2.35"))
self.assertEqual(option.abstain, Decimal("-1"))
class VoteMotionPollNamed(TestCase):
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.motion = Motion(
title="test_title_OoK9IeChe2Jeib9Deeji",
text="test_text_eichui1oobiSeit9aifo",
)
self.motion.save()
self.group = get_group_model().objects.get(pk=GROUP_DELEGATE_PK)
self.admin = get_user_model().objects.get(username="admin")
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_tho8PhiePh8upaex6phi",
pollmethod="YNA",
type=BasePoll.TYPE_NAMED,
)
self.poll.create_options()
self.poll.groups.add(self.group)
def start_poll(self):
self.poll.state = MotionPoll.STATE_STARTED
self.poll.save()
def make_admin_delegate(self):
self.admin.groups.add(GROUP_DELEGATE_PK)
self.admin.groups.remove(GROUP_ADMIN_PK)
inform_changed_data(self.admin)
def make_admin_present(self):
self.admin.is_present = True
self.admin.save()
def test_start_poll(self):
response = self.client.post(reverse("motionpoll-start", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.state, MotionPoll.STATE_STARTED)
self.assertEqual(poll.votesvalid, Decimal("0"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("0"))
self.assertFalse(poll.get_votes().exists())
def test_vote(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "N"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("1"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("1"))
self.assertEqual(poll.get_votes().count(), 1)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
vote = option.votes.get()
self.assertEqual(vote.user, self.admin)
self.assertEqual(vote.weight, Decimal("1"))
def test_vote_with_voteweight(self):
config["users_activate_vote_weight"] = True
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
self.admin.vote_weight = weight = Decimal("3.5")
self.admin.save()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "A"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, weight)
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("1"))
self.assertEqual(poll.get_votes().count(), 1)
self.assertEqual(poll.amount_users_voted_with_individual_weight(), weight)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("0"))
self.assertEqual(option.abstain, weight)
vote = option.votes.get()
self.assertEqual(vote.weight, weight)
def test_vote_without_voteweight(self):
self.admin.vote_weight = Decimal("3.5")
self.admin.save()
self.test_vote()
def test_change_vote(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "N"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "A"}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("1"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("1"))
self.assertEqual(poll.get_votes().count(), 1)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
vote = option.votes.get()
self.assertEqual(vote.user, self.admin)
def test_vote_anonymous(self):
self.poll.groups.add(GROUP_DEFAULT_PK)
self.start_poll()
config["general_system_enable_anonymous"] = True
guest_client = APIClient()
response = guest_client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "Y"}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_state(self):
self.make_admin_present()
self.make_admin_delegate()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_group(self):
self.start_poll()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_not_present(self):
self.start_poll()
self.make_admin_delegate()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_missing_data(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_data_format(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": [1, 2, 5]}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def setup_vote_delegation(self, with_delegation=True):
""" user -> admin """
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
self.user, self.user_password = self.create_user()
self.user.groups.add(GROUP_DELEGATE_PK)
if with_delegation:
self.user.vote_delegated_to = self.admin
self.user.save()
inform_changed_data(self.admin) # put the admin into the cache to update
# its vote_delegated_to_id field
def test_vote_delegation(self):
self.setup_vote_delegation()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": "N", "user_id": self.user.pk}, # user not present
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("1"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("1"))
self.assertEqual(poll.get_votes().count(), 1)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
vote = option.votes.get()
self.assertEqual(vote.user, self.user)
self.assertEqual(vote.delegated_user, self.admin)
def test_vote_delegation_and_self_vote(self):
self.test_vote_delegation()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "Y"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("2"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("2"))
self.assertEqual(poll.get_votes().count(), 2)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("1"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
vote = option.votes.get(user_id=self.admin.pk)
self.assertEqual(vote.user, self.admin)
self.assertEqual(vote.delegated_user, self.admin)
def test_vote_delegation_forbidden(self):
self.setup_vote_delegation(False)
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": "N", "user_id": self.user.pk},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_delegation_not_present(self):
self.setup_vote_delegation()
self.admin.is_present = False
self.admin.save()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": "N", "user_id": self.user.pk},
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_delegation_delegate_not_in_group(self):
self.setup_vote_delegation()
self.admin.groups.remove(GROUP_DELEGATE_PK)
self.admin.save()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": "N", "user_id": self.user.pk},
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.get_votes().count(), 1)
vote = poll.get_votes()[0]
self.assertEqual(vote.value, "N")
self.assertEqual(vote.user, self.user)
self.assertEqual(vote.delegated_user, self.admin)
def test_vote_delegation_delegator_not_in_group(self):
self.setup_vote_delegation()
self.user.groups.remove(GROUP_DELEGATE_PK)
self.user.save()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": "N", "user_id": self.user.pk},
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_delegation_delegator_self_vote_not_allowed(self):
self.setup_vote_delegation()
        # Make the admin a regular delegate and mark the delegating user as present
self.admin.groups.add(GROUP_DELEGATE_PK)
self.admin.groups.remove(GROUP_ADMIN_PK)
self.user.is_present = True
self.user.save()
# Use the user to make the request to vote for himself
user_client = APIClient()
user_client.login(username=self.user.username, password=self.user_password)
response = user_client.post(
reverse("motionpoll-vote", args=[self.poll.pk]),
{"data": "N"},
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
class VoteMotionPollPseudoanonymous(TestCase):
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.motion = Motion(
title="test_title_Chaebaenges1aebe8iev",
text="test_text_cah2aigh6ahc8OhNguQu",
)
self.motion.save()
self.group = get_group_model().objects.get(pk=GROUP_DELEGATE_PK)
self.admin = get_user_model().objects.get(username="admin")
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_yohphei9Iegohqu9ki7m",
pollmethod="YNA",
type=BasePoll.TYPE_PSEUDOANONYMOUS,
)
self.poll.create_options()
self.poll.groups.add(self.group)
def start_poll(self):
self.poll.state = MotionPoll.STATE_STARTED
self.poll.save()
def make_admin_delegate(self):
self.admin.groups.add(GROUP_DELEGATE_PK)
self.admin.groups.remove(GROUP_ADMIN_PK)
inform_changed_data(self.admin)
def make_admin_present(self):
self.admin.is_present = True
self.admin.save()
def test_start_poll(self):
response = self.client.post(reverse("motionpoll-start", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.state, MotionPoll.STATE_STARTED)
self.assertEqual(poll.votesvalid, Decimal("0"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("0"))
self.assertFalse(poll.get_votes().exists())
def test_vote(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "N"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.votesvalid, Decimal("1"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("1"))
self.assertEqual(poll.get_votes().count(), 1)
self.assertEqual(poll.amount_users_voted_with_individual_weight(), 1)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
self.assertTrue(self.admin in poll.voted.all())
vote = option.votes.get()
self.assertEqual(vote.user, None)
def test_change_vote(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "N"}
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "A"}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
option = MotionPoll.objects.get().options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
vote = option.votes.get()
self.assertEqual(vote.user, None)
def test_vote_anonymous(self):
self.poll.groups.add(GROUP_DEFAULT_PK)
self.start_poll()
config["general_system_enable_anonymous"] = True
guest_client = APIClient()
response = guest_client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": "Y"}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_state(self):
self.make_admin_present()
self.make_admin_delegate()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_group(self):
self.start_poll()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_not_present(self):
self.start_poll()
self.make_admin_delegate()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_403_FORBIDDEN)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_missing_data(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": {}}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
def test_vote_wrong_data_format(self):
self.start_poll()
self.make_admin_delegate()
self.make_admin_present()
response = self.client.post(
reverse("motionpoll-vote", args=[self.poll.pk]), {"data": [1, 2, 5]}
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertFalse(MotionPoll.objects.get().get_votes().exists())
class StopMotionPoll(TestCase):
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.motion = Motion(
title="test_title_eiri4iipeemaeGhahkae",
text="test_text_eegh7quoochaiNgiyeix",
)
self.motion.save()
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_Hu9Miebopaighee3EDie",
pollmethod="YNA",
type=BasePoll.TYPE_NAMED,
)
self.poll.create_options()
def test_stop_poll(self):
self.poll.state = MotionPoll.STATE_STARTED
self.poll.save()
response = self.client.post(reverse("motionpoll-stop", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
self.assertEqual(MotionPoll.objects.get().state, MotionPoll.STATE_FINISHED)
def test_stop_wrong_state(self):
response = self.client.post(reverse("motionpoll-stop", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertEqual(MotionPoll.objects.get().state, MotionPoll.STATE_CREATED)
class PublishMotionPoll(TestCase):
def advancedSetUp(self):
self.motion = Motion(
title="test_title_lai8Ho5gai9aijahRasu",
text="test_text_KieGhosh8ahWiguHeu2D",
)
self.motion.save()
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_Nufae0iew7Iorox2thoo",
pollmethod="YNA",
type=BasePoll.TYPE_PSEUDOANONYMOUS,
onehundred_percent_base="YN",
majority_method="simple",
)
self.poll.create_options()
option = self.poll.options.get()
self.user, _ = self.create_user()
self.vote = MotionVote.objects.create(
option=option, user=None, weight=Decimal(2), value="N"
)
def test_publish_poll(self):
self.poll.state = MotionPoll.STATE_FINISHED
self.poll.save()
response = self.client.post(reverse("motionpoll-publish", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
self.assertEqual(MotionPoll.objects.get().state, MotionPoll.STATE_PUBLISHED)
def test_publish_wrong_state(self):
response = self.client.post(reverse("motionpoll-publish", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
self.assertEqual(MotionPoll.objects.get().state, MotionPoll.STATE_CREATED)
class PseudoanonymizeMotionPoll(TestCase):
def setUp(self):
self.client = APIClient()
self.client.login(username="admin", password="admin")
self.motion = Motion(
title="test_title_lai8Ho5gai9aijahRasu",
text="test_text_KieGhosh8ahWiguHeu2D",
)
self.motion.save()
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_Nufae0iew7Iorox2thoo",
pollmethod="YNA",
type=BasePoll.TYPE_NAMED,
state=MotionPoll.STATE_FINISHED,
)
self.poll.create_options()
self.option = self.poll.options.get()
self.user1, _ = self.create_user()
self.vote1 = MotionVote.objects.create(
user=self.user1, option=self.option, value="Y", weight=Decimal(1)
)
self.poll.voted.add(self.user1)
self.user2, _ = self.create_user()
self.vote2 = MotionVote.objects.create(
user=self.user2, option=self.option, value="N", weight=Decimal(1)
)
self.poll.voted.add(self.user2)
def test_pseudoanonymize_poll(self):
response = self.client.post(
reverse("motionpoll-pseudoanonymize", args=[self.poll.pk])
)
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.get_votes().count(), 2)
self.assertEqual(poll.amount_users_voted_with_individual_weight(), 2)
self.assertEqual(poll.votesvalid, Decimal("2"))
self.assertEqual(poll.votesinvalid, Decimal("0"))
self.assertEqual(poll.votescast, Decimal("2"))
option = poll.options.get()
self.assertEqual(option.yes, Decimal("1"))
self.assertEqual(option.no, Decimal("1"))
self.assertEqual(option.abstain, Decimal("0"))
self.assertTrue(self.user1 in poll.voted.all())
self.assertTrue(self.user2 in poll.voted.all())
for vote in poll.get_votes().all():
self.assertTrue(vote.user is None)
def test_pseudoanonymize_wrong_state(self):
self.poll.state = MotionPoll.STATE_CREATED
self.poll.save()
response = self.client.post(
reverse("motionpoll-pseudoanonymize", args=[self.poll.pk])
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertTrue(poll.get_votes().filter(user=self.user1).exists())
self.assertTrue(poll.get_votes().filter(user=self.user2).exists())
def test_pseudoanonymize_wrong_type(self):
self.poll.type = MotionPoll.TYPE_ANALOG
self.poll.save()
response = self.client.post(
reverse("motionpoll-pseudoanonymize", args=[self.poll.pk])
)
self.assertHttpStatusVerbose(response, status.HTTP_400_BAD_REQUEST)
poll = MotionPoll.objects.get()
self.assertTrue(poll.get_votes().filter(user=self.user1).exists())
self.assertTrue(poll.get_votes().filter(user=self.user2).exists())
class ResetMotionPoll(TestCase):
def advancedSetUp(self):
self.motion = Motion(
title="test_title_cheiJ1ieph5ohng9queu",
text="test_text_yahng6fiegaL7mooZ2of",
)
self.motion.save()
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_oozie2Ui9xie0chaghie",
pollmethod="YNA",
type=BasePoll.TYPE_ANALOG,
state=MotionPoll.STATE_FINISHED,
)
self.poll.create_options()
self.option = self.poll.options.get()
self.user1, _ = self.create_user()
self.vote1 = MotionVote.objects.create(
user=self.user1, option=self.option, value="Y", weight=Decimal(1)
)
self.poll.voted.add(self.user1)
self.user2, _ = self.create_user()
self.vote2 = MotionVote.objects.create(
user=self.user2, option=self.option, value="N", weight=Decimal(1)
)
self.poll.voted.add(self.user2)
def test_reset_poll(self):
response = self.client.post(reverse("motionpoll-reset", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
poll = MotionPoll.objects.get()
self.assertEqual(poll.get_votes().count(), 0)
self.assertEqual(poll.amount_users_voted_with_individual_weight(), 0)
self.assertEqual(poll.votesvalid, None)
self.assertEqual(poll.votesinvalid, None)
self.assertEqual(poll.votescast, None)
option = poll.options.get()
self.assertEqual(option.yes, Decimal("0"))
self.assertEqual(option.no, Decimal("0"))
self.assertEqual(option.abstain, Decimal("0"))
self.assertFalse(option.votes.exists())
class TestMotionPollWithVoteDelegationAutoupdate(TestCase):
def advancedSetUp(self):
""" Set up user -> other_user delegation. """
self.motion = Motion(
title="test_title_dL91JqhMTiQuQLSDRItZ",
text="test_text_R7nURdXKVEfEnnJBXJYa",
)
self.motion.save()
self.delegate_group = get_group_model().objects.get(pk=GROUP_DELEGATE_PK)
self.other_user, _ = self.create_user()
self.user, user_password = self.create_user()
self.user.groups.add(self.delegate_group)
self.user.is_present = True
self.user.vote_delegated_to = self.other_user
self.user.save()
self.user_client = APIClient()
self.user_client.login(username=self.user.username, password=user_password)
self.poll = MotionPoll.objects.create(
motion=self.motion,
title="test_title_Q3EuRaALSCCPJuQ2tMqj",
pollmethod="YNA",
type=BasePoll.TYPE_NAMED,
onehundred_percent_base="YN",
majority_method="simple",
)
self.poll.create_options()
self.poll.groups.add(self.delegate_group)
self.poll.save()
def test_start_poll(self):
response = self.client.post(reverse("motionpoll-start", args=[self.poll.pk]))
self.assertHttpStatusVerbose(response, status.HTTP_200_OK)
| mit | -2,000,771,081,450,811,000 | 39.090909 | 88 | 0.603287 | false |
tswast/google-cloud-python | videointelligence/google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py | 1 | 14088 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.videointelligence.v1 VideoIntelligenceService API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import grpc
from google.cloud.videointelligence_v1.gapic import enums
from google.cloud.videointelligence_v1.gapic import (
video_intelligence_service_client_config,
)
from google.cloud.videointelligence_v1.gapic.transports import (
video_intelligence_service_grpc_transport,
)
from google.cloud.videointelligence_v1.proto import video_intelligence_pb2
from google.cloud.videointelligence_v1.proto import video_intelligence_pb2_grpc
from google.longrunning import operations_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-videointelligence"
).version
class VideoIntelligenceServiceClient(object):
"""Service that implements Google Cloud Video Intelligence API."""
SERVICE_ADDRESS = "videointelligence.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.videointelligence.v1.VideoIntelligenceService"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VideoIntelligenceServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
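    # Illustrative usage of the classmethod above (the key file path is a
    # placeholder and not part of the generated source):
    #     client = VideoIntelligenceServiceClient.from_service_account_file(
    #         "/path/to/service-account.json")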
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.VideoIntelligenceServiceGrpcTransport,
Callable[[~.Credentials, type], ~.VideoIntelligenceServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = video_intelligence_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=video_intelligence_service_grpc_transport.VideoIntelligenceServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = video_intelligence_service_grpc_transport.VideoIntelligenceServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def annotate_video(
self,
input_uri=None,
input_content=None,
features=None,
video_context=None,
output_uri=None,
location_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Performs asynchronous video annotation. Progress and results can be
retrieved through the ``google.longrunning.Operations`` interface.
``Operation.metadata`` contains ``AnnotateVideoProgress`` (progress).
``Operation.response`` contains ``AnnotateVideoResponse`` (results).
Example:
>>> from google.cloud import videointelligence_v1
>>> from google.cloud.videointelligence_v1 import enums
>>>
>>> client = videointelligence_v1.VideoIntelligenceServiceClient()
>>>
>>> input_uri = 'gs://cloud-samples-data/video/cat.mp4'
>>> features_element = enums.Feature.LABEL_DETECTION
>>> features = [features_element]
>>>
>>> response = client.annotate_video(input_uri=input_uri, features=features)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
input_uri (str): Input video location. Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/reference-uris>`__. A video
URI may include wildcards in ``object-id``, and thus identify multiple
videos. Supported wildcards: '\*' to match 0 or more characters; '?' to
match 1 character. If unset, the input video should be embedded in the
request as ``input_content``. If set, ``input_content`` should be unset.
input_content (bytes): The video data bytes. If unset, the input video(s) should be specified
via ``input_uri``. If set, ``input_uri`` should be unset.
features (list[~google.cloud.videointelligence_v1.types.Feature]): Required. Requested video annotation features.
video_context (Union[dict, ~google.cloud.videointelligence_v1.types.VideoContext]): Additional video context and/or feature-specific parameters.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.videointelligence_v1.types.VideoContext`
output_uri (str): Optional. Location where the output (in JSON format) should be stored.
Currently, only `Google Cloud
Storage <https://cloud.google.com/storage/>`__ URIs are supported, which
must be specified in the following format: ``gs://bucket-id/object-id``
(other URI formats return ``google.rpc.Code.INVALID_ARGUMENT``). For
more information, see `Request
URIs <https://cloud.google.com/storage/docs/reference-uris>`__.
location_id (str): Optional. Cloud region where annotation should take place. Supported
cloud regions: ``us-east1``, ``us-west1``, ``europe-west1``,
``asia-east1``. If no region is specified, a region will be determined
based on video file location.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.videointelligence_v1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "annotate_video" not in self._inner_api_calls:
self._inner_api_calls[
"annotate_video"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.annotate_video,
default_retry=self._method_configs["AnnotateVideo"].retry,
default_timeout=self._method_configs["AnnotateVideo"].timeout,
client_info=self._client_info,
)
request = video_intelligence_pb2.AnnotateVideoRequest(
input_uri=input_uri,
input_content=input_content,
features=features,
video_context=video_context,
output_uri=output_uri,
location_id=location_id,
)
operation = self._inner_api_calls["annotate_video"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
video_intelligence_pb2.AnnotateVideoResponse,
metadata_type=video_intelligence_pb2.AnnotateVideoProgress,
)
| apache-2.0 | -8,513,084,120,351,419,000 | 45.039216 | 156 | 0.626704 | false |
BeeeOn/server | t/restui/t1007-types-list-detail.py | 1 | 10281 | #! /usr/bin/env python3
import config
config.import_libs()
import unittest
import socket
import json
from rest import GET, POST, PUT, DELETE
class TestTypesListDetail(unittest.TestCase):
"""
Create a session for testing.
"""
def setUp(self):
req = POST(config.ui_host, config.ui_port, "/auth")
req.body(config.PERMIT_LOGIN)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
self.session = result["data"]["id"]
def tearDown(self):
req = DELETE(config.ui_host, config.ui_port, "/auth")
req.authorize(self.session)
response, content = req()
self.assertEqual(204, response.status)
"""
List all available types.
"""
def test1_list_all(self):
req = GET(config.ui_host, config.ui_port, "/types")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
self.assertEqual(30, len(result["data"]))
def test2_detail_of_non_existing(self):
req = GET(config.ui_host, config.ui_port, "/types/12904232")
req.authorize(self.session)
response, content = req()
self.assertEqual(404, response.status)
result = json.loads(content)
self.assertEqual("error", result["status"])
self.assertEqual("requested resource does not exist", result["message"])
def test3_detail_of_battery(self):
req = GET(config.ui_host, config.ui_port, "/types/battery")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
self.assertEqual("battery", result["data"]["name"])
self.assertEqual("%", result["data"]["unit"])
def assure_range(self, id, name, min, max, step):
req = GET(config.ui_host, config.ui_port, "/types/" + id)
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
type = result["data"]
self.assertEqual(name, type["name"])
self.assertTrue("range" in type)
if min is not None:
self.assertTrue("min" in type["range"])
self.assertEqual(min, type["range"]["min"])
else:
self.assertFalse("min" in type["range"])
if max is not None:
self.assertTrue("max" in type["range"])
self.assertEqual(max, type["range"]["max"])
else:
self.assertFalse("max" in type["range"])
if step is not None:
self.assertTrue("step" in type["range"])
self.assertEqual(step, type["range"]["step"])
else:
self.assertFalse("step" in type["range"])
def test4_check_types_with_ranges(self):
self.assure_range("battery", "battery", 0, 100, 1)
self.assure_range("brightness", "brightness", 0, 100, 1)
self.assure_range("co2", "CO2", 0, 1000000, 1)
self.assure_range("humidity", "humidity", 0, 100, 1)
self.assure_range("luminance", "luminance", 0, 1000000, 1)
self.assure_range("noise", "noise", 0, 200, 1)
self.assure_range("performance", "performance", 0, 100, 1)
self.assure_range("pressure", "pressure", 800, 1100, 1)
self.assure_range("rssi", "signal", 0, 100, 1)
self.assure_range("temperature", "temperature", -273.15, 200, 0.01)
self.assure_range("ultraviolet", "UV", 0, 11, 0.1)
self.assure_range("color_temperature", "color temperature", 1700, 27000, 1)
self.assure_range("color", "color", 0, 16777215, 1)
def assure_values(self, id, name, values):
req = GET(config.ui_host, config.ui_port, "/types/" + id)
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
type = result["data"]
self.assertEqual(name, type["name"])
self.assertTrue("values" in type)
self.assertEqual(len(values), len(type["values"]))
for key in values:
self.assertTrue(key in type["values"])
self.assertEqual(values[key], type["values"][key])
def test5_check_types_with_values(self):
self.assure_values("availability", "availability", {"0": "unavailable", "1": "available"})
self.assure_values("fire", "fire", {"0": "no fire", "1": "fire"})
self.assure_values("motion", "motion", {"0": "no motion", "1": "motion"})
self.assure_values("open_close", "open/close", {"0": "closed", "1": "open"})
self.assure_values("on_off", "on/off", {"0": "off", "1": "on"})
self.assure_values("security_alert", "security alert", {"0": "ease", "1": "alert"})
self.assure_values("shake", "shake", {"0": "ease", "1": "shake"})
def assure_levels(self, id, name, levels):
req = GET(config.ui_host, config.ui_port, "/types/" + id)
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
type = result["data"]
self.assertEqual(name, type["name"])
self.assertTrue("levels" in type)
self.assertEqual(len(levels), len(type["levels"]))
for i in range(len(levels)):
if levels[i][0] is None:
self.assertFalse("min" in type["levels"][i])
else:
self.assertTrue("min" in type["levels"][i])
self.assertEqual(levels[i][0], type["levels"][i]["min"])
if levels[i][1] is None:
self.assertFalse("max" in type["levels"][i])
else:
self.assertTrue("max" in type["levels"][i])
self.assertEqual(levels[i][1], type["levels"][i]["max"])
if levels[i][2] is None:
self.assertFalse("attention" in type["levels"][i])
else:
self.assertTrue("attention" in type["levels"][i])
self.assertEqual(levels[i][2], type["levels"][i]["attention"])
if levels[i][3] is None:
self.assertFalse("name" in type["levels"][i])
else:
self.assertTrue("name" in type["levels"][i])
self.assertEqual(levels[i][3], type["levels"][i]["name"])
def test6_check_types_with_levels(self):
self.assure_levels("battery", "battery", [
(0, 10, "single", "critical"),
(11, 25, "single", "low"),
(26, 80, None, "medium"),
(81, 100, None, "high")
])
self.assure_levels("co2", "CO2", [
(None, 450, None, "normal outdoor"),
(451, 1000, None, "normal indoor"),
(1001, 2500, None, "poor air"),
(2501, 5000, "single", "adverse health effects"),
(5001, 10000, "repeat", "dangerous after few hours"),
(10001, 30000, "repeat" , "dangerous after several minutes"),
(30001, None, "alert", "extreme and dangerous")
])
self.assure_levels("fire", "fire", [
(1, 1, "alert", None)
])
self.assure_levels("motion", "motion", [
(1, 1, "single", None)
])
self.assure_levels("noise", "noise", [
(None, 80, None, "normal"),
(81, 90, None, "acceptable"),
(91, 99, "single", "loud"),
(100, 111, "repeat", "dangerous for several minutes stay"),
(112, 139, "repeat", "dangerous for few minutes stay"),
(140, None, "alert", "immediate nerve damage possible"),
])
self.assure_levels("performance", "performance", [
(0, 0, None, "idle"),
(95, None, None, "high load")
])
self.assure_levels("rssi", "signal", [
(None, 25, None, "poor"),
(26, 80, None, "good"),
(81, 100, None, "high")
])
self.assure_levels("security_alert", "security alert", [
(1, 1, "alert", None)
])
self.assure_levels("ultraviolet", "UV", [
(None, 2.9, None, "low"),
(3, 5.9, None, "moderate"),
(6, 7.9, "single", "high"),
(8, 10.9, "single", "very high"),
(11, None, "single", "extreme")
])
def test7_check_enums(self):
req = GET(config.ui_host, config.ui_port, "/types/enum/MOD_BOILER_STATUS")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
enum = result["data"]
self.assertEqual("boiler status", enum["name"])
self.assertTrue("values" in enum)
self.assertEqual(5, len(enum["values"]))
self.assertEqual("undefined", enum["values"]["0"])
self.assertEqual("heating", enum["values"]["1"])
self.assertEqual("heating water", enum["values"]["2"])
self.assertEqual("failure", enum["values"]["3"])
self.assertEqual("shutdown", enum["values"]["4"])
def test8_check_bitmap_with_flags(self):
req = GET(config.ui_host, config.ui_port, "/types/bitmap/MOD_CURRENT_BOILER_OT_FAULT_FLAGS")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
bitmap = result["data"]
self.assertEqual("OT Fault Flags", bitmap["name"])
self.assertTrue("flags" in bitmap)
self.assertEqual(6, len(bitmap["flags"]))
self.assertEqual("service request", bitmap["flags"]["0"]["name"])
self.assertEqual("lockout reset enabled", bitmap["flags"]["1"]["name"])
self.assertEqual("low water pressure", bitmap["flags"]["2"]["name"])
self.assertEqual("gas/flame fault", bitmap["flags"]["3"]["name"])
self.assertEqual("air pressure fault", bitmap["flags"]["4"]["name"])
self.assertEqual("water overheated", bitmap["flags"]["5"]["name"])
def test9_check_bitmap_with_group(self):
req = GET(config.ui_host, config.ui_port, "/types/bitmap/MOD_CURRENT_BOILER_OT_OEM_FAULTS")
req.authorize(self.session)
response, content = req()
self.assertEqual(200, response.status)
result = json.loads(content)
self.assertEqual("success", result["status"])
bitmap = result["data"]
self.assertEqual("OT OEM Faults", bitmap["name"])
self.assertTrue("groups" in bitmap)
self.assertEqual(1, len(bitmap["groups"]))
self.assertEqual("OEM specific", bitmap["groups"][0]["name"])
self.assertEqual(8, len(bitmap["groups"][0]["mapping"]))
self.assertEqual(0, bitmap["groups"][0]["mapping"][0])
self.assertEqual(1, bitmap["groups"][0]["mapping"][1])
self.assertEqual(2, bitmap["groups"][0]["mapping"][2])
self.assertEqual(3, bitmap["groups"][0]["mapping"][3])
self.assertEqual(4, bitmap["groups"][0]["mapping"][4])
self.assertEqual(5, bitmap["groups"][0]["mapping"][5])
self.assertEqual(6, bitmap["groups"][0]["mapping"][6])
self.assertEqual(7, bitmap["groups"][0]["mapping"][7])
if __name__ == '__main__':
import sys
import taprunner
unittest.main(testRunner=taprunner.TAPTestRunner(stream = sys.stdout))
| bsd-3-clause | 227,601,853,684,804,400 | 32.271845 | 94 | 0.654897 | false |
fusionbox/django-darkknight | darkknight/forms.py | 1 | 4549 | import re
import os
from django import forms
from django.db import transaction
from django.utils.translation import ugettext as _
from django.forms.formsets import BaseFormSet
from localflavor.us.us_states import US_STATES
from django_countries import countries
from OpenSSL import crypto
from darkknight.models import CertificateSigningRequest, SSLKey
from darkknight.signals import key_created
KEY_SIZE = 2048
WWW = 'www.'
def creat(filename, mode):
fd = os.open(filename, os.O_CREAT | os.O_WRONLY | os.O_EXCL, mode)
return os.fdopen(fd, 'w')
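# Illustrative sketch of creat() (not part of the original module): it opens the
# file for writing only if it does not already exist (O_EXCL) and applies the
# given permission bits, e.g.:
#     with creat('/tmp/example.key', 0o600) as f:  # hypothetical path
#         f.write('-----BEGIN RSA PRIVATE KEY-----')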
class GenerateForm(forms.Form):
countryName = forms.ChoiceField(
choices=countries,
label=_("Country Name"),
initial='US',
)
stateOrProvinceName = forms.CharField(
label=_("State or province name"),
help_text=_("Enter its full name"),
)
localityName = forms.CharField(
label=_("Locality name"),
help_text=_("eg, city name"),
)
organizationName = forms.CharField(
label=_("Organisation Name"),
help_text=_("eg, company name"),
)
organizationalUnitName = forms.CharField(
label=_("Organisation Unit"),
help_text=_("Section, Department, ... eg, IT Departement"),
required=False,
)
commonName = forms.CharField(
label=_("Common Name"),
help_text=_("Domain name, including 'www.' if applicable. "
"eg, www.example.com")
)
emailAddress = forms.EmailField(
label=_("Email address"),
required=False,
)
subjectAlternativeNames = forms.CharField(
label=_('Subject Alternative Names (SAN)'),
required=False,
help_text=_('Please put one domain name per line'),
widget=forms.Textarea,
)
def clean_countryName(self):
country = self.cleaned_data['countryName']
if not re.match('^[a-z]{2}$', country, flags=re.IGNORECASE):
            raise forms.ValidationError(_("Please enter a two-letter code"))
return country.upper()
def clean_subjectAlternativeNames(self):
sans = list(filter(bool, (
domain.strip() for domain in self.cleaned_data['subjectAlternativeNames'].splitlines()
)))
return sans
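    # Example of the SAN cleaning above (illustrative input, not from the original
    # code): "example.com\n  www.example.com\n\n" -> ['example.com', 'www.example.com']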
def clean(self):
cd = super(GenerateForm, self).clean()
if cd.get('countryName') == 'US':
try:
if cd['stateOrProvinceName'] not in set(i[1] for i in US_STATES):
self.add_error('stateOrProvinceName', 'State should be the full state name, eg "Colorado"')
except KeyError:
pass
return cd
class GenerateBaseFormSet(BaseFormSet):
def __init__(self, *args, **kwargs):
super(GenerateBaseFormSet, self).__init__(*args, **kwargs)
for form in self.forms:
form.empty_permitted = False
def generate(self):
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, KEY_SIZE)
key_obj = SSLKey()
csr_list = [self._generate_csr(pkey, key_obj, data) for data in self.cleaned_data]
with transaction.atomic():
key = crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
assert not os.path.exists(key_obj.key_path)
with creat(key_obj.key_path, 0000) as f:
f.write(key)
key_obj.save()
CertificateSigningRequest.objects.bulk_create(csr_list)
key_created.send(sender=self, instance=key_obj, private_key=key)
return key_obj
def _generate_csr(self, pkey, key_obj, cleaned_data):
req = crypto.X509Req()
req.set_pubkey(pkey)
subject = req.get_subject()
for attr, value in cleaned_data.items():
if value:
if attr == 'subjectAlternativeNames':
req.add_extensions([
crypto.X509Extension('subjectAltName', False, ", ".join(
"DNS.{i}:{domain}".format(i=i, domain=domain)
for i, domain in enumerate(value)
))
])
else:
setattr(subject, attr, value)
cn = cleaned_data['commonName']
# Strip www. from the common name
if cn.startswith(WWW):
cn = cn[len(WWW):]
req.sign(pkey, "sha256")
csr = crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
csr_obj = CertificateSigningRequest(domain=cn, key=key_obj, content=csr)
return csr_obj
| bsd-2-clause | -862,029,955,554,943,000 | 31.963768 | 111 | 0.591778 | false |
debian789/suescunet | apps/codigos/admin.py | 1 | 1478 | from django.contrib import admin
from models import mdl_codigos
from apps.elementos_comunes.models import mdl_lenguaje,mdl_sistema_operativo
from actions import export_as_csv
## creates the list of options shown in the admin !!!
class codigosAdmin(admin.ModelAdmin):
list_display = ('titulo','lenguaje','archivo','imagen_azul','esta_publicado','url')
list_filter = ('publicado','so','lenguaje')
search_fields = ('titulo','codigo')
list_editable = ('archivo',)
actions = [export_as_csv]
raw_id_fields = ('lenguaje',)
filter_horizontal = ('so',)
    def imagen_azul(self, obj):
        url = obj.imagen_azul_publicado()
        tag = '<img src="%s">' % url
        return tag
    imagen_azul.allow_tags = True  # allows the returned value to contain HTML tags
    imagen_azul.admin_order_field = 'publicado'  # allows ordering by 'publicado' (published)
class CodigosInline(admin.StackedInline):
model = mdl_codigos
extra = 1
class LenguajesAdmin(admin.ModelAdmin):
actions = [export_as_csv]
inlines = [CodigosInline]
#class SitemaOperativoAdmin(admin.ModelAdmin):
# fiter_vertical = ('so',)
#class AgregadorAdmin(admin.ModelAdmin):
# filter_vertical = ('enlaces',)
admin.site.register(mdl_sistema_operativo)
#admin.site.register(Agregador,AgregadorAdmin)
#admin.site.register(mdl_sistema_operativo)
#admin.site.register(mdl_lenguaje,LenguajesAdmin)
admin.site.register(mdl_lenguaje)
admin.site.register(mdl_codigos,codigosAdmin)
#admin.site.register(soAdmin)
#admin.site.register(mdl_lenguaje,LenguajesAdmin) | gpl-2.0 | 5,772,595,188,927,756,000 | 31.152174 | 85 | 0.747632 | false |
tliron/ronin | ronin/sdl/__init__.py | 1 | 4444 | # Copyright 2016-2018 Tal Liron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from ..contexts import current_context
from ..extensions import Extension
from ..pkg_config import _add_cflags_to_executor, _add_libs_to_executor
from ..utils.strings import stringify, bool_stringify, UNESCAPED_STRING_RE
from ..utils.platform import which
from subprocess import check_output, CalledProcessError
DEFAULT_SDL_CONFIG_COMMAND = 'sdl2-config'
def configure_sdl(config_command=None,
static=None,
prefix=None,
exec_prefix=None):
"""
Configures the current context's `SDL <https://www.libsdl.org/>`__ support.
:param config_command: config command; defaults to "sdl2-config"
:type config_command: str or ~types.FunctionType
:param static: whether to link statically; defaults to False
:type static: bool
:param prefix: sdl-config prefix
:type prefix: str or ~types.FunctionType
:param exec_prefix: sdl-config exec-prefix
:type exec_prefix: str or ~types.FunctionType
"""
with current_context(False) as ctx:
ctx.sdl.config_command = config_command or DEFAULT_SDL_CONFIG_COMMAND
ctx.sdl.static = static
ctx.sdl.prefix = prefix
ctx.sdl.exec_prefix = exec_prefix
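# Illustrative usage (an assumption for clarity, not part of the original module):
# a build script would typically call this once before constructing executors, e.g.
#     configure_sdl(config_command='sdl2-config', static=False)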
class SDL(Extension):
"""
The `SDL <https://www.libsdl.org/>`__ library, configured using the sdl2-config tool that
comes with SDL's development distribution. Supports gcc-like executors.
Note that you may also use :class:`~ronin.pkg_config.Package` to use SDL. However, this tool
offers some special options you might need.
"""
def __init__(self, command=None, static=None, prefix=None, exec_prefix=None):
"""
:param command: ``sdl-config`` command; defaults to the context's ``sdl.config_command``
:type command: str or ~types.FunctionType
:param static: whether to link statically; defaults to the context's ``sdl.config_static``
:type static: bool
:param prefix: sdl-config prefix; defaults to the context's ``sdl.prefix``
:type prefix: str or ~types.FunctionType
:param exec_prefix: sdl-config exec-prefix; defaults to the context's ``sdl.exec_prefix``
:type exec_prefix: str or ~types.FunctionType
"""
super(SDL, self).__init__()
self.command = command
self.static = static
self.prefix = prefix
self.exec_prefix = exec_prefix
def apply_to_executor_gcc_compile(self, executor):
_add_cflags_to_executor(executor, self._parse('--cflags'))
def apply_to_executor_gcc_link(self, executor):
with current_context() as ctx:
sdl_config_static = bool_stringify(ctx.fallback(self.static, 'sdl.static', False))
_add_libs_to_executor(executor, self._parse('--static-libs'
if sdl_config_static else '--libs'))
def _parse(self, flags):
with current_context() as ctx:
sdl_config_command = which(ctx.fallback(self.command, 'sdl.config_command',
DEFAULT_SDL_CONFIG_COMMAND))
sdl_config_prefix = stringify(ctx.fallback(self.prefix, 'sdl.prefix'))
sdl_config_exec_prefix = stringify(ctx.fallback(self.exec_prefix, 'sdl.exec_prefix'))
args = [sdl_config_command, flags]
if sdl_config_prefix is not None:
args.append('--prefix={}'.format(sdl_config_prefix))
if sdl_config_exec_prefix is not None:
args.append('--exec-prefix={}'.format(sdl_config_exec_prefix))
try:
output = check_output(args).decode().strip()
return UNESCAPED_STRING_RE.split(output)
except CalledProcessError:
raise Exception("failed to run: '{}'".format(' '.join(args)))
| apache-2.0 | -7,181,409,825,519,541,000 | 41.730769 | 98 | 0.649865 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/tests/sequential/test_rule_006.py | 1 | 1193 |
import os
import unittest
from vsg.rules import sequential
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_006_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_006_test_input.fixed.vhd'), lExpected)
class test_sequential_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_006(self):
oRule = sequential.rule_006()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'sequential')
self.assertEqual(oRule.identifier, '006')
lExpected = [19, 21]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_006(self):
oRule = sequential.rule_006()
oRule.fixable = True
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| gpl-3.0 | 3,324,378,810,418,496,500 | 24.934783 | 106 | 0.678122 | false |
tvotyakov/codeeval | easy/hidden-digits/code.py | 1 | 1115 | #!python3
def hidden_digits(in_str):
'''(string) -> string
Finds all visible and hidden digits in the in_str string
and return them out in order of their appearance as one string.
>>> hidden_digits('abcdefghik')
'012345678'
>>> hidden_digits('Xa,}A#5N}{xOBwYBHIlH,#W')
'05'
>>> hidden_digits("(ABW>'yy^'M{X-K}q,")
'NONE'
>>> hidden_digits('6240488')
'6240488'
'''
hidden_digit_map = 'abcdefghij'
result = ''
for ch in in_str:
if ch.isdigit():
result += ch
else:
pos = hidden_digit_map.find(ch)
if pos != -1:
result += str(pos)
return 'NONE' if result == '' else result
if __name__ == '__main__':
import sys
if (len(sys.argv) <= 1):
import doctest
doctest.testmod()
else:
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.rstrip('\n')
if not test: continue # ignore an empty line
print(hidden_digits(test))
test_cases.close()
| gpl-2.0 | 8,614,142,024,831,928,000 | 25.875 | 67 | 0.510314 | false |
acjones617/k-means | lib/exec.py | 1 | 1524 | import normalize as n
import cluster as c
import jsonpickle as j
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument('matrix')
parser.add_argument('options')
args = parser.parse_args()
matrix = ast.literal_eval(args.matrix)
options = ast.literal_eval(args.options)
# steps:
# 1. normalize data
# 2. randomly pick center points
# 3. assign points to a cluster
# 4. re-pick cluster center points
# 5. repeat
# 6. assign clusters to original data
# 7. send back to node
# steps:
# 1. normalize data
normal_matrix = n.normalize(matrix)
# 2. randomly pick cluster center points
cluster_centers = c.init(normal_matrix, options['clusters'])
# 3. assign points to a cluster
# 4. re-pick cluster center points
# 5. repeat steps 3 and 4
clusters = []
has_converged = False
for i in range(options['iterations']):
old_clusters = clusters
cluster_points, clusters = c.assign_points(normal_matrix, cluster_centers)
if c.converged(old_clusters, clusters):
has_converged = True
break
cluster_centers = c.reselect_centers(cluster_points, options['clusters'])
# final assignment of points if never converged
if (not has_converged):
cluster_points, clusters = c.assign_points(normal_matrix, cluster_centers)
# 6. assign clusters to original data
final_matrix = n.reassign(matrix, cluster_points)
# 7. send back to node - need to convert cluster centers to list first
print j.encode({
'finalMatrix' : final_matrix,
'clusterCenters': cluster_centers
})
| mit | 4,464,520,530,550,727,700 | 24.4 | 78 | 0.727034 | false |
mlperf/training_results_v0.7 | Google/benchmarks/ssd/implementations/ssd-research-TF-tpu-v4-512/ssd_main.py | 1 | 11062 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script for SSD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import multiprocessing
import sys
import threading
from absl import app
import tensorflow.compat.v1 as tf
from REDACTED.mlp_log import mlp_log
from REDACTED.ssd import coco_metric
from REDACTED.ssd import dataloader
from REDACTED.ssd import ssd_constants
from REDACTED.ssd import ssd_model
from REDACTED.util import train_and_eval_runner
# copybara:strip_begin
from REDACTED.REDACTED.multiprocessing import REDACTEDprocess
# copybara:strip_end
tf.flags.DEFINE_string(
'resnet_checkpoint',
'/REDACTED/mb-d/home/tpu-perf-team/ssd_checkpoint/resnet34_bs2048_2',
'Location of the ResNet checkpoint to use for model '
'initialization.')
tf.flags.DEFINE_string('hparams', '',
'Comma separated k=v pairs of hyperparameters.')
tf.flags.DEFINE_integer(
'num_shards', default=8, help='Number of shards (TPU cores) for '
'training.')
tf.flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
tf.flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
tf.flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
'evaluation.')
tf.flags.DEFINE_integer(
'iterations_per_loop', 1000, 'Number of iterations per TPU training loop')
tf.flags.DEFINE_string(
'training_file_pattern',
'REDACTEDtrain*',
'Glob for training data files (e.g., COCO train - minival set)')
tf.flags.DEFINE_string(
'validation_file_pattern',
'REDACTEDval*',
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
tf.flags.DEFINE_bool(
'use_fake_data', False,
'Use fake data to reduce the input preprocessing overhead (for unit tests)')
tf.flags.DEFINE_string(
'val_json_file',
'REDACTEDinstances_val2017.json',
'COCO validation JSON containing golden bounding boxes.')
tf.flags.DEFINE_integer('num_examples_per_epoch', 118287,
'Number of examples in one epoch')
tf.flags.DEFINE_integer('num_epochs', 64, 'Number of epochs for training')
tf.flags.DEFINE_multi_integer(
'input_partition_dims',
default=None,
help=('Number of partitions on each dimension of the input. Each TPU core'
' processes a partition of the input image in parallel using spatial'
' partitioning.'))
tf.flags.DEFINE_bool('run_cocoeval', True, 'Whether to run cocoeval')
FLAGS = tf.flags.FLAGS
_STOP = -1
def construct_run_config(iterations_per_loop):
"""Construct the run config."""
# Parse hparams
hparams = ssd_model.default_hparams()
hparams.parse(FLAGS.hparams)
return dict(
hparams.values(),
num_shards=FLAGS.num_shards,
num_examples_per_epoch=FLAGS.num_examples_per_epoch,
resnet_checkpoint=FLAGS.resnet_checkpoint,
val_json_file=FLAGS.val_json_file,
model_dir=FLAGS.model_dir,
iterations_per_loop=iterations_per_loop,
steps_per_epoch=FLAGS.num_examples_per_epoch // FLAGS.train_batch_size,
eval_samples=FLAGS.eval_samples,
transpose_input=False if FLAGS.input_partition_dims is not None else True,
use_spatial_partitioning=True
if FLAGS.input_partition_dims is not None else False,
)
# copybara:strip_begin
def REDACTED_predict_post_processing():
"""REDACTED batch-processes the predictions."""
q_in, q_out = REDACTEDprocess.get_user_data()
predict_post_processing(q_in, q_out)
# copybara:strip_end
def predict_post_processing(q_in, q_out):
"""Run post-processing on CPU for predictions."""
coco_gt = coco_metric.create_coco(FLAGS.val_json_file, use_cpp_extension=True)
current_step, predictions = q_in.get()
while current_step != _STOP and q_out is not None:
q_out.put((current_step,
coco_metric.compute_map(
predictions,
coco_gt,
use_cpp_extension=True,
nms_on_tpu=True)))
current_step, predictions = q_in.get()
def main(argv):
del argv # Unused.
params = construct_run_config(FLAGS.iterations_per_loop)
mlp_log.mlperf_print(key='cache_clear', value=True)
mlp_log.mlperf_print(key='init_start', value=None)
mlp_log.mlperf_print('global_batch_size', FLAGS.train_batch_size)
mlp_log.mlperf_print('opt_base_learning_rate', params['base_learning_rate'])
mlp_log.mlperf_print(
'opt_learning_rate_decay_boundary_epochs',
[params['first_lr_drop_epoch'], params['second_lr_drop_epoch']])
mlp_log.mlperf_print('opt_weight_decay', params['weight_decay'])
mlp_log.mlperf_print(
'model_bn_span', FLAGS.train_batch_size // FLAGS.num_shards *
params['distributed_group_size'])
mlp_log.mlperf_print('max_samples', ssd_constants.NUM_CROP_PASSES)
mlp_log.mlperf_print('train_samples', FLAGS.num_examples_per_epoch)
mlp_log.mlperf_print('eval_samples', FLAGS.eval_samples)
params['batch_size'] = FLAGS.train_batch_size // FLAGS.num_shards
input_partition_dims = FLAGS.input_partition_dims
train_steps = FLAGS.num_epochs * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
eval_steps = int(math.ceil(FLAGS.eval_samples / FLAGS.eval_batch_size))
runner = train_and_eval_runner.TrainAndEvalRunner(FLAGS.iterations_per_loop,
train_steps, eval_steps,
FLAGS.num_shards)
train_input_fn = dataloader.SSDInputReader(
FLAGS.training_file_pattern,
params['transpose_input'],
is_training=True,
use_fake_data=FLAGS.use_fake_data,
params=params)
eval_input_fn = dataloader.SSDInputReader(
FLAGS.validation_file_pattern,
is_training=False,
use_fake_data=FLAGS.use_fake_data,
distributed_eval=True,
count=eval_steps * FLAGS.eval_batch_size,
params=params)
def init_fn():
tf.train.init_from_checkpoint(params['resnet_checkpoint'], {
'resnet/': 'resnet%s/' % ssd_constants.RESNET_DEPTH,
})
if FLAGS.run_cocoeval:
# copybara:strip_begin
q_in, q_out = REDACTEDprocess.get_user_data()
processes = [
REDACTEDprocess.Process(target=REDACTED_predict_post_processing) for _ in range(4)
]
# copybara:strip_end_and_replace_begin
# q_in = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# q_out = multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE)
# processes = [
# multiprocessing.Process(
# target=predict_post_processing, args=(q_in, q_out))
# for _ in range(self.num_multiprocessing_workers)
# ]
# copybara:replace_end
for p in processes:
p.start()
def log_eval_results_fn():
"""Print out MLPerf log."""
result = q_out.get()
success = False
while result[0] != _STOP:
if not success:
steps_per_epoch = (
FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
epoch = (result[0] + FLAGS.iterations_per_loop) // steps_per_epoch
mlp_log.mlperf_print(
'eval_accuracy',
result[1]['COCO/AP'],
metadata={'epoch_num': epoch})
mlp_log.mlperf_print('eval_stop', None, metadata={'epoch_num': epoch})
if result[1]['COCO/AP'] > ssd_constants.EVAL_TARGET:
success = True
mlp_log.mlperf_print(
'run_stop', None, metadata={'status': 'success'})
result = q_out.get()
if not success:
mlp_log.mlperf_print('run_stop', None, metadata={'status': 'abort'})
log_eval_result_thread = threading.Thread(target=log_eval_results_fn)
log_eval_result_thread.start()
runner.initialize(train_input_fn, eval_input_fn,
functools.partial(ssd_model.ssd_model_fn,
params), FLAGS.train_batch_size,
FLAGS.eval_batch_size, input_partition_dims, init_fn)
mlp_log.mlperf_print('init_stop', None)
mlp_log.mlperf_print('run_start', None)
def eval_init_fn(cur_step):
"""Executed before every eval."""
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_start',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
mlp_log.mlperf_print(
'eval_start',
None,
metadata={
'epoch_num': epoch + FLAGS.iterations_per_loop // steps_per_epoch
})
def eval_finish_fn(cur_step, eval_output, _):
steps_per_epoch = FLAGS.num_examples_per_epoch // FLAGS.train_batch_size
epoch = cur_step // steps_per_epoch
mlp_log.mlperf_print(
'block_stop',
None,
metadata={
'first_epoch_num': epoch,
'epoch_count': FLAGS.iterations_per_loop // steps_per_epoch
})
if FLAGS.run_cocoeval:
q_in.put((cur_step, eval_output['detections']))
runner.train_and_eval(eval_init_fn, eval_finish_fn)
if FLAGS.run_cocoeval:
for _ in processes:
q_in.put((_STOP, None))
for p in processes:
try:
p.join(timeout=10)
except Exception: # pylint: disable=broad-except
pass
q_out.put((_STOP, None))
log_eval_result_thread.join()
# Clear out all the queues to avoid deadlock.
while not q_out.empty():
q_out.get()
while not q_in.empty():
q_in.get()
if __name__ == '__main__':
# copybara:strip_begin
user_data = (multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE),
multiprocessing.Queue(maxsize=ssd_constants.QUEUE_SIZE))
in_compile_test = False
for arg in sys.argv:
if arg == '--xla_jf_exit_process_on_compilation_success=true':
in_compile_test = True
break
if in_compile_test:
# Exiting from XLA's C extension skips REDACTEDprocess's multiprocessing clean
# up. Don't use REDACTED process when xla is in compilation only mode.
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
else:
with REDACTEDprocess.main_handler(user_data=user_data):
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
# copybara:strip_end
# copybara:insert tf.logging.set_verbosity(tf.logging.INFO)
# copybara:insert app.run(main)
| apache-2.0 | 66,469,063,065,002,300 | 35.388158 | 90 | 0.653679 | false |
openwebcc/ba | maintenance/rawdata/als/hef/071011_hef14_fix_ala.py | 1 | 2755 | #!/usr/bin/python
#
# fix incorrect syntax for echoes in .ala files of 071011_hef14
#
import re
import os
import sys
sys.path.append('/home/institut/rawdata/www/lib')
from Laser.Util import las
if __name__ == '__main__':
""" fix incorrect return number and number of returns for given pulse syntax """
import argparse
parser = argparse.ArgumentParser(description='fix incorrect syntax for echoes in .ala files')
parser.add_argument('--ala', dest='ala', required=True, help='path to input .ala file')
parser.add_argument('--out', dest='out', required=True, help='path to cleaned output file')
args = parser.parse_args()
# init utility library
util = las.rawdata()
# open output file
o = open(args.out,'w')
# loop through input file, read pairs of line and clean up
with open(args.ala) as f:
prev_line = None
curr_line = None
for line in f:
if not prev_line:
prev_line = util.parse_line(line)
continue
else:
curr_line = util.parse_line(line)
# alter previous and current line in one turn if gpstimes are the same
if prev_line[0] == curr_line[0]:
# set return numbers of previous echo
prev_line[-2] = '1'
prev_line[-1] = '2'
# set return numbers of current echo
curr_line[-2] = '2'
curr_line[-1] = '2'
# write out both lines
o.write('%s\n' % ' '.join(prev_line))
o.write('%s\n' % ' '.join(curr_line))
# set previous line to None
prev_line = None
continue
else:
# write previous line with 1 1 as no second echo is present
prev_line[-2] = '1'
prev_line[-1] = '1'
o.write('%s\n' % ' '.join(prev_line))
# assign current line as next previous line
prev_line = curr_line[:]
# write last record from loop if any
if prev_line:
o.write('%s\n' % ' '.join(prev_line))
# create log file
with open("%s.txt" % args.out, "w") as log:
log.write("the corresponding file was created with %s\n" % __file__)
log.write("it contains fixed return numbers for first and second returns\n")
log.write("\n")
log.write("input file with incorrect return numbers: %s\n" % re.sub("raw/str/ala","raw/bad/ala",args.ala[:-4]) )
log.write("output file with correct return numbers: %s\n" % args.out)
log.write("\n")
# close cleaned output file
o.close()
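    # Sketch of the intended fix (assumes util.parse_line yields the
    # whitespace-separated fields of a record, with return number and number
    # of returns in the last two columns):
    #   two echoes sharing a gpstime  ->  "... 1 2" and "... 2 2"
    #   a single echo                 ->  "... 1 1"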
| gpl-3.0 | -4,989,434,933,116,422,000 | 33.873418 | 120 | 0.536116 | false |
google/retiming | models/networks.py | 1 | 16657 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from third_party.models.networks import init_net
###############################################################################
# Helper Functions
###############################################################################
def define_LNR(nf=64, texture_channels=16, texture_res=16, n_textures=25, gpu_ids=[]):
"""Create a layered neural renderer.
Parameters:
nf (int) -- the number of channels in the first/last conv layers
texture_channels (int) -- the number of channels in the neural texture
texture_res (int) -- the size of each individual texture map
n_textures (int) -- the number of individual texture maps
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a layered neural rendering model.
"""
net = LayeredNeuralRenderer(nf, texture_channels, texture_res, n_textures)
return init_net(net, gpu_ids)
def define_kp2uv(nf=64, gpu_ids=[]):
"""Create a keypoint-to-UV model.
Parameters:
nf (int) -- the number of channels in the first/last conv layers
Returns a keypoint-to-UV model.
"""
net = kp2uv(nf)
return init_net(net, gpu_ids)
def cal_alpha_reg(prediction, lambda_alpha_l1, lambda_alpha_l0):
"""Calculate the alpha regularization term.
Parameters:
prediction (tensor) - - composite of predicted alpha layers
lambda_alpha_l1 (float) - - weight for the L1 regularization term
lambda_alpha_l0 (float) - - weight for the L0 regularization term
Returns the alpha regularization loss term
"""
assert prediction.max() <= 1.
assert prediction.min() >= 0.
loss = 0.
if lambda_alpha_l1 > 0:
loss += lambda_alpha_l1 * torch.mean(prediction)
if lambda_alpha_l0 > 0:
# Pseudo L0 loss using a squished sigmoid curve.
l0_prediction = (torch.sigmoid(prediction * 5.0) - 0.5) * 2.0
loss += lambda_alpha_l0 * torch.mean(l0_prediction)
return loss
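# Minimal usage sketch (the lambda weights are placeholders, not tuned values):
#
#     alpha = composite[:, 3:4] * .5 + .5     # map tanh output into [0, 1]
#     reg = cal_alpha_reg(alpha, lambda_alpha_l1=0.01, lambda_alpha_l0=0.005)
#
# The L1 term shrinks overall opacity, while the squished-sigmoid L0 term
# pushes nearly transparent pixels all the way to zero.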
##############################################################################
# Classes
##############################################################################
class MaskLoss(nn.Module):
"""Define the loss which encourages the predicted alpha matte to match the mask (trimap)."""
def __init__(self):
super(MaskLoss, self).__init__()
self.loss = nn.L1Loss(reduction='none')
def __call__(self, prediction, target):
"""Calculate loss given predicted alpha matte and trimap.
Balance positive and negative regions. Exclude 'unknown' region from loss.
Parameters:
prediction (tensor) - - predicted alpha
target (tensor) - - trimap
Returns: the computed loss
"""
mask_err = self.loss(prediction, target)
pos_mask = F.relu(target)
neg_mask = F.relu(-target)
pos_mask_loss = (pos_mask * mask_err).sum() / (1 + pos_mask.sum())
neg_mask_loss = (neg_mask * mask_err).sum() / (1 + neg_mask.sum())
loss = .5 * (pos_mask_loss + neg_mask_loss)
return loss
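# Usage sketch (shapes and the trimap convention are assumptions consistent
# with the ReLU split above: +1 known foreground, -1 known background, 0 for
# the excluded unknown band):
#
#     criterion = MaskLoss()
#     loss = criterion(pred_alpha, trimap)    # both tensors shaped [B, 1, H, W]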
class ConvBlock(nn.Module):
"""Helper module consisting of a convolution, optional normalization and activation, with padding='same'."""
def __init__(self, conv, in_channels, out_channels, ksize=4, stride=1, dil=1, norm=None, activation='relu'):
"""Create a conv block.
Parameters:
conv (convolutional layer) - - the type of conv layer, e.g. Conv2d, ConvTranspose2d
in_channels (int) - - the number of input channels
            out_channels (int) - - the number of output channels
ksize (int) - - the kernel size
stride (int) - - stride
dil (int) - - dilation
norm (norm layer) - - the type of normalization layer, e.g. BatchNorm2d, InstanceNorm2d
activation (str) -- the type of activation: relu | leaky | tanh | none
"""
super(ConvBlock, self).__init__()
self.k = ksize
self.s = stride
self.d = dil
self.conv = conv(in_channels, out_channels, ksize, stride=stride, dilation=dil)
if norm is not None:
self.norm = norm(out_channels)
else:
self.norm = None
if activation == 'leaky':
self.activation = nn.LeakyReLU(0.2)
elif activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'tanh':
self.activation = nn.Tanh()
else:
self.activation = None
def forward(self, x):
"""Forward pass. Compute necessary padding and cropping because pytorch doesn't have pad=same."""
height, width = x.shape[-2:]
if isinstance(self.conv, nn.modules.ConvTranspose2d):
desired_height = height * self.s
desired_width = width * self.s
pady = 0
padx = 0
else:
# o = [i + 2*p - k - (k-1)*(d-1)]/s + 1
# padding = .5 * (stride * (output-1) + (k-1)(d-1) + k - input)
desired_height = height // self.s
desired_width = width // self.s
pady = .5 * (self.s * (desired_height - 1) + (self.k - 1) * (self.d - 1) + self.k - height)
padx = .5 * (self.s * (desired_width - 1) + (self.k - 1) * (self.d - 1) + self.k - width)
x = F.pad(x, [int(np.floor(padx)), int(np.ceil(padx)), int(np.floor(pady)), int(np.ceil(pady))])
x = self.conv(x)
if x.shape[-2] != desired_height or x.shape[-1] != desired_width:
cropy = x.shape[-2] - desired_height
cropx = x.shape[-1] - desired_width
x = x[:, :, int(np.floor(cropy / 2.)):-int(np.ceil(cropy / 2.)),
int(np.floor(cropx / 2.)):-int(np.ceil(cropx / 2.))]
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class ResBlock(nn.Module):
"""Define a residual block."""
def __init__(self, channels, ksize=4, stride=1, dil=1, norm=None, activation='relu'):
"""Initialize the residual block, which consists of 2 conv blocks with a skip connection."""
super(ResBlock, self).__init__()
self.convblock1 = ConvBlock(nn.Conv2d, channels, channels, ksize=ksize, stride=stride, dil=dil, norm=norm,
activation=activation)
self.convblock2 = ConvBlock(nn.Conv2d, channels, channels, ksize=ksize, stride=stride, dil=dil, norm=norm,
activation=None)
def forward(self, x):
identity = x
x = self.convblock1(x)
x = self.convblock2(x)
x += identity
return x
class kp2uv(nn.Module):
"""UNet architecture for converting keypoint image to UV map.
Same person UV map format as described in https://arxiv.org/pdf/1802.00434.pdf.
"""
def __init__(self, nf=64):
super(kp2uv, self).__init__(),
self.encoder = nn.ModuleList([
ConvBlock(nn.Conv2d, 3, nf, ksize=4, stride=2),
ConvBlock(nn.Conv2d, nf, nf * 2, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 2, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=3, stride=1, norm=nn.InstanceNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=3, stride=1, norm=nn.InstanceNorm2d, activation='leaky')])
self.decoder = nn.ModuleList([
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 2, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2 * 2, nf, ksize=4, stride=2, norm=nn.InstanceNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2, nf, ksize=4, stride=2, norm=nn.InstanceNorm2d)])
# head to predict body part class (25 classes - 24 body parts, 1 background.)
self.id_pred = ConvBlock(nn.Conv2d, nf + 3, 25, ksize=3, stride=1, activation='none')
# head to predict UV coordinates for every body part class
self.uv_pred = ConvBlock(nn.Conv2d, nf + 3, 2 * 24, ksize=3, stride=1, activation='tanh')
def forward(self, x):
"""Forward pass through UNet, handling skip connections.
Parameters:
x (tensor) - - rendered keypoint image, shape [B, 3, H, W]
Returns:
x_id (tensor): part id class probabilities
x_uv (tensor): uv coordinates for each part id
"""
skips = [x]
for i, layer in enumerate(self.encoder):
x = layer(x)
if i < 5:
skips.append(x)
for layer in self.decoder:
x = torch.cat((x, skips.pop()), 1)
x = layer(x)
x = torch.cat((x, skips.pop()), 1)
x_id = self.id_pred(x)
x_uv = self.uv_pred(x)
return x_id, x_uv
class LayeredNeuralRenderer(nn.Module):
"""Layered Neural Rendering model for video decomposition.
Consists of neural texture, UNet, upsampling module.
"""
def __init__(self, nf=64, texture_channels=16, texture_res=16, n_textures=25):
super(LayeredNeuralRenderer, self).__init__(),
"""Initialize layered neural renderer.
Parameters:
nf (int) -- the number of channels in the first/last conv layers
texture_channels (int) -- the number of channels in the neural texture
texture_res (int) -- the size of each individual texture map
n_textures (int) -- the number of individual texture maps
"""
        # Neural texture is implemented as 'n_textures' individual texture maps concatenated horizontally
self.texture = nn.Parameter(torch.randn(1, texture_channels, texture_res, n_textures * texture_res))
# Define UNet
self.encoder = nn.ModuleList([
ConvBlock(nn.Conv2d, texture_channels + 1, nf, ksize=4, stride=2),
ConvBlock(nn.Conv2d, nf, nf * 2, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 2, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=1, dil=2, norm=nn.BatchNorm2d, activation='leaky'),
ConvBlock(nn.Conv2d, nf * 4, nf * 4, ksize=4, stride=1, dil=2, norm=nn.BatchNorm2d, activation='leaky')])
self.decoder = nn.ModuleList([
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 4, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 4 * 2, nf * 2, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2 * 2, nf, ksize=4, stride=2, norm=nn.BatchNorm2d),
ConvBlock(nn.ConvTranspose2d, nf * 2, nf, ksize=4, stride=2, norm=nn.BatchNorm2d)])
self.final_rgba = ConvBlock(nn.Conv2d, nf, 4, ksize=4, stride=1, activation='tanh')
# Define upsampling block, which outputs a residual
upsampling_ic = texture_channels + 5 + nf
self.upsample_block = nn.Sequential(
ConvBlock(nn.Conv2d, upsampling_ic, nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ResBlock(nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ResBlock(nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ResBlock(nf, ksize=3, stride=1, norm=nn.InstanceNorm2d),
ConvBlock(nn.Conv2d, nf, 4, ksize=3, stride=1, activation='none'))
def render(self, x):
"""Pass inputs for a single layer through UNet.
Parameters:
x (tensor) - - sampled texture concatenated with person IDs
Returns RGBA for the input layer and the final feature maps.
"""
skips = [x]
for i, layer in enumerate(self.encoder):
x = layer(x)
if i < 5:
skips.append(x)
for layer in self.decoder:
x = torch.cat((x, skips.pop()), 1)
x = layer(x)
rgba = self.final_rgba(x)
return rgba, x
def forward(self, uv_map, id_layers, uv_map_upsampled=None, crop_params=None):
"""Forward pass through layered neural renderer.
Steps:
1. Sample from the neural texture using uv_map
2. Input uv_map and id_layers into UNet
2a. If doing upsampling, then pass upsampled inputs and results through upsampling module
3. Composite RGBA outputs.
Parameters:
uv_map (tensor) - - UV maps for all layers, with shape [B, (2*L), H, W]
id_layers (tensor) - - person ID for all layers, with shape [B, L, H, W]
uv_map_upsampled (tensor) - - upsampled UV maps to input to upsampling module (if None, skip upsampling)
            crop_params (tuple) - - optional (starty, endy, startx, endx) crop applied to the upsampled output
"""
b_sz = uv_map.shape[0]
n_layers = uv_map.shape[1] // 2
texture = self.texture.repeat(b_sz, 1, 1, 1)
composite = None
layers = []
sampled_textures = []
for i in range(n_layers):
# Get RGBA for this layer.
uv_map_i = uv_map[:, i * 2:(i + 1) * 2, ...]
uv_map_i = uv_map_i.permute(0, 2, 3, 1)
sampled_texture = F.grid_sample(texture, uv_map_i, mode='bilinear', padding_mode='zeros')
inputs = torch.cat([sampled_texture, id_layers[:, i:i + 1]], 1)
rgba, last_feat = self.render(inputs)
if uv_map_upsampled is not None:
uv_map_up_i = uv_map_upsampled[:, i * 2:(i + 1) * 2, ...]
uv_map_up_i = uv_map_up_i.permute(0, 2, 3, 1)
sampled_texture_up = F.grid_sample(texture, uv_map_up_i, mode='bilinear', padding_mode='zeros')
id_layers_up = F.interpolate(id_layers[:, i:i + 1], size=sampled_texture_up.shape[-2:],
mode='bilinear')
inputs_up = torch.cat([sampled_texture_up, id_layers_up], 1)
upsampled_size = inputs_up.shape[-2:]
rgba = F.interpolate(rgba, size=upsampled_size, mode='bilinear')
last_feat = F.interpolate(last_feat, size=upsampled_size, mode='bilinear')
if crop_params is not None:
starty, endy, startx, endx = crop_params
rgba = rgba[:, :, starty:endy, startx:endx]
last_feat = last_feat[:, :, starty:endy, startx:endx]
inputs_up = inputs_up[:, :, starty:endy, startx:endx]
rgba_residual = self.upsample_block(torch.cat((rgba, inputs_up, last_feat), 1))
rgba += .01 * rgba_residual
rgba = torch.clamp(rgba, -1, 1)
sampled_texture = sampled_texture_up
# Update the composite with this layer's RGBA output
if composite is None:
composite = rgba
else:
alpha = rgba[:, 3:4] * .5 + .5
composite = rgba * alpha + composite * (1.0 - alpha)
layers.append(rgba)
sampled_textures.append(sampled_texture)
outputs = {
'reconstruction': composite,
'layers': torch.stack(layers, 1),
'sampled texture': sampled_textures, # for debugging
}
return outputs
| apache-2.0 | 2,665,017,518,263,958,500 | 44.386921 | 117 | 0.576514 | false |
samirelanduk/pygtop | pygtop/targets.py | 1 | 14667 | """Contains target-specific objects and functions."""
from . import gtop
from . import pdb
from .interactions import Interaction, get_interaction_by_id
from .exceptions import NoSuchTargetError, NoSuchTargetFamilyError
from .shared import DatabaseLink, Gene, strip_html
def get_target_by_id(target_id):
"""Returns a Target object of the target with the given ID.
:param int target_id: The GtoP ID of the Target desired.
:rtype: :py:class:`Target`
:raises: :class:`.NoSuchTargetError`: if no such target exists in the database"""
if not isinstance(target_id, int):
raise TypeError("target_id must be int, not '%s'" % str(target_id))
json_data = gtop.get_json_from_gtop("targets/%i" % target_id)
if json_data:
return Target(json_data)
else:
raise NoSuchTargetError("There is no target with ID %i" % target_id)
def get_all_targets():
"""Returns a list of all targets in the Guide to PHARMACOLOGY database. This
can take a few seconds.
:returns: list of :py:class:`Target` objects"""
json_data = gtop.get_json_from_gtop("targets")
return [Target(t) for t in json_data]
def get_targets_by(criteria):
"""Get all targets which specify the criteria dictionary.
:param dict criteria: A dictionary of `field=value` pairs. See the\
`GtoP target web services page <http://www.guidetopharmacology.org/\
webServices.jsp#targets>`_ for key/value pairs which can be supplied.
:returns: list of :py:class:`Target` objects."""
if not isinstance(criteria, dict):
raise TypeError("criteria must be dict, not '%s'" % str(criteria))
search_string = "&".join(["%s=%s" % (key, criteria[key]) for key in criteria])
json_data = gtop.get_json_from_gtop("targets?%s" % search_string)
if json_data:
return [Target(t) for t in json_data]
else:
return []
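# Example sketch (the 'type' field follows the GtoP target web services page
# linked above; any other supported field=value pair works the same way):
#
#     gpcrs = get_targets_by({'type': 'GPCR'})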
def get_target_by_name(name):
"""Returns the target which matches the name given.
:param str name: The name of the target to search for. Note that synonyms \
will not be searched.
:rtype: :py:class:`Target`
:raises: :class:`.NoSuchTargetError`: if no such target exists in the database."""
if not isinstance(name, str):
raise TypeError("name must be str, not '%s'" % str(name))
targets = get_targets_by({"name": name})
if targets:
return targets[0]
else:
raise NoSuchTargetError("There is no target with name %s" % name)
def get_target_family_by_id(family_id):
"""Returns a TargetFamily object of the family with the given ID.
:param int family_id: The GtoP ID of the TargetFamily desired.
:rtype: :py:class:`TargetFamily`
:raises: :class:`.NoSuchTargetFamilyError`: if no such family exists in the database"""
if not isinstance(family_id, int):
raise TypeError("family_id must be int, not '%s'" % str(family_id))
json_data = gtop.get_json_from_gtop("targets/families/%i" % family_id)
if json_data:
return TargetFamily(json_data)
else:
raise NoSuchTargetFamilyError("There is no Target Family with ID %i" % family_id)
def get_all_target_families():
"""Returns a list of all target families in the Guide to PHARMACOLOGY database.
:returns: list of :py:class:`TargetFamily` objects"""
json_data = gtop.get_json_from_gtop("targets/families")
return [TargetFamily(f) for f in json_data]
class Target:
"""A Guide to PHARMACOLOGY target object.
:param json_data: A dictionary obtained from the web services."""
def __init__(self, json_data):
self.json_data = json_data
self._target_id = json_data["targetId"]
self._name = json_data["name"]
self._abbreviation = json_data["abbreviation"]
self._systematic_name = json_data["systematicName"]
self._target_type = json_data["type"]
self._family_ids = json_data["familyIds"]
self._subunit_ids = json_data["subunitIds"]
self._complex_ids = json_data["complexIds"]
def __repr__(self):
return "<Target %i (%s)>" % (self._target_id, self._name)
def target_id(self):
"""Returns the target's GtoP ID.
:rtype: int"""
return self._target_id
@strip_html
def name(self):
"""Returns the target's name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._name
@strip_html
def abbreviation(self):
"""Returns the target's abbreviated name.
:param bool strip_html: If ``True``, the abbreviation will have HTML entities stripped.
:rtype: str"""
return self._abbreviation
@strip_html
def systematic_name(self):
"""Returns the target's systematic name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._systematic_name
def target_type(self):
"""Returns the target's type.
:rtype: str"""
return self._target_type
def family_ids(self):
"""Returns the the family IDs of any families this target is a member of.
:returns: list of ``int``"""
return self._family_ids
def families(self):
"""Returns a list of all target families of which this target is a member.
:returns: list of :py:class:`TargetFamily` objects"""
return [get_target_family_by_id(i) for i in self._family_ids]
def subunit_ids(self):
"""Returns the the target IDs of all targets which are subunits of this
target.
:returns: list of ``int``"""
return self._subunit_ids
def subunits(self):
"""Returns a list of all targets which are subunits of this target.
:returns: list of :py:class:`Target` objects"""
return [get_target_by_id(id_) for id_ in self._subunit_ids]
def complex_ids(self):
"""Returns the the target IDs of all targets of which this target is a
subunit.
:returns: list of ``int``"""
return self._complex_ids
def complexes(self):
"""Returns a list of all targets of which this target is a subunit.
:returns: list of :py:class:`Target` objects"""
return [get_target_by_id(id_) for id_ in self._complex_ids]
@strip_html
def synonyms(self):
"""Returns any synonyms for this target.
:param bool strip_html: If ``True``, the synonyms will have HTML entities stripped.
:returns: list of str"""
return [synonym["name"] for synonym in self._get_synonym_json()]
def database_links(self, species=None):
"""Returns any database links for this target.
:param str species: If given, only links belonging to this species will be returned.
:returns: list of :class:`.DatabaseLink` objects."""
if species:
return [DatabaseLink(link_json) for link_json in self._get_database_json()
if link_json["species"] and link_json["species"].lower() == species.lower()]
else:
return [DatabaseLink(link_json) for link_json in self._get_database_json()]
def genes(self, species=None):
"""Returns any genes for this target.
:param str species: If given, only genes belonging to this species will be returned.
:returns: list of :class:`.Gene` objects."""
if species:
return [Gene(gene_json) for gene_json in self._get_gene_json()
if gene_json["species"] and gene_json["species"].lower() == species.lower()]
else:
return [Gene(gene_json) for gene_json in self._get_gene_json()]
def interactions(self, species=None):
"""Returns any interactions for this target.
:param str species: If given, only interactions belonging to this species will be returned.
:returns: list of :class:`.Interaction` objects."""
if species:
return [Interaction(interaction_json) for interaction_json in self._get_interactions_json()
if interaction_json["targetSpecies"] and interaction_json["targetSpecies"].lower() == species.lower()]
else:
return [Interaction(interaction_json) for interaction_json in self._get_interactions_json()]
get_interaction_by_id = get_interaction_by_id
"""Returns an Interaction object of a given ID belonging to the target.
:param int interaction_id: The interactions's ID.
:rtype: :py:class:`.Interaction`
:raises: :class:`.NoSuchInteractionError`: if no such interaction exists in the database."""
def ligands(self, species=None):
"""Returns any ligands that this target interacts with.
:param str species: If given, only ligands belonging to this species will be returned.
:returns: list of :class:`.DatabaseLink` objects."""
ligands = []
for interaction in self.interactions(species=species):
ligand = interaction.ligand()
if ligand not in ligands:
ligands.append(ligand)
return ligands
@pdb.ask_about_molecupy
def gtop_pdbs(self, species=None):
"""Returns a list of PDBs which the Guide to PHARMACOLOGY says contain
this target.
:param bool as_molecupy: Returns the PDBs as \
`molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
:returns: list of ``str`` PDB codes"""
if species is None:
return [pdb["pdbCode"] for pdb in self._get_pdb_json() if pdb["pdbCode"]]
else:
return [pdb["pdbCode"] for pdb in self._get_pdb_json()
if pdb["pdbCode"] and pdb["species"].lower() == species.lower()]
@pdb.ask_about_molecupy
def uniprot_pdbs(self, species=None):
"""Queries the RSCB PDB database with the targets's uniprot accessions.
:param bool as_molecupy: Returns the PDBs as \
`molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
:param str species: If given, only PDBs belonging to this species will be returned.
:returns: list of ``str`` PDB codes"""
uniprot_accessions = [
link.accession() for link in self.database_links(species=species)
if link.database() == "UniProtKB"
]
if uniprot_accessions:
results = pdb.query_rcsb_advanced("UpAccessionIdQuery", {
"accessionIdList": ",".join(uniprot_accessions)
})
return [result.split(":")[0] for result in results] if results else []
else:
return []
@pdb.ask_about_molecupy
def all_pdbs(self, species=None):
"""Get a list of PDB codes using all means available - annotated and
external.
:param bool as_molecupy: Returns the PDBs as \
`molecuPy <http://molecupy.readthedocs.io>`_ PDB objects.
:param str species: If given, only PDBs belonging to this species will be returned.
:returns: list of ``str`` PDB codes"""
return list(set(
self.gtop_pdbs(species=species) +
self.uniprot_pdbs(species=species)
))
def _get_synonym_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/synonyms" % self._target_id
)
return json_object if json_object else []
def _get_database_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/databaseLinks" % self._target_id
)
return json_object if json_object else []
def _get_gene_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/geneProteinInformation" % self._target_id
)
return json_object if json_object else []
def _get_interactions_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/interactions" % self._target_id
)
return json_object if json_object else []
def _get_pdb_json(self):
json_object = gtop.get_json_from_gtop(
"targets/%i/pdbStructure" % self._target_id
)
return json_object if json_object else []
class TargetFamily:
"""A Guide to PHARMACOLOGY target family object.
:param json_data: A dictionary obtained from the web services."""
def __init__(self, json_data):
self.json_data = json_data
self._family_id = json_data["familyId"]
self._name = json_data["name"]
self._target_ids = json_data["targetIds"]
self._parent_family_ids = json_data["parentFamilyIds"]
self._sub_family_ids = json_data["subFamilyIds"]
def __repr__(self):
return "<'%s' TargetFamily>" % self._name
def family_id(self):
"""Returns the family's GtoP ID.
:rtype: int"""
return self._family_id
@strip_html
def name(self):
"""Returns the family's name.
:param bool strip_html: If ``True``, the name will have HTML entities stripped.
:rtype: str"""
return self._name
def target_ids(self):
"""Returns the the target IDs of all targets in this family. Note that only
immediate children are shown - if a family has subfamilies then it will
not return any targets here - you must look in the sub-families.
:returns: list of ``int``"""
return self._target_ids
def targets(self):
"""Returns a list of all targets in this family. Note that only
immediate children are shown - if a family has subfamilies then it will
not return any targets here - you must look in the sub-families.
:returns: list of :py:class:`Target` objects"""
return [get_target_by_id(i) for i in self._target_ids]
def parent_family_ids(self):
"""Returns the the target IDs of all target families of which this
family is a member.
:returns: list of ``int``"""
return self._parent_family_ids
def parent_families(self):
"""Returns a list of all target families of which this family is a member.
:returns: list of :py:class:`TargetFamily` objects"""
return [get_target_family_by_id(i) for i in self._parent_family_ids]
def sub_family_ids(self):
"""Returns the the target IDs of all arget families which are a member
of this family.
:returns: list of ``int``"""
return self._sub_family_ids
def sub_families(self):
"""Returns a list of all target families which are a member of this family.
:returns: list of :py:class:`TargetFamily` objects"""
return [get_target_family_by_id(i) for i in self._sub_family_ids]
| mit | 6,307,949,579,907,811,000 | 30.678186 | 115 | 0.628281 | false |
easyw/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Fuse/main_generator_SMD.py | 1 | 13644 | # -*- coding: utf8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# This is a
# Dimensions are from Microchips Packaging Specification document:
# DS00000049BY. Body drawing is the same as QFP generator#
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad make_gwexport_fc.py modelName
## e.g. c:\freecad\bin\freecad make_gw_export_fc.py SOIC_8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
__title__ = "make chip Resistors 3D models"
__author__ = "maurice"
__Comment__ = 'make chip Resistors 3D models exported to STEP and VRML for Kicad StepUP script'
___ver___ = "1.3.2 10/02/2017"
# thanks to Frank Severinsen Shack for including vrml materials
# maui import cadquery as cq
# maui from Helpers import show
from math import tan, radians, sqrt
from collections import namedtuple
global save_memory
save_memory = False #reducing memory consuming for all generation params
import sys, os
import datetime
from datetime import datetime
sys.path.append("../_tools")
import exportPartToVRML as expVRML
import shaderColors
body_color_key = "white body"
body_color = shaderColors.named_colors[body_color_key].getDiffuseFloat()
pins_color_key = "metal grey pins"
pins_color = shaderColors.named_colors[pins_color_key].getDiffuseFloat()
top_color_key = "resistor black body"
top_color = shaderColors.named_colors[top_color_key].getDiffuseFloat()
# maui start
import FreeCAD, Draft, FreeCADGui
import ImportGui
import FreeCADGui as Gui
import yaml
#from Gui.Command import *
outdir=os.path.dirname(os.path.realpath(__file__)+"/../_3Dmodels")
scriptdir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(outdir)
sys.path.append(scriptdir)
if FreeCAD.GuiUp:
from PySide import QtCore, QtGui
# Licence information of the generated models.
#################################################################################################
STR_licAuthor = "kicad StepUp"
STR_licEmail = "ksu"
STR_licOrgSys = "kicad StepUp"
STR_licPreProc = "OCC"
STR_licOrg = "FreeCAD"
LIST_license = ["",]
#################################################################################################
try:
# Gui.SendMsgToActiveView("Run")
#from Gui.Command import *
Gui.activateWorkbench("CadQueryWorkbench")
import cadquery as cq
from Helpers import show
# CadQuery Gui
except Exception as e: # catch *all* exceptions
print(e)
msg="missing CadQuery 0.3.0 or later Module!\r\n\r\n"
msg+="https://github.com/jmwright/cadquery-freecad-module/wiki\n"
reply = QtGui.QMessageBox.information(None,"Info ...",msg)
# maui end
# Import cad_tools
from cqToolsExceptions import *
# Import cad_tools
import cq_cad_tools
# Explicitly load all needed functions
from cq_cad_tools import FuseObjs_wColors, GetListOfObjects, restore_Main_Tools, \
exportSTEP, close_CQ_Example, exportVRML, saveFCdoc, z_RotateObject, Color_Objects, \
CutObjs_wColors, checkRequirements
#checking requirements
checkRequirements(cq)
# Sphinx workaround #1
try:
QtGui
except NameError:
QtGui = None
#
try:
close_CQ_Example(App, Gui)
except: # catch *all* exceptions
print("CQ 030 doesn't open example file")
def make_chip(model, all_params):
# dimensions for chip capacitors
length = all_params[model]['length'] # package length
width = all_params[model]['width'] # package width
height = all_params[model]['height'] # package height
pin_band = all_params[model]['pin_band'] # pin band
pin_thickness = all_params[model]['pin_thickness'] # pin thickness
if pin_thickness == 'auto':
pin_thickness = height/10.
edge_fillet = all_params[model]['edge_fillet'] # fillet of edges
if edge_fillet == 'auto':
edge_fillet = pin_thickness
# Create a 3D box based on the dimension variables above and fillet it
case = cq.Workplane("XY").workplane(offset=pin_thickness). \
box(length-2*pin_thickness, width, height-2*pin_thickness,centered=(True, True, False))
top = cq.Workplane("XY").workplane(offset=height-pin_thickness).box(length-2*pin_band, width, pin_thickness,centered=(True, True, False))
# Create a 3D box based on the dimension variables above and fillet it
pin1 = cq.Workplane("XY").box(pin_band, width, height)
pin1.edges("|Y").fillet(edge_fillet)
pin1=pin1.translate((-length/2+pin_band/2,0,height/2)).rotate((0,0,0), (0,0,1), 0)
pin2 = cq.Workplane("XY").box(pin_band, width, height)
pin2.edges("|Y").fillet(edge_fillet)
pin2=pin2.translate((length/2-pin_band/2,0,height/2)).rotate((0,0,0), (0,0,1), 0)
pins = pin1.union(pin2)
#body_copy.ShapeColor=result.ShapeColor
# extract case from pins
# case = case.cut(pins)
pins = pins.cut(case)
return (case, top, pins)
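# Sketch of the per-model YAML parameters consumed by make_chip and the main
# loop below (key names are the ones actually read; the values are
# placeholders, not a real footprint):
#
#     Fuse_Example:
#         length: 3.2
#         width: 1.6
#         height: 0.6
#         pin_band: 0.5
#         pin_thickness: auto    # falls back to height/10
#         edge_fillet: auto      # falls back to pin_thickness
#         rotation: 0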
#import step_license as L
import add_license as Lic
if __name__ == "__main__" or __name__ == "main_generator_SMD":
destination_dir = '/Fuse.3dshapes'
expVRML.say(expVRML.__file__)
FreeCAD.Console.PrintMessage('\r\nRunning...\r\n')
full_path=os.path.realpath(__file__)
expVRML.say(full_path)
scriptdir=os.path.dirname(os.path.realpath(__file__))
expVRML.say(scriptdir)
sub_path = full_path.split(scriptdir)
expVRML.say(sub_path)
sub_dir_name =full_path.split(os.sep)[-2]
expVRML.say(sub_dir_name)
sub_path = full_path.split(sub_dir_name)[0]
expVRML.say(sub_path)
models_dir=sub_path+"_3Dmodels"
#expVRML.say(models_dir)
#stop
try:
with open('cq_parameters_SMD.yaml', 'r') as f:
all_params = yaml.load(f)
except yaml.YAMLError as exc:
print(exc)
from sys import argv
models = []
if len(sys.argv) < 3:
FreeCAD.Console.PrintMessage('No variant name is given! building:\n')
model_to_build = list(all_params.keys())[0]
print(model_to_build)
else:
model_to_build = sys.argv[2]
if model_to_build == "all":
models = all_params
save_memory=True
else:
models = [model_to_build]
for model in models:
if not model in all_params.keys():
print("Parameters for %s doesn't exist in 'all_params', skipping." % model)
continue
ModelName = model
CheckedModelName = ModelName.replace('.', '').replace('-', '_').replace('(', '').replace(')', '')
Newdoc = App.newDocument(CheckedModelName)
App.setActiveDocument(CheckedModelName)
Gui.ActiveDocument=Gui.getDocument(CheckedModelName)
        # unpack in the order returned by make_chip: (case, top, pins)
        case, top, pins = make_chip(model, all_params)
        show(case)
        show(top)
        show(pins)
doc = FreeCAD.ActiveDocument
objs = GetListOfObjects(FreeCAD, doc)
Color_Objects(Gui,objs[0],body_color)
Color_Objects(Gui,objs[1],top_color)
Color_Objects(Gui,objs[2],pins_color)
col_body=Gui.ActiveDocument.getObject(objs[0].Name).DiffuseColor[0]
col_top=Gui.ActiveDocument.getObject(objs[1].Name).DiffuseColor[0]
col_pin=Gui.ActiveDocument.getObject(objs[2].Name).DiffuseColor[0]
material_substitutions={
col_body[:-1]:body_color_key,
col_pin[:-1]:pins_color_key,
col_top[:-1]:top_color_key
}
expVRML.say(material_substitutions)
del objs
objs=GetListOfObjects(FreeCAD, doc)
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
objs=GetListOfObjects(FreeCAD, doc)
FuseObjs_wColors(FreeCAD, FreeCADGui, doc.Name, objs[0].Name, objs[1].Name)
doc.Label = CheckedModelName
objs=GetListOfObjects(FreeCAD, doc)
objs[0].Label = CheckedModelName
restore_Main_Tools()
#rotate if required
rotation = all_params[model]['rotation']
if (rotation!=0):
z_RotateObject(doc, rotation)
#out_dir=destination_dir+all_params[variant].dest_dir_prefix+'/'
script_dir=os.path.dirname(os.path.realpath(__file__))
#models_dir=script_dir+"/../_3Dmodels"
expVRML.say(models_dir)
out_dir=models_dir+destination_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
#out_dir="./generated_qfp/"
# export STEP model
exportSTEP(doc, ModelName, out_dir)
if LIST_license[0]=="":
LIST_license=Lic.LIST_int_license
LIST_license.append("")
Lic.addLicenseToStep(out_dir+'/', ModelName+".step", LIST_license,\
STR_licAuthor, STR_licEmail, STR_licOrgSys, STR_licOrg, STR_licPreProc)
# scale and export Vrml model
scale=1/2.54
#exportVRML(doc,ModelName,scale,out_dir)
objs=GetListOfObjects(FreeCAD, doc)
expVRML.say("######################################################################")
expVRML.say(objs)
expVRML.say("######################################################################")
export_objects, used_color_keys = expVRML.determineColors(Gui, objs, material_substitutions)
export_file_name=out_dir+os.sep+ModelName+'.wrl'
colored_meshes = expVRML.getColoredMesh(Gui, export_objects , scale)
expVRML.writeVRMLFile(colored_meshes, export_file_name, used_color_keys, LIST_license)
# Save the doc in Native FC format
if save_memory == False:
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewAxometric()
# Save the doc in Native FC format
saveFCdoc(App, Gui, doc, ModelName,out_dir, False)
check_Model=True
if save_memory == True or check_Model==True:
doc=FreeCAD.ActiveDocument
FreeCAD.closeDocument(doc.Name)
step_path=os.path.join(out_dir,ModelName+u'.step')
if check_Model==True:
#ImportGui.insert(step_path,ModelName)
ImportGui.open(step_path)
docu = FreeCAD.ActiveDocument
if cq_cad_tools.checkUnion(docu) == True:
FreeCAD.Console.PrintMessage('step file is correctly Unioned\n')
else:
FreeCAD.Console.PrintError('step file is NOT Unioned\n')
stop
FC_majorV=int(FreeCAD.Version()[0])
FC_minorV=int(FreeCAD.Version()[1][0:FreeCAD.Version()[1].find('.')])
print("Minor version: "+str(FC_minorV))
if FC_majorV == 0 and FC_minorV >= 17:
for o in docu.Objects:
if hasattr(o,'Shape'):
chks=cq_cad_tools.checkBOP(o.Shape)
print ('chks ',chks)
print (cq_cad_tools.mk_string(o.Label))
if chks != True:
msg='shape \''+o.Name+'\' \''+cq_cad_tools.mk_string(o.Label)+'\' is INVALID!\n'
FreeCAD.Console.PrintError(msg)
FreeCAD.Console.PrintWarning(chks[0])
stop
else:
msg='shape \''+o.Name+'\' \''+cq_cad_tools.mk_string(o.Label)+'\' is valid\n'
FreeCAD.Console.PrintMessage(msg)
else:
FreeCAD.Console.PrintError('BOP check requires FC 0.17+\n')
# Save the doc in Native FC format
saveFCdoc(App, Gui, docu, ModelName,out_dir, False)
doc=FreeCAD.ActiveDocument
FreeCAD.closeDocument(doc.Name)
| gpl-2.0 | -5,632,057,004,259,708,000 | 38.547826 | 141 | 0.591469 | false |
kalyptorisk/daversy | src/daversy/db/oracle/index.py | 1 | 3580 | from daversy.utils import *
from daversy.db.object import Index, IndexColumn
class IndexColumnBuilder(object):
""" Represents a builder for a column in an index. """
DbClass = IndexColumn
XmlTag = 'index-column'
Query = """
SELECT c.column_name, lower(c.descend) AS sort, i.index_name,
i.table_name, c.column_position AS position,
e.column_expression AS expression
FROM sys.user_indexes i, sys.user_ind_columns c,
sys.user_ind_expressions e
WHERE i.index_name = c.index_name
AND i.table_name = c.table_name
AND c.index_name = e.index_name (+)
AND c.column_position = e.column_position (+)
ORDER BY i.index_name, c.column_position
"""
PropertyList = odict(
('COLUMN_NAME', Property('name')),
('SORT', Property('sort')),
('EXPRESSION', Property('expression', exclude=True)),
('INDEX_NAME', Property('index-name', exclude=True)),
('TABLE_NAME', Property('table-name', exclude=True)),
('POSITION', Property('position', exclude=True)),
)
@staticmethod
def addToState(state, column):
table = state.tables.get(column['table-name'])
real = table and table.columns.get(column.name)
if column.expression and not real: # function-based columns have no name
column.name = column.expression
index = state.indexes.get(column['index-name'])
if index:
index.columns[column.name] = column
class IndexBuilder(object):
""" Represents a builder for a index on a table. """
DbClass = Index
XmlTag = 'index'
Query = """
SELECT i.index_name, i.table_name,
decode(i.uniqueness, 'UNIQUE', 'true', 'false') AS is_unique,
decode(i.index_type, 'BITMAP', 'true') AS is_bitmap,
DECODE(i.compression, 'ENABLED', i.prefix_length) AS "COMPRESS"
FROM sys.user_indexes i
WHERE i.index_type IN ('NORMAL', 'FUNCTION-BASED NORMAL', 'BITMAP')
ORDER BY i.index_name
"""
PropertyList = odict(
('INDEX_NAME', Property('name')),
('IS_UNIQUE', Property('unique')),
('IS_BITMAP', Property('bitmap')),
('TABLE_NAME', Property('table-name')),
('COMPRESS', Property('compress'))
)
@staticmethod
def addToState(state, index):
# ensure that the table exists and the index is not for a PK/UK
table = state.tables.get(index['table-name'])
if table:
if table.primary_keys.has_key(index.name) or table.unique_keys.has_key(index.name):
return
state.indexes[index.name] = index
@staticmethod
def isAllowed(state, index):
return state.tables.get(index['table-name'])
@staticmethod
def createSQL(index):
sql = "CREATE %(unique)s %(bitmap)s INDEX %(name)s ON %(table-name)s (\n" \
" %(column_sql)s\n)%(suffix)s\n/\n"
column_def = ["%(name)-30s %(sort)s" % column for column
in index.columns.values()]
column_sql = ",\n ".join(column_def)
unique = index.unique == 'true' and 'UNIQUE' or ''
bitmap = index.bitmap == 'true' and 'BITMAP' or ''
suffix = ''
if index.compress:
suffix = ' COMPRESS '+index.compress
return render(sql, index, unique=unique, bitmap=bitmap,
suffix=suffix, column_sql=column_sql)
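# Illustrative createSQL output for a hypothetical unique, compressed index
# (identifiers invented; spacing follows the template above):
#   CREATE UNIQUE  INDEX EMP_NAME_IDX ON EMPLOYEES (
#     LAST_NAME                      asc,
#     FIRST_NAME                     asc
#   ) COMPRESS 1
#   /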
| gpl-2.0 | 1,298,916,235,426,265,300 | 35.907216 | 95 | 0.569832 | false |
patrick91/pycon | backend/notifications/aws.py | 1 | 2184 | import typing
from urllib.parse import urljoin
import boto3
from django.conf import settings
from newsletters.exporter import Endpoint
from users.models import User
def _get_client():
return boto3.client("pinpoint", region_name="eu-central-1")
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n] # noqa
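# Doctest-style sketch:
#     >>> list(chunks([1, 2, 3, 4, 5], 2))
#     [[1, 2], [3, 4], [5]]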
def send_endpoints_to_pinpoint(endpoints: typing.Iterable[Endpoint]):
    # batch only supports 100 at a time
endpoint_chunks = chunks(list(endpoints), 100)
for endpoints_chunk in endpoint_chunks:
data = {"Item": [endpoint.to_item() for endpoint in endpoints_chunk]}
client = _get_client()
client.update_endpoints_batch(
ApplicationId=settings.PINPOINT_APPLICATION_ID, EndpointBatchRequest=data
)
def send_notification(
template_name: str,
users: typing.List[User],
substitutions: typing.Dict[str, typing.List[str]],
):
client = _get_client()
client.send_users_messages(
ApplicationId=settings.PINPOINT_APPLICATION_ID,
SendUsersMessageRequest={
"MessageConfiguration": {
"EmailMessage": {
"FromAddress": "[email protected]",
"Substitutions": substitutions,
}
},
"TemplateConfiguration": {"EmailTemplate": {"Name": template_name}},
"Users": {str(user.id): {} for user in users},
},
)
# TODO: validate that it has been sent correctly
def send_comment_notification(comment):
submission = comment.submission
users: typing.Set[User] = set([submission.speaker])
# also send notification to all other commenters
users = users.union(set([comment.author for comment in submission.comments.all()]))
# don't notify current user
users.discard(comment.author)
if not users:
return
submission_url = urljoin(
settings.FRONTEND_URL, f"/en/submission/{submission.hashid}"
)
substitutions = {
"submission_url": [submission_url],
"submission": [submission.title],
}
send_notification("pycon-11-new-comment-on-submission", users, substitutions)
| mit | 7,613,541,312,755,594,000 | 27.736842 | 87 | 0.642399 | false |
jcushman/pywb | pywb/rewrite/cookie_rewriter.py | 1 | 4929 | from Cookie import SimpleCookie, CookieError
#=================================================================
class WbUrlBaseCookieRewriter(object):
""" Base Cookie rewriter for wburl-based requests.
"""
def __init__(self, url_rewriter):
self.url_rewriter = url_rewriter
def rewrite(self, cookie_str, header='Set-Cookie'):
results = []
cookie = SimpleCookie()
try:
cookie.load(cookie_str)
except CookieError:
return results
for name, morsel in cookie.iteritems():
morsel = self.rewrite_cookie(name, morsel)
if morsel:
path = morsel.get('path')
if path:
inx = path.find(self.url_rewriter.rel_prefix)
if inx > 0:
morsel['path'] = path[inx:]
results.append((header, morsel.OutputString()))
return results
def _remove_age_opts(self, morsel):
# remove expires as it refers to archived time
if morsel.get('expires'):
del morsel['expires']
# don't use max-age, just expire at end of session
if morsel.get('max-age'):
del morsel['max-age']
# for now, also remove secure to avoid issues when
# proxying over plain http (TODO: detect https?)
if morsel.get('secure'):
del morsel['secure']
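# Behaviour sketch for the minimal-scope subclass below (header values are
# illustrative, with rel_prefix assumed to be '/web/'):
#   Set-Cookie: sid=abc; Domain=example.com; Path=/; Secure
# is rewritten to roughly [('Set-Cookie', 'sid=abc; Path=/web/')], since a
# domain forces the path up to the collection prefix and the age and secure
# options are stripped by _remove_age_opts.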
#=================================================================
class RemoveAllCookiesRewriter(WbUrlBaseCookieRewriter):
def rewrite(self, cookie_str, header='Set-Cookie'):
return []
#=================================================================
class MinimalScopeCookieRewriter(WbUrlBaseCookieRewriter):
"""
Attempt to rewrite cookies to minimal scope possible
If path present, rewrite path to current rewritten url only
If domain present, remove domain and set to path prefix
"""
def rewrite_cookie(self, name, morsel):
# if domain set, no choice but to expand cookie path to root
if morsel.get('domain'):
del morsel['domain']
morsel['path'] = self.url_rewriter.rel_prefix
# else set cookie to rewritten path
elif morsel.get('path'):
morsel['path'] = self.url_rewriter.rewrite(morsel['path'])
self._remove_age_opts(morsel)
return morsel
#=================================================================
class HostScopeCookieRewriter(WbUrlBaseCookieRewriter):
"""
Attempt to rewrite cookies to current host url..
If path present, rewrite path to current host. Only makes sense in live
proxy or no redirect mode, as otherwise timestamp may change.
If domain present, remove domain and set to path prefix
"""
def rewrite_cookie(self, name, morsel):
# if domain set, expand cookie to host prefix
if morsel.get('domain'):
del morsel['domain']
morsel['path'] = self.url_rewriter.rewrite('/')
# set cookie to rewritten path
elif morsel.get('path'):
morsel['path'] = self.url_rewriter.rewrite(morsel['path'])
self._remove_age_opts(morsel)
return morsel
#=================================================================
class ExactPathCookieRewriter(WbUrlBaseCookieRewriter):
"""
Rewrite cookies only using exact path, useful for live rewrite
without a timestamp and to minimize cookie pollution
If path or domain present, simply remove
"""
def rewrite_cookie(self, name, morsel):
if morsel.get('domain'):
del morsel['domain']
# else set cookie to rewritten path
if morsel.get('path'):
del morsel['path']
self._remove_age_opts(morsel)
return morsel
#=================================================================
class RootScopeCookieRewriter(WbUrlBaseCookieRewriter):
"""
Sometimes it is necessary to rewrite cookies to root scope
in order to work across time boundaries and modifiers
This rewriter simply sets all cookies to be in the root
"""
def rewrite_cookie(self, name, morsel):
# get root path
morsel['path'] = self.url_rewriter.root_path
# remove domain
if morsel.get('domain'):
del morsel['domain']
self._remove_age_opts(morsel)
return morsel
#=================================================================
def get_cookie_rewriter(cookie_scope):
if cookie_scope == 'root':
return RootScopeCookieRewriter
elif cookie_scope == 'exact':
return ExactPathCookieRewriter
elif cookie_scope == 'host':
return HostScopeCookieRewriter
elif cookie_scope == 'removeall':
return RemoveAllCookiesRewriter
elif cookie_scope == 'coll':
return MinimalScopeCookieRewriter
else:
return HostScopeCookieRewriter
| gpl-3.0 | -1,616,446,436,483,166,000 | 31.006494 | 75 | 0.564212 | false |
colloquium/spacewalk | client/solaris/smartpm/smart/interfaces/gtk/interactive.py | 1 | 31919 | #
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.transaction import INSTALL, REMOVE, UPGRADE, REINSTALL, KEEP, FIX
from smart.transaction import Transaction, ChangeSet, checkPackages
from smart.transaction import PolicyInstall, PolicyRemove, PolicyUpgrade
from smart.interfaces.gtk.channels import GtkChannels, GtkChannelSelector
from smart.interfaces.gtk.mirrors import GtkMirrors
from smart.interfaces.gtk.flags import GtkFlags
from smart.interfaces.gtk.priorities import GtkPriorities, GtkSinglePriority
from smart.interfaces.gtk.packageview import GtkPackageView
from smart.interfaces.gtk.packageinfo import GtkPackageInfo
from smart.interfaces.gtk.interface import GtkInterface
from smart.interfaces.gtk import getPixbuf
from smart.const import NEVER, VERSION
from smart.searcher import Searcher
from smart.cache import Package
from smart import *
import shlex, re
import fnmatch
import gtk
UI = """
<ui>
<menubar>
<menu action="file">
<menuitem action="update-selected-channels"/>
<menuitem action="update-channels"/>
<separator/>
<menuitem action="rebuild-cache"/>
<separator/>
<menuitem action="exec-changes"/>
<separator/>
<menuitem action="quit"/>
</menu>
<menu action="edit">
<menuitem action="undo"/>
<menuitem action="redo"/>
<menuitem action="clear-changes"/>
<separator/>
<menuitem action="upgrade-all"/>
<menuitem action="fix-all-problems"/>
<separator/>
<menuitem action="check-installed-packages"/>
<menuitem action="check-uninstalled-packages"/>
<menuitem action="check-all-packages"/>
<separator/>
<menuitem action="find"/>
<separator/>
<menuitem action="edit-channels"/>
<menuitem action="edit-mirrors"/>
<menuitem action="edit-flags"/>
<menuitem action="edit-priorities"/>
</menu>
<menu action="view">
<menuitem action="hide-non-upgrades"/>
<menuitem action="hide-installed"/>
<menuitem action="hide-uninstalled"/>
<menuitem action="hide-unmarked"/>
<menuitem action="hide-old"/>
<separator/>
<menuitem action="expand-all"/>
<menuitem action="collapse-all"/>
<separator/>
<menu action="tree-style">
<menuitem action="tree-style-groups"/>
<menuitem action="tree-style-channels"/>
<menuitem action="tree-style-channels-groups"/>
<menuitem action="tree-style-none"/>
</menu>
<separator/>
<menuitem action="summary-window"/>
<menuitem action="log-window"/>
</menu>
</menubar>
<toolbar>
<toolitem action="update-channels"/>
<separator/>
<toolitem action="exec-changes"/>
<separator/>
<toolitem action="undo"/>
<toolitem action="redo"/>
<toolitem action="clear-changes"/>
<separator/>
<toolitem action="upgrade-all"/>
<separator/>
<toolitem action="find"/>
</toolbar>
</ui>
"""
ACTIONS = [
("file", None, _("_File")),
("update-selected-channels", "gtk-refresh", _("Update _Selected Channels..."), None,
_("Update given channels"), "self.updateChannels(True)"),
("update-channels", "gtk-refresh", _("_Update Channels"), None,
_("Update channels"), "self.updateChannels()"),
("rebuild-cache", None, _("_Rebuild Cache"), None,
_("Reload package information"), "self.rebuildCache()"),
("exec-changes", "gtk-execute", _("_Execute Changes..."), "<control>c",
_("Apply marked changes"), "self.applyChanges()"),
("quit", "gtk-quit", _("_Quit"), "<control>q",
_("Quit application"), "gtk.main_quit()"),
("edit", None, _("_Edit")),
("undo", "gtk-undo", _("_Undo"), "<control>z",
_("Undo last change"), "self.undo()"),
("redo", "gtk-redo", _("_Redo"), "<control><shift>z",
_("Redo last undone change"), "self.redo()"),
("clear-changes", "gtk-clear", _("Clear Marked Changes"), None,
_("Clear all changes"), "self.clearChanges()"),
("check-installed-packages", None, _("Check Installed Packages..."), None,
_("Check installed packages"), "self.checkPackages()"),
("check-uninstalled-packages", None, _("Check Uninstalled Packages..."), None,
_("Check uninstalled packages"), "self.checkPackages(uninstalled=True)"),
("check-all-packages", None, _("Check All Packages..."), None,
_("Check all packages"), "self.checkPackages(all=True)"),
("upgrade-all", "gtk-go-up", _("Upgrade _All..."), None,
_("Upgrade all packages"), "self.upgradeAll()"),
("fix-all-problems", None, _("Fix All _Problems..."), None,
_("Fix all problems"), "self.fixAllProblems()"),
("find", "gtk-find", _("_Find..."), "<control>f",
_("Find packages"), "self.toggleSearch()"),
("edit-channels", None, _("_Channels"), None,
_("Edit channels"), "self.editChannels()"),
("edit-mirrors", None, _("_Mirrors"), None,
_("Edit mirrors"), "self.editMirrors()"),
("edit-flags", None, _("_Flags"), None,
_("Edit package flags"), "self.editFlags()"),
("edit-priorities", None, _("_Priorities"), None,
_("Edit package priorities"), "self.editPriorities()"),
("view", None, _("_View")),
("tree-style", None, _("_Tree Style")),
("expand-all", "gtk-open", _("_Expand All"), None,
_("Expand all items in the tree"), "self._pv.getTreeView().expand_all()"),
("collapse-all", "gtk-close", _("_Collapse All"), None,
_("Collapse all items in the tree"), "self._pv.getTreeView().collapse_all()"),
("summary-window", None, _("_Summary Window"), "<control>s",
_("Show summary window"), "self.showChanges()"),
("log-window", None, _("_Log Window"), None,
_("Show log window"), "self._log.show()"),
]
def compileActions(actions, globals):
newactions = []
for action in actions:
if len(action) > 5:
action = list(action)
code = compile(action[5], "<callback>", "exec")
def callback(action, code=code, globals=globals):
globals["action"] = action
exec code in globals
action[5] = callback
newactions.append(tuple(action))
return newactions
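# Illustrative note (not part of the original tables above): the sixth field of an
# ACTIONS tuple is a Python statement in string form; compileActions() compiles it
# into a callback suitable for gtk.ActionGroup.add_actions().  Roughly:
#
#     acts = compileActions([("quit", "gtk-quit", _("_Quit"), "<control>q",
#                             _("Quit application"), "gtk.main_quit()")],
#                           {"self": None, "gtk": gtk})
#     acts[0][5]   # a function(action) that executes "gtk.main_quit()"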
class GtkInteractiveInterface(GtkInterface):
def __init__(self, ctrl):
GtkInterface.__init__(self, ctrl)
self._changeset = None
self._window = gtk.Window()
self._window.set_title("Smart Package Manager %s" % VERSION)
self._window.set_position(gtk.WIN_POS_CENTER)
self._window.set_geometry_hints(min_width=640, min_height=480)
self._window.connect("destroy", lambda x: gtk.main_quit())
self._log.set_transient_for(self._window)
self._progress.set_transient_for(self._window)
self._hassubprogress.set_transient_for(self._window)
self._watch = gtk.gdk.Cursor(gtk.gdk.WATCH)
self._undo = []
self._redo = []
self._topvbox = gtk.VBox()
self._topvbox.show()
self._window.add(self._topvbox)
globals = {"self": self, "gtk": gtk}
self._actions = gtk.ActionGroup("Actions")
self._actions.add_actions(compileActions(ACTIONS, globals))
self._filters = {}
for name, label in [("hide-non-upgrades", _("Hide Non-upgrades")),
("hide-installed", _("Hide Installed")),
("hide-uninstalled", _("Hide Uninstalled")),
("hide-unmarked", _("Hide Unmarked")),
("hide-old", _("Hide Old"))]:
action = gtk.ToggleAction(name, label, "", "")
action.connect("toggled", lambda x, y: self.toggleFilter(y), name)
self._actions.add_action(action)
treestyle = sysconf.get("package-tree")
lastaction = None
for name, label in [("groups", _("Groups")),
("channels", _("Channels")),
("channels-groups", _("Channels & Groups")),
("none", _("None"))]:
action = gtk.RadioAction("tree-style-"+name, label, "", "", 0)
if name == treestyle:
action.set_active(True)
if lastaction:
action.set_group(lastaction)
lastaction = action
action.connect("toggled", lambda x, y: self.setTreeStyle(y), name)
self._actions.add_action(action)
self._ui = gtk.UIManager()
self._ui.insert_action_group(self._actions, 0)
self._ui.add_ui_from_string(UI)
self._menubar = self._ui.get_widget("/menubar")
self._topvbox.pack_start(self._menubar, False)
self._toolbar = self._ui.get_widget("/toolbar")
self._toolbar.set_style(gtk.TOOLBAR_ICONS)
self._topvbox.pack_start(self._toolbar, False)
self._window.add_accel_group(self._ui.get_accel_group())
self._execmenuitem = self._ui.get_action("/menubar/file/exec-changes")
self._execmenuitem.set_property("sensitive", False)
self._clearmenuitem = self._ui.get_action("/menubar/edit/clear-changes")
self._clearmenuitem.set_property("sensitive", False)
self._undomenuitem = self._ui.get_action("/menubar/edit/undo")
self._undomenuitem.set_property("sensitive", False)
self._redomenuitem = self._ui.get_action("/menubar/edit/redo")
self._redomenuitem.set_property("sensitive", False)
# Search bar
self._searchbar = gtk.Alignment()
self._searchbar.set(0, 0, 1, 1)
self._searchbar.set_padding(3, 3, 0, 0)
self._topvbox.pack_start(self._searchbar, False)
searchvp = gtk.Viewport()
searchvp.set_shadow_type(gtk.SHADOW_OUT)
searchvp.show()
self._searchbar.add(searchvp)
searchtable = gtk.Table(1, 1)
searchtable.set_row_spacings(5)
searchtable.set_col_spacings(5)
searchtable.set_border_width(5)
searchtable.show()
searchvp.add(searchtable)
label = gtk.Label(_("Search:"))
label.show()
searchtable.attach(label, 0, 1, 0, 1, 0, 0)
self._searchentry = gtk.Entry()
self._searchentry.connect("activate", lambda x: self.refreshPackages())
self._searchentry.show()
searchtable.attach(self._searchentry, 1, 2, 0, 1)
button = gtk.Button()
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", lambda x: self.refreshPackages())
button.show()
searchtable.attach(button, 2, 3, 0, 1, 0, 0)
image = gtk.Image()
image.set_from_stock("gtk-find", gtk.ICON_SIZE_BUTTON)
image.show()
button.add(image)
align = gtk.Alignment()
align.set(1, 0, 0, 0)
align.set_padding(0, 0, 10, 0)
align.show()
searchtable.attach(align, 3, 4, 0, 1, gtk.FILL, gtk.FILL)
button = gtk.Button()
button.set_size_request(20, 20)
button.set_relief(gtk.RELIEF_NONE)
button.connect("clicked", lambda x: self.toggleSearch())
button.show()
align.add(button)
image = gtk.Image()
image.set_from_stock("gtk-close", gtk.ICON_SIZE_MENU)
image.show()
button.add(image)
hbox = gtk.HBox()
hbox.set_spacing(10)
hbox.show()
searchtable.attach(hbox, 1, 2, 1, 2)
self._searchname = gtk.RadioButton(None, _("Automatic"))
self._searchname.set_active(True)
self._searchname.connect("clicked", lambda x: self.refreshPackages())
self._searchname.show()
hbox.pack_start(self._searchname, False)
self._searchdesc = gtk.RadioButton(self._searchname, _("Description"))
self._searchdesc.connect("clicked", lambda x: self.refreshPackages())
self._searchdesc.show()
hbox.pack_start(self._searchdesc, False)
# Packages and information
self._vpaned = gtk.VPaned()
self._vpaned.show()
self._topvbox.pack_start(self._vpaned)
self._pv = GtkPackageView()
self._pv.show()
self._vpaned.pack1(self._pv, True)
self._pi = GtkPackageInfo()
self._pi.show()
self._pv.connect("package_selected",
lambda x, y: self._pi.setPackage(y))
self._pv.connect("package_activated",
lambda x, y: self.actOnPackages(y))
self._pv.connect("package_popup", self.packagePopup)
self._vpaned.pack2(self._pi, False)
self._status = gtk.Statusbar()
self._status.show()
self._topvbox.pack_start(self._status, False)
def showStatus(self, msg):
self._status.pop(0)
self._status.push(0, msg)
while gtk.events_pending():
gtk.main_iteration()
def hideStatus(self):
self._status.pop(0)
while gtk.events_pending():
gtk.main_iteration()
def run(self, command=None, argv=None):
self.setCatchExceptions(True)
self._window.set_icon(getPixbuf("smart"))
self._window.show()
self._ctrl.reloadChannels()
self._changeset = ChangeSet(self._ctrl.getCache())
self._pi.setChangeSet(self._changeset)
self._progress.hide()
self.refreshPackages()
gtk.main()
self.setCatchExceptions(False)
# Non-standard interface methods:
def getChangeSet(self):
return self._changeset
def updateChannels(self, selected=False, channels=None):
if selected:
aliases = GtkChannelSelector().show()
channels = [channel for channel in self._ctrl.getChannels()
if channel.getAlias() in aliases]
if not channels:
return
state = self._changeset.getPersistentState()
self._ctrl.reloadChannels(channels, caching=NEVER)
self._changeset.setPersistentState(state)
self.refreshPackages()
def rebuildCache(self):
state = self._changeset.getPersistentState()
self._ctrl.reloadChannels()
self._changeset.setPersistentState(state)
self.refreshPackages()
def applyChanges(self):
transaction = Transaction(self._ctrl.getCache(),
changeset=self._changeset)
if self._ctrl.commitTransaction(transaction):
del self._undo[:]
del self._redo[:]
self._redomenuitem.set_property("sensitive", False)
self._undomenuitem.set_property("sensitive", False)
self._changeset.clear()
self._ctrl.reloadChannels()
self.refreshPackages()
self.changedMarks()
self._progress.hide()
def clearChanges(self):
self.saveUndo()
self._changeset.clear()
self.changedMarks()
def showChanges(self):
return self._changes.showChangeSet(self._changeset)
def toggleFilter(self, filter):
if filter in self._filters:
del self._filters[filter]
else:
self._filters[filter] = True
self.refreshPackages()
def upgradeAll(self):
transaction = Transaction(self._ctrl.getCache())
transaction.setState(self._changeset)
for pkg in self._ctrl.getCache().getPackages():
if pkg.installed:
transaction.enqueue(pkg, UPGRADE)
transaction.setPolicy(PolicyUpgrade)
transaction.run()
changeset = transaction.getChangeSet()
if changeset != self._changeset:
if self.confirmChange(self._changeset, changeset):
self.saveUndo()
self._changeset.setState(changeset)
self.changedMarks()
if self.askYesNo(_("Apply marked changes now?"), True):
self.applyChanges()
else:
self.showStatus(_("No interesting upgrades available!"))
def actOnPackages(self, pkgs, op=None):
cache = self._ctrl.getCache()
transaction = Transaction(cache, policy=PolicyInstall)
transaction.setState(self._changeset)
changeset = transaction.getChangeSet()
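        # No explicit operation requested: if everything selected is already marked
        # in the changeset, toggle it back to KEEP; otherwise install if any of the
        # selected packages is not installed yet, else remove.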
if op is None:
if not [pkg for pkg in pkgs if pkg not in changeset]:
op = KEEP
else:
for pkg in pkgs:
if not pkg.installed:
op = INSTALL
break
else:
op = REMOVE
if op is REMOVE:
transaction.setPolicy(PolicyRemove)
policy = transaction.getPolicy()
for pkg in pkgs:
if op is KEEP:
transaction.enqueue(pkg, op)
elif op in (REMOVE, REINSTALL, FIX):
if pkg.installed:
transaction.enqueue(pkg, op)
if op is REMOVE:
for _pkg in cache.getPackages(pkg.name):
if not _pkg.installed:
policy.setLocked(_pkg, True)
elif op is INSTALL:
if not pkg.installed:
transaction.enqueue(pkg, op)
transaction.run()
if op is FIX:
expected = 0
else:
expected = 1
if self.confirmChange(self._changeset, changeset, expected):
self.saveUndo()
self._changeset.setState(changeset)
self.changedMarks()
def packagePopup(self, packageview, pkgs, event):
menu = gtk.Menu()
hasinstalled = bool([pkg for pkg in pkgs if pkg.installed
and self._changeset.get(pkg) is not REMOVE])
hasnoninstalled = bool([pkg for pkg in pkgs if not pkg.installed
and self._changeset.get(pkg) is not INSTALL])
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-install"))
item = gtk.ImageMenuItem(_("Install"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, INSTALL))
if not hasnoninstalled:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-reinstall"))
item = gtk.ImageMenuItem(_("Reinstall"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, REINSTALL))
if not hasinstalled:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-remove"))
item = gtk.ImageMenuItem(_("Remove"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, REMOVE))
if not hasinstalled:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
if not hasinstalled:
image.set_from_pixbuf(getPixbuf("package-available"))
else:
image.set_from_pixbuf(getPixbuf("package-installed"))
item = gtk.ImageMenuItem(_("Keep"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, KEEP))
if not [pkg for pkg in pkgs if pkg in self._changeset]:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
image.set_from_pixbuf(getPixbuf("package-broken"))
item = gtk.ImageMenuItem(_("Fix problems"))
item.set_image(image)
item.connect("activate", lambda x: self.actOnPackages(pkgs, FIX))
if not hasinstalled:
item.set_sensitive(False)
menu.append(item)
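        # Work out the lock state of the selection: "thislocked" means this exact
        # version is locked, "alllocked" means the lock covers the package as a
        # whole.  Mixed selections, or packages already marked in the changeset,
        # are flagged inconsistent so the lock menu items get disabled below.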
inconsistent = False
thislocked = None
alllocked = None
names = pkgconf.getFlagTargets("lock")
if [pkg for pkg in pkgs if pkg in self._changeset]:
inconsistent = True
else:
for pkg in pkgs:
if (names and pkg.name in names and
("=", pkg.version) in names[pkg.name]):
newthislocked = True
newalllocked = len(names[pkg.name]) > 1
else:
newthislocked = False
newalllocked = pkgconf.testFlag("lock", pkg)
if (thislocked is not None and thislocked != newthislocked or
alllocked is not None and alllocked != newalllocked):
inconsistent = True
break
thislocked = newthislocked
alllocked = newalllocked
image = gtk.Image()
if thislocked:
item = gtk.ImageMenuItem(_("Unlock this version"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed"))
else:
image.set_from_pixbuf(getPixbuf("package-available"))
def unlock_this(x):
for pkg in pkgs:
pkgconf.clearFlag("lock", pkg.name, "=", pkg.version)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", unlock_this)
else:
item = gtk.ImageMenuItem(_("Lock this version"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed-locked"))
else:
image.set_from_pixbuf(getPixbuf("package-available-locked"))
def lock_this(x):
for pkg in pkgs:
pkgconf.setFlag("lock", pkg.name, "=", pkg.version)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", lock_this)
item.set_image(image)
if inconsistent:
item.set_sensitive(False)
menu.append(item)
image = gtk.Image()
if alllocked:
item = gtk.ImageMenuItem(_("Unlock all versions"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed"))
else:
image.set_from_pixbuf(getPixbuf("package-available"))
def unlock_all(x):
for pkg in pkgs:
pkgconf.clearFlag("lock", pkg.name)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", unlock_all)
else:
item = gtk.ImageMenuItem(_("Lock all versions"))
if not hasnoninstalled:
image.set_from_pixbuf(getPixbuf("package-installed-locked"))
else:
image.set_from_pixbuf(getPixbuf("package-available-locked"))
def lock_all(x):
for pkg in pkgs:
pkgconf.setFlag("lock", pkg.name)
self._pv.queue_draw()
self._pi.setPackage(pkgs[0])
item.connect("activate", lock_all)
item.set_image(image)
if inconsistent:
item.set_sensitive(False)
menu.append(item)
item = gtk.MenuItem(_("Priority"))
def priority(x):
GtkSinglePriority(self._window).show(pkgs[0])
self._pi.setPackage(pkgs[0])
item.connect("activate", priority)
if len(pkgs) != 1:
item.set_sensitive(False)
menu.append(item)
menu.show_all()
menu.popup(None, None, None, event.button, event.time)
def checkPackages(self, all=False, uninstalled=False):
cache = self._ctrl.getCache()
if checkPackages(cache, cache.getPackages(), report=True,
all=all, uninstalled=uninstalled):
self.info(_("All checked packages have correct relations."))
def fixAllProblems(self):
self.actOnPackages([pkg for pkg in self._ctrl.getCache().getPackages()
if pkg.installed], FIX)
def undo(self):
if self._undo:
state = self._undo.pop(0)
if not self._undo:
self._undomenuitem.set_property("sensitive", False)
self._redo.insert(0, self._changeset.getPersistentState())
self._redomenuitem.set_property("sensitive", True)
self._changeset.setPersistentState(state)
self.changedMarks()
def redo(self):
if self._redo:
state = self._redo.pop(0)
if not self._redo:
self._redomenuitem.set_property("sensitive", False)
self._undo.insert(0, self._changeset.getPersistentState())
self._undomenuitem.set_property("sensitive", True)
self._changeset.setPersistentState(state)
self.changedMarks()
def saveUndo(self):
self._undo.insert(0, self._changeset.getPersistentState())
del self._redo[:]
del self._undo[20:]
self._undomenuitem.set_property("sensitive", True)
self._redomenuitem.set_property("sensitive", False)
def setTreeStyle(self, mode):
if mode != sysconf.get("package-tree"):
sysconf.set("package-tree", mode)
self.refreshPackages()
def editChannels(self):
if GtkChannels(self._window).show():
self.rebuildCache()
def editMirrors(self):
GtkMirrors(self._window).show()
def editFlags(self):
GtkFlags(self._window).show()
def editPriorities(self):
GtkPriorities(self._window).show()
def setBusy(self, flag):
if flag:
self._window.window.set_cursor(self._watch)
while gtk.events_pending():
gtk.main_iteration()
else:
self._window.window.set_cursor(None)
def changedMarks(self):
if "hide-unmarked" in self._filters:
self.refreshPackages()
else:
self._pv.queue_draw()
self._execmenuitem.set_property("sensitive", bool(self._changeset))
self._clearmenuitem.set_property("sensitive", bool(self._changeset))
def toggleSearch(self):
visible = not self._searchbar.get_property('visible')
self._searchbar.set_property('visible', visible)
self.refreshPackages()
if visible:
self._searchentry.grab_focus()
def refreshPackages(self):
if not self._ctrl:
return
self.setBusy(True)
tree = sysconf.get("package-tree", "groups")
ctrl = self._ctrl
changeset = self._changeset
if self._searchbar.get_property("visible"):
searcher = Searcher()
dosearch = False
if self._searchdesc.get_active():
text = self._searchentry.get_text().strip()
if text:
dosearch = True
searcher.addDescription(text)
searcher.addSummary(text)
else:
try:
tokens = shlex.split(self._searchentry.get_text())
except ValueError:
pass
else:
if tokens:
dosearch = True
for tok in tokens:
searcher.addAuto(tok)
packages = []
if dosearch:
self._ctrl.getCache().search(searcher)
for ratio, obj in searcher.getResults():
if isinstance(obj, Package):
packages.append(obj)
else:
packages.extend(obj.packages)
else:
packages = ctrl.getCache().getPackages()
filters = self._filters
if filters:
if "hide-non-upgrades" in filters:
newpackages = {}
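                # Keep only packages that would upgrade something currently
                # installed; if one of the candidate upgrades is itself already
                # installed, the whole group is skipped via StopIteration.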
for pkg in packages:
if pkg.installed:
upgpkgs = {}
try:
for prv in pkg.provides:
for upg in prv.upgradedby:
for upgpkg in upg.packages:
if upgpkg.installed:
raise StopIteration
upgpkgs[upgpkg] = True
except StopIteration:
pass
else:
newpackages.update(upgpkgs)
packages = newpackages.keys()
if "hide-uninstalled" in filters:
packages = [x for x in packages if x.installed]
if "hide-unmarked" in filters:
packages = [x for x in packages if x in changeset]
if "hide-installed" in filters:
packages = [x for x in packages if not x.installed]
if "hide-old" in filters:
packages = pkgconf.filterByFlag("new", packages)
if tree == "groups":
groups = {}
done = {}
for pkg in packages:
lastgroup = None
for loader in pkg.loaders:
info = loader.getInfo(pkg)
group = info.getGroup()
donetuple = (group, pkg)
if donetuple not in done:
done[donetuple] = True
if group in groups:
groups[group].append(pkg)
else:
groups[group] = [pkg]
elif tree == "channels":
groups = {}
done = {}
for pkg in packages:
for loader in pkg.loaders:
channel = loader.getChannel()
group = channel.getName() or channel.getAlias()
donetuple = (group, pkg)
if donetuple not in done:
done[donetuple] = True
if group in groups:
groups[group].append(pkg)
else:
groups[group] = [pkg]
elif tree == "channels-groups":
groups = {}
done = {}
for pkg in packages:
for loader in pkg.loaders:
channel = loader.getChannel()
group = channel.getName() or channel.getAlias()
subgroup = loader.getInfo(pkg).getGroup()
donetuple = (group, subgroup, pkg)
if donetuple not in done:
done[donetuple] = True
if group in groups:
if subgroup in groups[group]:
groups[group][subgroup].append(pkg)
else:
groups[group][subgroup] = [pkg]
else:
groups[group] = {subgroup: [pkg]}
else:
groups = packages
self._pv.setPackages(groups, changeset, keepstate=True)
self.setBusy(False)
# vim:ts=4:sw=4:et
| gpl-2.0 | -5,670,332,782,027,367,000 | 37.135006 | 88 | 0.556534 | false |
qbuat/tauperf | old/eff_tools/DecisionTool.py | 1 | 2103 | from ROOT import TMVA
from array import array
from rootpy.extern import ordereddict
import logging
log = logging.getLogger('DecisionTool')
class DecisionTool:
def __init__(self,tree,name,weight_file,var_file,cutval):
""" A class to handle the decision of the BDT"""
TMVA.Tools.Instance()
self._reader = TMVA.Reader()
self._tree = tree
self._variables = {}
self._cutvalue = -1
self._bdtscore = -9999
self._name = name
self._weight_file = weight_file
self._var_file = var_file
self.SetReader(self._name,self._weight_file,self._var_file)
self.SetCutValue(cutval)
# --------------------------
def SetCutValue(self,val):
self._cutvalue = val
# --------------------------------------------
def SetReader(self,name,weight_file,var_file):
self._variables = self.InitVariables(var_file)
for varName, var in self._variables.iteritems():
self._reader.AddVariable(varName,var[1])
self._reader.BookMVA(name,weight_file)
# ----------------------
def InitVariables(self,var_file):
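        # The variable file is expected to contain one "<mva_name>,<tree_branch>"
        # pair per line; lines containing "#" are skipped as comments.  The first
        # field is registered with the TMVA reader, the second names the tree
        # attribute read for every event in BDTScore().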
variables = ordereddict.OrderedDict()
        with open(var_file, 'r') as var_fh:
            for line in var_fh:
                if "#" in line:
                    continue
                words = line.strip().split(',')
                variables[words[0]] = [words[1], array('f', [0.])]
return variables
# -------------------------------------------------
def BDTScore(self):
for varName, var in self._variables.iteritems():
var[1][0] = getattr(self._tree,var[0])
log.info('{0}: {1}'.format(varName, var[1][0]))
return self._reader.EvaluateMVA(self._name)
# --------------------------------------------
def Decision(self):
self._bdtscore = self.BDTScore()
if self._bdtscore>=self._cutvalue:
return True
else:
return False
# ----------------------
def GetBDTScore(self):
self._bdtscore = self.BDTScore()
return self._bdtscore
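# Minimal usage sketch (illustrative only; the file names, tree name and cut value
# below are placeholders, not part of this module):
#
#     from ROOT import TFile
#     rfile = TFile("ntuple.root")        # hypothetical input file
#     tree = rfile.Get("tau")             # hypothetical TTree
#     dt = DecisionTool(tree, "BDT", "weights.xml", "variables.txt", 0.5)
#     for _ in tree:
#         passed = dt.Decision()          # True when the BDT score is above the cut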
| gpl-3.0 | -3,456,458,913,760,876,500 | 30.863636 | 67 | 0.514503 | false |
bioconda/bioconda-utils | bioconda_utils/bot/chat.py | 1 | 5842 | """
Chat with the bot via Gitter
"""
import asyncio
import logging
from typing import Any, Dict, List
import aiohttp
from .. import gitter
from ..gitter import AioGitterAPI
from .commands import command_routes
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
"""
https://webhooks.gitter.im/e/b9e5fad23b9cf034879083a
POST
{ message: 'message', level='error|normal' }
"""
class GitterListener:
"""Listens to messages in a Gitter chat room
Args:
app: Web Server Application
api: Gitter API object
rooms: Map containing rooms and their respective github user/repo
"""
def __init__(self, app: aiohttp.web.Application, token: str, rooms: Dict[str, str],
session: aiohttp.ClientSession, ghappapi) -> None:
self.rooms = rooms
self._ghappapi = ghappapi
self._api = AioGitterAPI(app['client_session'], token)
self._user: gitter.User = None
self._tasks: List[Any] = []
self._session = session
app.on_startup.append(self.start)
app.on_shutdown.append(self.shutdown)
def __str__(self) -> str:
return f"{self.__class__.__name__}"
async def start(self, app: aiohttp.web.Application) -> None:
"""Start listeners"""
self._user = await self._api.get_user()
logger.debug("%s: User Info: %s", self, self._user)
for room in await self._api.list_rooms():
logger.debug("%s: Room Info: %s", self, room)
logger.debug("%s: Groups Info: %s", self, await self._api.list_groups())
self._tasks = [app.loop.create_task(self.listen(room))
for room in self.rooms]
async def shutdown(self, _app: aiohttp.web.Application) -> None:
"""Send cancel signal to listener"""
logger.info("%s: Shutting down listeners", self)
for task in self._tasks:
task.cancel()
for task in self._tasks:
await task
logger.info("%s: Shut down all listeners", self)
async def listen(self, room_name: str) -> None:
"""Main run loop"""
try:
user, repo = self.rooms[room_name].split('/')
logger.error("Listening in %s for repo %s/%s", room_name, user, repo)
message = None
while True:
try:
room = await self._api.get_room(room_name)
logger.info("%s: joining %s", self, room_name)
await self._api.join_room(self._user, room)
logger.info("%s: listening in %s", self, room_name)
async for message in self._api.iter_chat(room):
# getting a new ghapi object for every message because our
# creds time out. Ideally, the api class would take care of that.
ghapi = await self._ghappapi.get_github_api(False, user, repo)
await self.handle_msg(room, message, ghapi)
# on timeouts, we just run log into the room again
except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
pass
# http errors just get logged
except aiohttp.ClientResponseError as exc:
logger.exception("HTTP Error Code %s while listening to room %s",
exc.code, room_name)
# asyncio cancellation needs to be passed up
except asyncio.CancelledError: # pylint: disable=try-except-raise
raise
# the rest, we just log so that we remain online after an error
except Exception: # pylint: disable=broad-except
logger.exception("Unexpected exception caught. Last message: '%s'", message)
await asyncio.sleep(1)
except asyncio.CancelledError:
logger.error("%s: stopped listening in %s", self, room_name)
# we need a new session here as the one we got passed might have been
# closed already when we get cancelled
async with aiohttp.ClientSession() as session:
self._api._session = session
await self._api.leave_room(self._user, room)
logger.error("%s: left room %s", self, room_name)
async def handle_msg(self, room: gitter.Room, message: gitter.Message, ghapi) -> None:
"""Parse Gitter message and dispatch via command_routes"""
await self._api.mark_as_read(self._user, room, [message.id])
if self._user.id not in (m.userId for m in message.mentions):
if self._user.username.lower() in (m.screenName.lower() for m in message.mentions):
await self._api.send_message(room, "@%s - are you talking to me?",
message.fromUser.username)
return
command = message.text.strip().lstrip('@'+self._user.username).strip()
if command == message.text.strip():
await self._api.send_message(room, "Hmm? Someone talking about me?",
message.fromUser.username)
return
cmd, *args = command.split()
issue_number = None
try:
if args[-1][0] == '#':
issue_number = int(args[-1][1:])
args.pop()
except (ValueError, IndexError):
pass
response = await command_routes.dispatch(cmd.lower(), ghapi, issue_number,
message.fromUser.username, *args)
if response:
await self._api.send_message(room, "@%s: %s", message.fromUser.username, response)
else:
await self._api.send_message(room, "@%s: command failed", message.fromUser.username)
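# Minimal wiring sketch (illustrative; the token, room name and repository are
# placeholders, and ``ghappapi`` stands for whatever GitHub-App API helper the
# surrounding web application already provides):
#
#     app = aiohttp.web.Application()
#     # app['client_session'] is assumed to be set before the listener is built
#     GitterListener(app, token="<gitter-token>",
#                    rooms={"bioconda/Lobby": "bioconda/bioconda-recipes"},
#                    session=app['client_session'], ghappapi=ghappapi)
#     # __init__ registers start()/shutdown() on app.on_startup/app.on_shutdown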
| mit | 7,395,101,836,971,723,000 | 40.432624 | 96 | 0.566587 | false |
reshanie/roblox.py | roblox/asset.py | 1 | 12222 | """
Copyright (c) 2017 James Patrick Dill, reshanie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import logging
from collections import namedtuple
from json import JSONDecodeError
import faste
from . import utils, enums, errors
log = logging.getLogger("roblox")
class Asset(object):
"""Roblox Asset object.
Use :meth:`RobloxSession.get_asset` to get a specific asset.
Attributes
----------
name : str
Asset name
description : str
Asset description
id : int
Asset ID
product_id : Optional[int]
Product ID
asset_type : :class:`roblox.AssetType`
Asset type
created : :class:`datetime.datetime`
When the asset was first created
updated : :class:`datetime.datetime`
When the asset was last updated
price : Optional[int]
Price of the asset in ROBUX
sales : Optional[int]
Total sales of the asset
is_new : bool
Whether Roblox considers the asset 'new'
for_sale : bool
Whether asset can be taken/bought
public_domain : bool
If the asset is public domain / publicly viewable
limited : bool
If the asset is limited
limited_unique : bool
If the asset is limited and unique
remaining : Optional[int]
How many are remaining, if the asset is limited
membership_level: :class:`roblox.Membership`
Minimum Builders Club needed to take the asset
"""
def __init__(self, client, asset_id=0):
"""param client: client
:type client: roblox.RobloxSession
"""
self.client = client
self.id = asset_id
self._update_info()
def _update_info(self):
try:
product_info = self.client.http.product_info(self.id)
except JSONDecodeError:
raise errors.BadRequest("Invalid asset, possibly deleted")
self.product_id = product_info.get("ProductId")
self.name = product_info.get("Name")
self.description = product_info.get("Description")
self.asset_type = enums.AssetType(product_info.get("AssetTypeId"))
self.icon_image_asset_id = product_info.get("IconImageAssetId")
self.created = utils.get_datetime(product_info.get("Created"))
self.updated = utils.get_datetime(product_info.get("Updated"))
self.price = product_info.get("PriceInRobux")
self.sales = product_info.get("Sales")
self.is_new = product_info.get("IsNew")
self.for_sale = product_info.get("IsForSale")
self.public_domain = product_info.get("IsPublicDomain")
self.unique = product_info.get("IsLimitedUnique")
self.limited = product_info.get("IsLimited") or self.unique
self.remaining = product_info.get("Remaining")
self.membership_level = enums.Membership(product_info.get("MinimumMembershipLevel"))
self.creator_id = product_info["Creator"]["CreatorTargetId"]
self.creator_type = product_info["Creator"]["CreatorType"]
def __hash__(self):
return self.id
def __repr__(self):
return "<roblox.Asset {0.asset_type.name} name={0.name!r} id={0.id!r}>".format(self)
def __str__(self):
return self.name
def __eq__(self, other):
"""
Returns True if two asset objects are the same asset.
"""
if type(other) != Asset:
return False
return self.id == other.id
@property
@faste.decor.rr_cache()
def creator(self):
"""Asset creator
:returns: :class:`User` or :class:`Group`"""
if self.creator_type == "User":
return self.client.get_user(user_id=self.creator_id)
else:
return self.client.get_group(self.creator_id)
def buy(self):
"""
Takes/buys asset.
:returns: `True` if successful
"""
return self.client.http.buy_product(self.product_id,
self.price,
self.creator_id)
def remove_from_inventory(self):
"""
Deletes asset from inventory of client user.
:returns: `True` if successful
"""
return self.client.http.delete_from_inventory(self.id)
def post_comment(self, content):
"""
Posts comment on asset
:param str content: Comment text
:return: :class:`Comment`
"""
if not content:
raise errors.BadRequest("Comment must have text.")
comment = self.client.http.post_comment(self.id, content)
return Comment(self, content=comment["Text"], created=comment["PostedDate"], author=self.client.me)
def owned_by(self, user):
"""
Checks if asset is owned by user.
:param user: User
:type user: :class:`User`
:returns: `True` if user owns asset
"""
return self.client.http.user_owns_asset(user.id, self.id)
@property
@faste.decor.rr_cache()
def icon(self):
"""Asset for icon
:returns: Optional[:class:`Asset`]"""
if self.icon_image_asset_id == 0:
return None
return self.client.get_asset(self.icon_image_asset_id)
@property
def favorites(self):
"""Favorite count of asset
:returns: int"""
return self.client.http.asset_favorites(self.id)
def is_favorited(self):
"""Whether asset is favorited by client
:returns: bool"""
return self.client.http.is_favorited(self.id)
def favorite(self):
"""Favorites asset if it isn't favorited already.
:returns: return value of :meth:`is_favorited` (bool)"""
if self.is_favorited():
return True
return self.client.http.toggle_favorite(self.id)
def unfavorite(self):
"""Unfavorites asset if it's favorited.
:returns: return value of :meth:`is_favorited` (bool)"""
if not self.is_favorited():
return False
return not self.client.http.toggle_favorite(self.id)
def recent_average_price(self):
"""Gets RAP of asset, if it is a collectible.
:returns: Optional[`int`]"""
return self.client.http.get_sales_data(self.id).get("AveragePrice")
    def RAP(self):
        """Alias for :meth:`recent_average_price`."""
        return self.recent_average_price()
def sales_chart(self):
"""Gets :class:`SalesChart` for asset, if it's a collectible."""
return SalesChart(self.client, self)
class Game(Asset):
pass
sales_point = namedtuple("sales_day", "date price volume")
class SalesChart(object):
"""Asset sales chart, representing user sales of a collectible.
You can also iterate over this object, and index it. ``SalesChart[0]`` will return the first sales point.
You can also use ::
>>> list(chart)
>>> reversed(chart)
>>> dict(chart)
>>> len(chart)
>>> datetime.date in chart
    The dict and list versions' values are namedtuples representing sales points,
    with ``sales_point.date``, ``sales_point.price``, and ``sales_point.volume``.
    The dict's keys are :class:`datetime.date`.
Attributes
----------
asset : :class:`Asset`
Asset the sales chart belongs to
chart_dict : dict
dict version of the sales chart
"""
def __init__(self, client, asset):
self.client = client
self.asset = asset
self.chart_dict = self._chart_dict()
def _chart_dict(self):
sales_data = self.client.http.get_sales_data(self.asset.id)
if not sales_data:
raise ValueError("{!r} isn't a collectible and has no sales data".format(self.asset))
sales_chart = sales_data.get("HundredEightyDaySalesChart").split("|")
volume_chart = sales_data.get("HundredEightyDayVolumeChart").split("|")
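        # Both charts come back as pipe-separated "<millisecond timestamp>,<value>"
        # pairs; the [:-3] slices below drop the millisecond digits so the keys
        # become whole-second Unix timestamps.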
sales_chart_dict = {}
for sale in sales_chart:
ts = sale.split(",")
if not ts[0]:
break
k = int(ts[0][:-3])
sales_chart_dict[k] = int(ts[1])
volume_chart_dict = {}
for vol in volume_chart:
tv = vol.split(",")
if not tv[0]:
break
k = int(tv[0][:-3])
volume_chart_dict[k] = int(tv[1])
rtd = {}
for timestamp in sales_chart_dict:
nts = datetime.date.fromtimestamp(timestamp)
rtd[nts] = sales_point(
date=nts,
price=sales_chart_dict.get(timestamp),
volume=volume_chart_dict.get(timestamp) or 0,
)
return rtd
def __dict__(self):
return self.chart_dict
    def __iter__(self):
        # __iter__ must return an iterator, not a list; yield the sales points.
        return iter(self.chart_dict.values())
def __getitem__(self, index):
return list(self.chart_dict.values())[index]
def __len__(self):
return len(self.chart_dict)
def __reversed__(self):
return reversed(list(self.chart_dict.values()))
def __contains__(self, item):
if isinstance(item, datetime.date):
return item in self.chart_dict.keys()
elif isinstance(item, sales_point):
return item in self.chart_dict.values()
return False
def __repr__(self):
return "<roblox.SalesChart asset={0.asset.name!r}>".format(self)
class Comment(object):
"""Asset comment.
Attributes
----------
asset : :class:`Asset`
Asset the comment belongs to
content : str
Comment content
created : :class:`datetime.datetime`
When the comment was posted"""
__slots__ = ["asset", "content", "created", "_user", "_user_cache"]
def __init__(self, asset, content=None, created=None, author=None):
"""
:type asset: :class:`Asset`
"""
self.asset = asset
self.content = content
self.created = utils.get_datetime(created) if created else None
self._user = author
self._user_cache = None
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self.asset == other.asset and self.created == other.created and self.author == other.author
def __repr__(self):
return "<roblox.Comment asset={0.asset.name!r} author={0.author!r} created={0.created!r}>".format(self)
def __str__(self):
return self.content
@property
@faste.decor.rr_cache()
def author(self):
"""User who made the post.
:returns: :class:`User`"""
if type(self._user) == int:
return self.asset.client.get_user(user_id=self._user)
elif type(self._user) == str:
return self.asset.client.get_user(username=self._user)
return self._user
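# Minimal usage sketch (illustrative; the client construction and the asset id are
# placeholders for however the surrounding library is normally initialised):
#
#     client = ...                        # an authenticated roblox.RobloxSession
#     item = client.get_asset(1029025)    # hypothetical asset id
#     print(item.name, item.price, item.creator)
#     if item.limited:
#         chart = item.sales_chart()
#         print(len(chart), "days of sales data, RAP =", item.RAP())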
| mit | -1,756,698,657,007,532,500 | 28.708543 | 115 | 0.58722 | false |
jcsp/manila | manila/tests/scheduler/fakes.py | 1 | 12546 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""
from oslo_utils import timeutils
import six
from manila.scheduler import filter_scheduler
from manila.scheduler import host_manager
SHARE_SERVICES_NO_POOLS = [
dict(id=1, host='host1', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2@back1', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host2@back2', topic='share', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
]
SERVICE_STATES_NO_POOLS = {
'host1': dict(share_backend_name='AAA',
total_capacity_gb=512, free_capacity_gb=200,
timestamp=None, reserved_percentage=0,
provisioned_capacity_gb=312,
max_over_subscription_ratio=1.0,
thin_provisioning=False,
driver_handles_share_servers=False),
'host2@back1': dict(share_backend_name='BBB',
total_capacity_gb=256, free_capacity_gb=100,
timestamp=None, reserved_percentage=0,
provisioned_capacity_gb=400,
max_over_subscription_ratio=2.0,
thin_provisioning=True,
driver_handles_share_servers=False),
'host2@back2': dict(share_backend_name='CCC',
total_capacity_gb=10000, free_capacity_gb=700,
timestamp=None, reserved_percentage=0,
provisioned_capacity_gb=50000,
max_over_subscription_ratio=20.0,
thin_provisioning=True,
driver_handles_share_servers=False),
}
SHARE_SERVICES_WITH_POOLS = [
dict(id=1, host='host1@AAA', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2@BBB', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3@CCC', topic='share', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4@DDD', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
    # services on host5 and host6 are disabled
dict(id=5, host='host5@EEE', topic='share', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
dict(id=5, host='host6@FFF', topic='share', disabled=True,
availability_zone='zone5', updated_at=timeutils.utcnow()),
]
SHARE_SERVICE_STATES_WITH_POOLS = {
'host1@AAA': dict(share_backend_name='AAA',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool1',
total_capacity_gb=51,
free_capacity_gb=41,
reserved_percentage=0,
provisioned_capacity_gb=10,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
'host2@BBB': dict(share_backend_name='BBB',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool2',
total_capacity_gb=52,
free_capacity_gb=42,
reserved_percentage=0,
provisioned_capacity_gb=60,
max_over_subscription_ratio=2.0,
thin_provisioning=True)]),
'host3@CCC': dict(share_backend_name='CCC',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool3',
total_capacity_gb=53,
free_capacity_gb=43,
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=20.0,
thin_provisioning=True,
consistency_group_support='pool')]),
'host4@DDD': dict(share_backend_name='DDD',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool4a',
total_capacity_gb=541,
free_capacity_gb=441,
reserved_percentage=0,
provisioned_capacity_gb=800,
max_over_subscription_ratio=2.0,
thin_provisioning=True,
consistency_group_support='host'),
dict(pool_name='pool4b',
total_capacity_gb=542,
free_capacity_gb=442,
reserved_percentage=0,
provisioned_capacity_gb=2000,
max_over_subscription_ratio=10.0,
thin_provisioning=True,
consistency_group_support='host')]),
'host5@EEE': dict(share_backend_name='EEE',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
snapshot_support=True,
pools=[dict(pool_name='pool5a',
total_capacity_gb=551,
free_capacity_gb=451,
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False),
dict(pool_name='pool5b',
total_capacity_gb=552,
free_capacity_gb=452,
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
'host6@FFF': dict(share_backend_name='FFF',
timestamp=None, reserved_percentage=0,
driver_handles_share_servers=False,
pools=[dict(pool_name='pool6a',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False),
dict(pool_name='pool6b',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0,
provisioned_capacity_gb=100,
max_over_subscription_ratio=1.0,
thin_provisioning=False)]),
}
class FakeFilterScheduler(filter_scheduler.FilterScheduler):
def __init__(self, *args, **kwargs):
super(FakeFilterScheduler, self).__init__(*args, **kwargs)
self.host_manager = host_manager.HostManager()
class FakeHostManager(host_manager.HostManager):
def __init__(self):
super(FakeHostManager, self).__init__()
self.service_states = {
'host1': {'total_capacity_gb': 1024,
'free_capacity_gb': 1024,
'allocated_capacity_gb': 0,
'thin_provisioning': False,
'reserved_percentage': 10,
'timestamp': None},
'host2': {'total_capacity_gb': 2048,
'free_capacity_gb': 300,
'allocated_capacity_gb': 1748,
'provisioned_capacity_gb': 1748,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': True,
'reserved_percentage': 10,
'timestamp': None},
'host3': {'total_capacity_gb': 512,
'free_capacity_gb': 256,
'allocated_capacity_gb': 256,
'provisioned_capacity_gb': 256,
'max_over_subscription_ratio': 2.0,
'thin_provisioning': False,
'consistency_group_support': 'host',
'reserved_percentage': 0,
'timestamp': None},
'host4': {'total_capacity_gb': 2048,
'free_capacity_gb': 200,
'allocated_capacity_gb': 1848,
'provisioned_capacity_gb': 1848,
'max_over_subscription_ratio': 1.0,
'thin_provisioning': True,
'reserved_percentage': 5,
'timestamp': None},
'host5': {'total_capacity_gb': 2048,
'free_capacity_gb': 500,
'allocated_capacity_gb': 1548,
'provisioned_capacity_gb': 1548,
'max_over_subscription_ratio': 1.5,
'thin_provisioning': True,
'reserved_percentage': 5,
'timestamp': None,
'consistency_group_support': 'pool'},
'host6': {'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'allocated_capacity_gb': 1548,
'thin_provisioning': False,
'reserved_percentage': 5,
'timestamp': None},
}
class FakeHostState(host_manager.HostState):
def __init__(self, host, attribute_dict):
super(FakeHostState, self).__init__(host)
for (key, val) in six.iteritems(attribute_dict):
setattr(self, key, val)
def mock_host_manager_db_calls(mock_obj, disabled=None):
services = [
dict(id=1, host='host1', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2', topic='share', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3', topic='share', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
dict(id=5, host='host5', topic='share', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
dict(id=6, host='host6', topic='share', disabled=False,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
if disabled is None:
mock_obj.return_value = services
else:
mock_obj.return_value = [service for service in services
if service['disabled'] == disabled]
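# Illustrative use from a test (assumption: the scheduler obtains its service list
# through manila.db.service_get_all_by_topic, which is the call this helper is
# normally patched onto; adjust the patch target if the import path differs):
#
#     with mock.patch('manila.db.service_get_all_by_topic') as _services:
#         mock_host_manager_db_calls(_services, disabled=False)
#         states = host_manager.HostManager().get_all_host_states_share(ctxt)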
| apache-2.0 | 4,470,534,704,318,130,700 | 48.588933 | 78 | 0.492428 | false |
corredD/upy | transformation.py | 1 | 68489 | # -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006-2013, Christoph Gohlke
# Copyright (c) 2006-2013, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2013.06.29
Requirements
------------
* `CPython 2.7 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Transformations.c 2013.01.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for speedup of some functions)
Notes
-----
The API is not stable yet and is expected to change between revisions.
This Python code is not optimized for speed. Refer to the transformations.c
module for a faster implementation of some functions.
Documentation in HTML format can be generated with epydoc.
Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
numpy.dot(M, v) for shape (4, \*) column vectors, respectively
numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
This module follows the "column vectors on the right" and "row major storage"
(C contiguous) conventions. The translation components are in the right column
of the transformation matrix, i.e. M[:3, 3].
The transpose of the transformation matrices may have to be used to interface
with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
Calculations are carried out with numpy.float64 precision.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions w+ix+jy+kz are represented as [w, x, y, z].
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
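*Example*: with the conventions above, ``euler_matrix(alpha, beta, gamma, 'rxyz')``
composes rotations about the x, y, and z axes of the rotating frame; the Examples
section below verifies that it matches ``concatenate_matrices(Rx, Ry, Rz)`` built
from the individual ``rotation_matrix`` calls.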
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
(13) Quaternion in molecular modeling. CFF Karney.
J Mol Graph Mod, 25(5):595-604
(14) New method for extracting the quaternion from a rotation matrix.
Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
(15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
(16) Column Vectors vs. Row Vectors.
http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix([1, 2, 3])
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, [1, 2, 3])
True
>>> numpy.allclose(shear, [0, math.tan(beta), 0])
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
>>> v0, v1 = random_vector(3), random_vector(3)
>>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
>>> v2 = numpy.dot(v0, M[:3,:3].T)
>>> numpy.allclose(unit_vector(v1), unit_vector(v2))
True
"""
from __future__ import division, print_function
import math
import numpy
__version__ = '2013.06.29'
__docformat__ = 'restructuredtext en'
__all__ = []
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
>>> v2[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
w, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
>>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
True
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.diag([cosa, cosa, cosa])
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array([[ 0.0, -direction[2], direction[1]],
[ direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v[3] = 1
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.diag([factor, factor, factor, 1.0])
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
    Using either a perspective point, a projection direction, or neither.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix([0, 0, 0], [1, 0, 0])
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
w, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustum.
The frustum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustum.
If perspective is True the frustum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by w coordinate).
>>> frustum = numpy.random.rand(6)
>>> frustum[1] += frustum[0]
>>> frustum[3] += frustum[2]
>>> frustum[5] += frustum[4]
>>> M = clip_matrix(perspective=False, *frustum)
>>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(perspective=True, *frustum)
>>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustum: near <= 0")
t = 2.0 * near
M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
[0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
[0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
[0.0, 0.0, -1.0, 0.0]]
else:
M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
[0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
[0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
[0.0, 0.0, 0.0, 1.0]]
return numpy.array(M)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("no two linear independent eigenvectors found %s" % w)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
w = vector_norm(n)
if w > lenorm:
lenorm = w
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
Non-degenerative homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
Raise ValueError if matrix is of wrong type or degenerative.
>>> T0 = translation_matrix([1, 2, 3])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0.0, 0.0, 0.0, 1.0
if not numpy.linalg.det(P):
raise ValueError("matrix is singular")
scale = numpy.zeros((3, ))
shear = [0.0, 0.0, 0.0]
angles = [0.0, 0.0, 0.0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0.0, 0.0, 0.0, 1.0
else:
perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
translate = M[3, :3].copy()
M[3, :3] = 0.0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
    shear[1] /= scale[2]
    shear[2] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
numpy.negative(scale, scale)
numpy.negative(row, row)
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array([
[ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
[-a*sinb*co, b*sina, 0.0, 0.0],
[ a*cosb, b*cosa, c, 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
"""Return affine transform matrix to register two point sets.
v0 and v1 are shape (ndims, \*) arrays of at least ndims non-homogeneous
coordinates, where ndims is the dimensionality of the coordinate space.
If shear is False, a similarity transformation matrix is returned.
If also scale is False, a rigid/Euclidean transformation matrix
is returned.
By default the algorithm by Hartley and Zissermann [15] is used.
If usesvd is True, similarity and Euclidean transformation matrices
are calculated by minimizing the weighted sum of squared deviations
(RMSD) according to the algorithm by Kabsch [8].
Otherwise, and if ndims is 3, the quaternion based algorithm by Horn [9]
is used, which is slower when using this Python implementation.
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
>>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
>>> affine_matrix_from_points(v0, v1)
array([[ 0.14549, 0.00062, 675.50008],
[ 0.00048, 0.14094, 53.24971],
[ 0. , 0. , 1. ]])
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> S = scale_matrix(random.random())
>>> M = concatenate_matrices(T, R, S)
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
>>> M = affine_matrix_from_points(v0[:3], v1[:3])
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
More examples in superimposition_matrix()
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
ndims = v0.shape[0]
if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
raise ValueError("input arrays are of wrong shape or type")
# move centroids to origin
t0 = -numpy.mean(v0, axis=1)
M0 = numpy.identity(ndims+1)
M0[:ndims, ndims] = t0
v0 += t0.reshape(ndims, 1)
t1 = -numpy.mean(v1, axis=1)
M1 = numpy.identity(ndims+1)
M1[:ndims, ndims] = t1
v1 += t1.reshape(ndims, 1)
if shear:
# Affine transformation
A = numpy.concatenate((v0, v1), axis=0)
u, s, vh = numpy.linalg.svd(A.T)
vh = vh[:ndims].T
B = vh[:ndims]
C = vh[ndims:2*ndims]
t = numpy.dot(C, numpy.linalg.pinv(B))
t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
elif usesvd or ndims != 3:
# Rigid transformation via SVD of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(ndims+1)
M[:ndims, :ndims] = R
else:
# Rigid transformation matrix via quaternion
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = [[xx+yy+zz, 0.0, 0.0, 0.0],
[yz-zy, xx-yy-zz, 0.0, 0.0],
[zx-xz, xy+yx, yy-xx-zz, 0.0],
[xy-yx, zx+xz, yz+zy, zz-xx-yy]]
# quaternion: eigenvector corresponding to most positive eigenvalue
w, V = numpy.linalg.eigh(N)
q = V[:, numpy.argmax(w)]
q /= vector_norm(q) # unit quaternion
# homogeneous transformation matrix
M = quaternion_matrix(q)
if scale and not shear:
# Affine transformation; scale is ratio of RMS deviations from centroid
v0 *= v0
v1 *= v1
M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# move centroids back
M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
M /= M[ndims, ndims]
return M
def superimposition_matrix(v0, v1, scale=False, usesvd=True):
"""Return matrix to transform given 3D point set into second point set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 points.
The parameters scale and usesvd are explained in the more general
affine_matrix_from_points function.
The returned matrix is a similarity or Euclidean transformation matrix.
This function has a fast C implementation in transformations.c.
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
>>> v0[3] = 1
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scale=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3))
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
return affine_matrix_from_points(v0, v1, shear=False,
scale=scale, usesvd=usesvd)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print(axes, "failed")
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_TUPLE2AXES[axes] # validation
firstaxis, parity, repetition, frame = axes
i = firstaxis + 1
j = _NEXT_AXIS[i+parity-1] + 1
k = _NEXT_AXIS[i-parity] + 1
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
q = numpy.empty((4, ))
if repetition:
q[0] = cj*(cc - ss)
q[i] = cj*(cs + sc)
q[j] = sj*(cc + ss)
q[k] = sj*(cs - sc)
else:
q[0] = cj*cc + sj*ss
q[i] = cj*sc - sj*cs
q[j] = cj*ss + sj*cc
q[k] = cj*cs - sj*sc
if parity:
q[j] *= -1.0
return q
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, [1, 0, 0])
>>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
True
"""
q = numpy.array([0.0, axis[0], axis[1], axis[2]])
qlen = vector_norm(q)
if qlen > _EPS:
q *= math.sin(angle/2.0) / qlen
q[0] = math.cos(angle/2.0)
return q
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
>>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
True
>>> M = quaternion_matrix([1, 0, 0, 0])
>>> numpy.allclose(M, numpy.identity(4))
True
>>> M = quaternion_matrix([0, 1, 0, 0])
>>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
n = numpy.dot(q, q)
if n < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / n)
q = numpy.outer(q, q)
return numpy.array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
[ 0.0, 0.0, 0.0, 1.0]])
def quaternion_from_matrix(matrix, isprecise=False):
"""Return quaternion from rotation matrix.
If isprecise is True, the input matrix is assumed to be a precise rotation
matrix and a faster algorithm is used.
>>> q = quaternion_from_matrix(numpy.identity(4), True)
>>> numpy.allclose(q, [1, 0, 0, 0])
True
>>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
>>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
True
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R, True)
>>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
True
>>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
True
>>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
True
>>> R = random_rotation_matrix()
>>> q = quaternion_from_matrix(R)
>>> is_same_transform(R, quaternion_matrix(q))
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
if isprecise:
q = numpy.empty((4, ))
t = numpy.trace(M)
if t > M[3, 3]:
q[0] = t
q[3] = M[1, 0] - M[0, 1]
q[2] = M[0, 2] - M[2, 0]
q[1] = M[2, 1] - M[1, 2]
else:
i, j, k = 1, 2, 3
if M[1, 1] > M[0, 0]:
i, j, k = 2, 3, 1
if M[2, 2] > M[i, i]:
i, j, k = 3, 1, 2
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
else:
m00 = M[0, 0]
m01 = M[0, 1]
m02 = M[0, 2]
m10 = M[1, 0]
m11 = M[1, 1]
m12 = M[1, 2]
m20 = M[2, 0]
m21 = M[2, 1]
m22 = M[2, 2]
# symmetric matrix K
K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
[m01+m10, m11-m00-m22, 0.0, 0.0],
[m02+m20, m12+m21, m22-m00-m11, 0.0],
[m21-m12, m02-m20, m10-m01, m00+m11+m22]])
K /= 3.0
# quaternion is eigenvector of K that corresponds to largest eigenvalue
w, V = numpy.linalg.eigh(K)
q = V[[3, 0, 1, 2], numpy.argmax(w)]
if q[0] < 0.0:
numpy.negative(q, q)
return q
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
>>> numpy.allclose(q, [28, -44, -14, 48])
True
"""
w0, x0, y0, z0 = quaternion0
w1, x1, y1, z1 = quaternion1
return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
True
"""
q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
numpy.negative(q[1:], q[1:])
return q / numpy.dot(q, q)
def quaternion_real(quaternion):
"""Return real part of quaternion.
>>> quaternion_real([3, 0, 1, 2])
3.0
"""
return float(quaternion[0])
def quaternion_imag(quaternion):
"""Return imaginary part of quaternion.
>>> quaternion_imag([3, 0, 1, 2])
array([ 0., 1., 2.])
"""
return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
numpy.negative(q1, q1)
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> len(q.shape), q.shape[0]==4
(1, True)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
numpy.cos(t1)*r1, numpy.sin(t2)*r2])
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like
Three independent random variables that are uniformly distributed
between 0 and 1 for each returned quaternion.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[1, 0, 0, 0])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1, 1, 0], [-1, 1, 0])
>>> ball.constrain = True
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0.0, 0.0, 1.0])
self._constrain = False
if initial is None:
self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
if axes is None:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
@property
def constrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
@constrain.setter
def constrain(self, value):
"""Set state of constrain to axis mode."""
self._constrain = bool(value)
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v0 = (point[0] - center[0]) / radius
v1 = (center[1] - point[1]) / radius
n = v0*v0 + v1*v1
if n > 1.0:
# position outside of sphere
n = math.sqrt(n)
return numpy.array([v0/n, v1/n, 0.0])
else:
return numpy.array([v0, v1, math.sqrt(1.0 - n)])
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
numpy.negative(v, v)
v /= n
return v
if a[2] == 1.0:
return numpy.array([1.0, 0.0, 0.0])
return unit_vector([-a[1], a[0], 0.0])
def arcball_nearest_axis(point, axes):
"""Return axis, which arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. Euclidean norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3))
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0) and numpy.all(v < 1)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def vector_product(v0, v1, axis=0):
"""Return vector perpendicular to vectors.
>>> v = vector_product([2, 0, 0], [0, 3, 0])
>>> numpy.allclose(v, [0, 0, 6])
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> v = vector_product(v0, v1)
>>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> v = vector_product(v0, v1, axis=1)
>>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
True
"""
return numpy.cross(v0, v1, axis=axis)
def angle_between_vectors(v0, v1, directed=True, axis=0):
"""Return angle between vectors.
If directed is False, the input vectors are interpreted as undirected axes,
i.e. the maximum angle is pi/2.
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
>>> numpy.allclose(a, math.pi)
True
>>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
>>> numpy.allclose(a, 0)
True
>>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
>>> v1 = [[3], [0], [0]]
>>> a = angle_between_vectors(v0, v1)
>>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
True
>>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
>>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
>>> a = angle_between_vectors(v0, v1, axis=1)
>>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
dot = numpy.sum(v0 * v1, axis=axis)
dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
return numpy.arccos(dot if directed else numpy.fabs(dot))
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def matrixToEuler(mat):
"""
code from 'http://www.euclideanspace.com/maths/geometry/rotations/conversions/'
notes : this conversion uses conventions as described on page:
'http://www.euclideanspace.com/maths/geometry/rotations/euler/index.htm'
Coordinate System: right hand
Positive angle: right hand
Order of euler angles: heading first, then attitude, then bank
matrix row column ordering:
[m00 m01 m02]
[m10 m11 m12]
[m20 m21 m22]
@type mat: 4x4array
    @param mat: the matrix to convert into Euler angles (heading, attitude, bank)
    @rtype: 3d array
    @return: the computed Euler angles from the matrix
"""
#Assuming the angles are in radians.
#3,3 matrix m[0:3,0:3]
#return heading,attitude,bank Y,Z,X
import math
if (mat[1][0] > 0.998) : # singularity at north pole
heading = math.atan2(mat[0][2],mat[2][2])
attitude = math.pi/2.
bank = 0
return (heading,attitude,bank)
if (mat[1][0] < -0.998) : # singularity at south pole
heading = math.atan2(mat[0][2],mat[2][2])
attitude = -math.pi/2.
bank = 0
return (heading,attitude,bank)
heading = math.atan2(-mat[2][0],mat[0][0])
bank = math.atan2(-mat[1][2],mat[1][1])
attitude = math.asin(mat[1][0])
if mat[0][0] < 0 :
if (attitude < 0.) and (math.degrees(attitude) > -90.):
attitude = -math.pi-attitude
elif (attitude > 0.) and (math.degrees(attitude) < 90.):
attitude = math.pi-attitude
return (heading,attitude,bank)
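# Illustrative sketch: matrixToEuler can be combined with rotation_matrix()
# defined above. The pi/4 angle and the Y axis below are example values only;
# for a pure rotation about Y, heading is about pi/4 and attitude/bank are
# about 0 under the conventions documented in the docstring.
def _demo_matrixToEuler():
    """Convert a simple Y-axis rotation into (heading, attitude, bank)."""
    R = rotation_matrix(math.pi / 4.0, [0.0, 1.0, 0.0])
    heading, attitude, bank = matrixToEuler(R)
    return heading, attitude, bank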
def unbiasedRotationXYZ(ex, ey, ez):
    """Return 3x3 rotation matrix for the rotation vector (ex, ey, ez).
    The rotation angle is the vector's norm and the rotation axis is the
    normalized vector (Rodrigues' rotation formula).
    """
    M = numpy.identity(3)
    e = math.sqrt((ex*ex)+(ey*ey)+(ez*ez))
    e2 = (ex*ex)+(ey*ey)+(ez*ez)
    if e2 == 0.0:
        # zero rotation vector: return identity
        return M
    cose = math.cos(e)
    sine = math.sin(e)
    M[0, 0] = ((ex*ex)+((ey*ey)+(ez*ez))*cose)/e2
    M[0, 1] = ((ex*ey)/e2)*(1.0-cose)-(ez/e)*sine
    M[0, 2] = ((ex*ez)/e2)*(1.0-cose)+(ey/e)*sine
    M[1, 0] = ((ex*ey)/e2)*(1.0-cose)+(ez/e)*sine
    M[1, 1] = ((ey*ey)+((ex*ex)+(ez*ez))*cose)/e2
    M[1, 2] = ((ey*ez)/e2)*(1.0-cose)-(ex/e)*sine
    M[2, 0] = ((ex*ez)/e2)*(1.0-cose)-(ey/e)*sine
    M[2, 1] = ((ey*ez)/e2)*(1.0-cose)+(ex/e)*sine
    M[2, 2] = ((ez*ez)+((ex*ex)+(ey*ey))*cose)/e2
    return M
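# Quick sanity-check sketch (the specific rotation vector is an assumed
# example): for a rotation vector, unbiasedRotationXYZ should agree with
# rotation_matrix() called with the vector's norm as the angle and the
# normalized vector as the axis.
def _check_unbiasedRotationXYZ(ex=0.1, ey=0.2, ez=0.3):
    """Return True if unbiasedRotationXYZ matches rotation_matrix."""
    angle = math.sqrt(ex*ex + ey*ey + ez*ez)
    R = rotation_matrix(angle, [ex, ey, ez])[:3, :3]
    return numpy.allclose(R, unbiasedRotationXYZ(ex, ey, ez))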
def ApplyMatrix(coords,mat):
"""
Apply the 4x4 transformation matrix to the given list of 3d points.
@type coords: array
@param coords: the list of point to transform.
@type mat: 4x4array
@param mat: the matrix to apply to the 3d points
@rtype: array
@return: the transformed list of 3d points
"""
    # append a homogeneous coordinate, apply the 4x4 matrix, drop the w column
mat = numpy.array(mat)
coords = numpy.array(coords)
one = numpy.ones( (coords.shape[0], 1), coords.dtype.char )
c = numpy.concatenate( (coords, one), 1 )
return numpy.dot(c, numpy.transpose(mat))[:, :3]
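# Example sketch for ApplyMatrix (the points and translation vector are
# illustrative assumptions): transform a few points by a pure translation
# built with translation_matrix() above; each returned point is the input
# point shifted by (1, 2, 3).
def _demo_ApplyMatrix():
    """Translate three points by (1, 2, 3) using ApplyMatrix."""
    points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
    T = translation_matrix([1.0, 2.0, 3.0])
    return ApplyMatrix(points, T)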
#def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
# """Try import all public attributes from module into global namespace.
#
# Existing attributes with name clashes are renamed with prefix.
# Attributes starting with underscore are ignored by default.
#
# Return True on successful import.
#
# """
# import warnings
# from importlib import import_module
# try:
# if not package:
# module = import_module(name)
# else:
# module = import_module('.' + name, package=package)
# except ImportError:
# if warn:
# warnings.warn("failed to import module %s" % name)
# else:
# for attr in dir(module):
# if ignore and attr.startswith(ignore):
# continue
# if prefix:
# if attr in globals():
# globals()[prefix + attr] = globals()[attr]
# elif warn:
# warnings.warn("no Python implementation of " + attr)
# globals()[attr] = getattr(module, attr)
# return True
#
#
#_import_module('_transformations')
#
#if __name__ == "__main__":
# import doctest
# import random # used in doctests
# numpy.set_printoptions(suppress=True, precision=5)
# doctest.testmod() | gpl-3.0 | -2,078,898,188,954,768,100 | 33.503778 | 83 | 0.578706 | false |
bepress/xavier | xavier/taskqueue.py | 1 | 2517 | """
Offline Manager for Xavier
"""
import logging
import jsonpickle
logger = logging.getLogger(__name__)
class Task(object):
def __init__(self, func):
self.func = func
self.path = '%s.%s' % (func.__name__, func.__module__)
self.publish_event = None
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def delay(self, *args, **kwargs):
event = jsonpickle.dumps((self.path, args, kwargs))
if not self.publish_event:
logger.error("This task has not yet been registered with a task queue")
return False
self.publish_event(event)
return True
def register_with_queue(self, publish_event):
self.publish_event = publish_event
def __repr__(self):
return self.__unicode__()
def __unicode__(self):
return "BackgroundTask(path='{}')".format(self.path)
class TaskQueue(object):
def __init__(self, publish_event):
self.functions = {}
self.publish_event = publish_event
self.schedules = {}
def process_event(self, event):
name, args, kwargs = jsonpickle.loads(event)
func = self.functions.get(name)
if not func:
logger.info("processing event - missing function name: %s", name)
raise Exception("Missing function")
try:
func(*args, **kwargs)
        except Exception:
            logger.exception("Task %s raised an exception", name)
            return False
return True
def process_schedule(self, schedule):
if schedule not in self.schedules:
logger.info("Trying to process schedule for unknown schedule: %s", schedule)
return
scheduled_functions = self.schedules[schedule]
logger.info("Running schedule %s registered functions: %s", schedule, scheduled_functions)
for func in scheduled_functions:
func.delay()
def register_task(self, task, schedules):
self.functions[task.path] = task
for schedule in schedules:
if schedule not in self.schedules:
self.schedules[schedule] = []
if task.path not in self.schedules[schedule]:
self.schedules[schedule].append(task)
task.register_with_queue(self.publish_event)
def task(self, schedules=None):
schedules = schedules if schedules else []
def wrapper(func):
func = Task(func)
self.register_task(func, schedules)
return func
return wrapper
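# Minimal usage sketch (the "hourly" schedule name and the in-memory publish
# list are assumptions for illustration): wire a TaskQueue to a publish
# function, register a task, enqueue it with delay(), then process the
# published event. In production, publish_event would hand the serialized
# event to a real queue service instead of appending to a local list.
def _example_usage():
    published = []
    queue = TaskQueue(published.append)
    @queue.task(schedules=["hourly"])
    def add(a, b):
        return a + b
    add.delay(1, 2)  # serializes (path, args, kwargs) and publishes the event
    ok = queue.process_event(published[0])  # looks up the task and runs it
    queue.process_schedule("hourly")  # re-publishes every task on that schedule
    return ok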
| mit | -8,391,339,796,450,315,000 | 26.064516 | 98 | 0.594358 | false |
zestrada/nova-cs498cc | nova/cells/manager.py | 1 | 15722 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.openstack.common import importutils
from nova.openstack.common import timeutils
cell_manager_opts = [
cfg.StrOpt('driver',
default='nova.cells.rpc_driver.CellsRPCDriver',
help='Cells communication driver to use'),
cfg.IntOpt("instance_updated_at_threshold",
default=3600,
help="Number of seconds after an instance was updated "
"or deleted to continue to update cells"),
cfg.IntOpt("instance_update_num_instances",
default=1,
help="Number of instances to update per periodic task run")
]
CONF = cfg.CONF
CONF.register_opts(cell_manager_opts, group='cells')
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the messaging module. The
MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
RPC_API_VERSION = '1.6'
def __init__(self, *args, **kwargs):
# Mostly for tests.
cell_state_manager = kwargs.pop('cell_state_manager', None)
super(CellsManager, self).__init__(*args, **kwargs)
if cell_state_manager is None:
cell_state_manager = cells_state.CellStateManager
self.state_manager = cell_state_manager()
self.msg_runner = messaging.MessageRunner(self.state_manager)
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
self.instances_to_heal = iter([])
def post_start_hook(self):
"""Have the driver start its consumers for inter-cell communication.
Also ask our child cells for their capacities and capabilities so
we get them more quickly than just waiting for the next periodic
update. Receiving the updates from the children will cause us to
update our parents. If we don't have any children, just update
our parents immediately.
"""
# FIXME(comstud): There's currently no hooks when services are
# stopping, so we have no way to stop consumers cleanly.
self.driver.start_consumers(self.msg_runner)
ctxt = context.get_admin_context()
if self.state_manager.get_child_cells():
self.msg_runner.ask_children_for_capabilities(ctxt)
self.msg_runner.ask_children_for_capacities(ctxt)
else:
self._update_our_parents(ctxt)
@manager.periodic_task
def _update_our_parents(self, ctxt):
"""Update our parent cells with our capabilities and capacity
if we're at the bottom of the tree.
"""
self.msg_runner.tell_parents_our_capabilities(ctxt)
self.msg_runner.tell_parents_our_capacities(ctxt)
@manager.periodic_task
def _heal_instances(self, ctxt):
"""Periodic task to send updates for a number of instances to
parent cells.
On every run of the periodic task, we will attempt to sync
'CONF.cells.instance_update_num_instances' number of instances.
When we get the list of instances, we shuffle them so that multiple
nova-cells services aren't attempting to sync the same instances
in lockstep.
        If CONF.cells.instance_updated_at_threshold is set, only attempt
to sync instances that have been updated recently. The CONF
setting defines the maximum number of seconds old the updated_at
can be. Ie, a threshold of 3600 means to only update instances
        that have been modified in the last hour.
"""
if not self.state_manager.get_parent_cells():
# No need to sync up if we have no parents.
return
info = {'updated_list': False}
def _next_instance():
try:
instance = self.instances_to_heal.next()
except StopIteration:
if info['updated_list']:
return
threshold = CONF.cells.instance_updated_at_threshold
updated_since = None
if threshold > 0:
updated_since = timeutils.utcnow() - datetime.timedelta(
seconds=threshold)
self.instances_to_heal = cells_utils.get_instances_to_sync(
ctxt, updated_since=updated_since, shuffle=True,
uuids_only=True)
info['updated_list'] = True
try:
instance = self.instances_to_heal.next()
except StopIteration:
return
return instance
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)
instance_uuid = _next_instance()
if not instance_uuid:
return
try:
instance = self.db.instance_get_by_uuid(rd_context,
instance_uuid)
except exception.InstanceNotFound:
continue
self._sync_instance(ctxt, instance)
break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def get_cell_info_for_neighbors(self, _ctxt):
"""Return cell information for our neighbor cells."""
return self.state_manager.get_cell_info_for_neighbors()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
self.msg_runner.instance_update_at_top(ctxt, instance)
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
self.msg_runner.instance_destroy_at_top(ctxt, instance)
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
self.msg_runner.instance_delete_everywhere(ctxt, instance,
delete_type)
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
self.msg_runner.instance_fault_create_at_top(ctxt, instance_fault)
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
self.msg_runner.bw_usage_update_at_top(ctxt, bw_update_info)
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
self.msg_runner.sync_instances(ctxt, project_id, updated_since,
deleted)
def service_get_all(self, ctxt, filters):
"""Return services in this cell and in all child cells."""
responses = self.msg_runner.service_get_all(ctxt, filters)
ret_services = []
# 1 response per cell. Each response is a list of services.
for response in responses:
services = response.value_or_raise()
for service in services:
cells_utils.add_cell_to_service(service, response.cell_name)
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, ctxt, host_name):
"""Return a service entry for a compute host in a certain cell."""
cell_name, host_name = cells_utils.split_cell_and_item(host_name)
response = self.msg_runner.service_get_by_compute_host(ctxt,
cell_name,
host_name)
service = response.value_or_raise()
cells_utils.add_cell_to_service(service, response.cell_name)
return service
def proxy_rpc_to_manager(self, ctxt, topic, rpc_message, call, timeout):
"""Proxy an RPC message as-is to a manager."""
compute_topic = CONF.compute_topic
cell_and_host = topic[len(compute_topic) + 1:]
cell_name, host_name = cells_utils.split_cell_and_item(cell_and_host)
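        # Example with hypothetical values: if compute_topic is 'compute' and
        # topic is 'compute.cell1!host1', cell_and_host becomes 'cell1!host1'
        # and split_cell_and_item() yields cell 'cell1' and host 'host1'.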
response = self.msg_runner.proxy_rpc_to_manager(ctxt, cell_name,
host_name, topic, rpc_message, call, timeout)
return response.value_or_raise()
def task_log_get_all(self, ctxt, task_name, period_beginning,
period_ending, host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'host' is not None, host will be of the format 'cell!name@host',
with '@host' being optional. The query will be directed to the
appropriate cell and return all task logs, or task logs matching
the host if specified.
'state' also may be None. If it's not, filter by the state as well.
"""
if host is None:
cell_name = None
else:
cell_name, host = cells_utils.split_cell_and_item(host)
# If no cell name was given, assume that the host name is the
# cell_name and that the target is all hosts
if cell_name is None:
cell_name, host = host, cell_name
responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
task_name, period_beginning, period_ending,
host=host, state=state)
# 1 response per cell. Each response is a list of task log
# entries.
ret_task_logs = []
for response in responses:
task_logs = response.value_or_raise()
for task_log in task_logs:
cells_utils.add_cell_to_task_log(task_log,
response.cell_name)
ret_task_logs.append(task_log)
return ret_task_logs
def compute_node_get(self, ctxt, compute_id):
"""Get a compute node by ID in a specific cell."""
cell_name, compute_id = cells_utils.split_cell_and_item(
compute_id)
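        # compute_id arrives in 'cell!id' form; only the bare id is sent to
        # the target cell, and the cell name is re-attached to the result by
        # add_cell_to_compute_node() below.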
response = self.msg_runner.compute_node_get(ctxt, cell_name,
compute_id)
node = response.value_or_raise()
cells_utils.add_cell_to_compute_node(node, cell_name)
return node
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all cells."""
responses = self.msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
# 1 response per cell. Each response is a list of compute_node
# entries.
ret_nodes = []
for response in responses:
nodes = response.value_or_raise()
for node in nodes:
cells_utils.add_cell_to_compute_node(node,
response.cell_name)
ret_nodes.append(node)
return ret_nodes
def compute_node_stats(self, ctxt):
"""Return compute node stats totals from all cells."""
responses = self.msg_runner.compute_node_stats(ctxt)
totals = {}
for response in responses:
data = response.value_or_raise()
for key, val in data.iteritems():
totals.setdefault(key, 0)
totals[key] += val
return totals
def actions_get(self, ctxt, cell_name, instance_uuid):
response = self.msg_runner.actions_get(ctxt, cell_name, instance_uuid)
return response.value_or_raise()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
response = self.msg_runner.action_get_by_request_id(ctxt, cell_name,
instance_uuid,
request_id)
return response.value_or_raise()
def action_events_get(self, ctxt, cell_name, action_id):
response = self.msg_runner.action_events_get(ctxt, cell_name,
action_id)
return response.value_or_raise()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
self.msg_runner.consoleauth_delete_tokens(ctxt, instance_uuid)
def validate_console_port(self, ctxt, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
instance = self.db.instance_get_by_uuid(ctxt, instance_uuid)
if not instance['cell_name']:
raise exception.InstanceUnknownCell(instance_uuid=instance_uuid)
response = self.msg_runner.validate_console_port(ctxt,
instance['cell_name'], instance_uuid, console_port,
console_type)
return response.value_or_raise()
| apache-2.0 | -814,845,744,529,827,200 | 42.551247 | 78 | 0.596871 | false |
rapidpro/tracpro | tracpro/orgs_ext/tests/test_forms.py | 1 | 15878 | from __future__ import unicode_literals
import datetime
import mock
from django.core.exceptions import NON_FIELD_ERRORS
from django.forms import model_to_dict
from django.test import TestCase
from django.test import override_settings
from django.utils.timezone import now
from tracpro.orgs_ext.forms import FetchRunsForm, OrgExtForm
from tracpro.polls.models import SAMEDAY_LAST, SAMEDAY_SUM, Question
from tracpro.test import factories
from tracpro.test.cases import TracProTest, TracProDataTest
from .. import forms
@mock.patch('tracpro.contacts.models.DataFieldManager.sync')
@override_settings(LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
('fr', 'French'),
])
class TestOrgExtForm(TracProTest):
form_class = forms.OrgExtForm
def setUp(self):
super(TestOrgExtForm, self).setUp()
self.user = factories.User()
self.data = {
'name': 'Organization',
'language': 'en',
'available_languages': ['en', 'es'],
'timezone': 'UTC',
'created_by': self.user.pk,
'modified_by': self.user.pk,
'editors': [self.user.pk],
'viewers': [self.user.pk],
'administrators': [self.user.pk],
'show_spoof_data': True,
'subdomain': 'org',
'how_to_handle_sameday_responses': SAMEDAY_LAST,
}
def test_subdomain_required(self, mock_sync):
self.data.pop('subdomain')
form = self.form_class(data=self.data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors.keys(), ['subdomain'])
self.assertEqual(form.errors['subdomain'], ['This field is required.'])
def test_available_languages_initial_for_create(self, mock_sync):
"""Available languages should default to empty list when creating an org."""
form = self.form_class(instance=None)
self.assertEqual(form.fields['available_languages'].initial, [])
def test_available_languages_initial_for_update(self, mock_sync):
"""Available languages should be set from the instance to update."""
org = factories.Org(available_languages=['en', 'es'])
form = self.form_class(instance=org)
self.assertEqual(form.fields['available_languages'].initial, ['en', 'es'])
def test_default_language_required_for_create(self, mock_sync):
"""Form should require a default language for new orgs."""
self.data.pop('language')
form = self.form_class(data=self.data, instance=None)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors.keys(), ['language'])
self.assertEqual(form.errors['language'], ['This field is required.'])
def test_default_language_required_for_update(self, mock_sync):
"""Form should require a default language when updating an org."""
self.data.pop('language')
form = self.form_class(data=self.data, instance=factories.Org())
self.assertFalse(form.is_valid())
self.assertEqual(form.errors.keys(), ['language'])
self.assertEqual(form.errors['language'], ['This field is required.'])
def test_available_languages_required_for_create(self, mock_sync):
"""Form should require available languages for new orgs."""
self.data.pop('available_languages')
form = self.form_class(data=self.data, instance=None)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors.keys(), ['available_languages'])
self.assertEqual(form.errors['available_languages'],
['This field is required.'])
def test_available_languages_required_for_update(self, mock_sync):
"""Form should require available languages for new orgs."""
self.data.pop('available_languages')
form = self.form_class(data=self.data, instance=factories.Org())
self.assertFalse(form.is_valid())
self.assertEqual(form.errors.keys(), ['available_languages'])
self.assertEqual(form.errors['available_languages'],
['This field is required.'])
def test_default_language_not_in_available_languages(self, mock_sync):
"""Form should require that default language is in available languages."""
self.data['language'] = 'fr'
form = self.form_class(data=self.data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors.keys(), [NON_FIELD_ERRORS])
self.assertEqual(form.errors[NON_FIELD_ERRORS],
['Default language must be one of the languages '
'available for this organization.'])
def test_available_languages_no_change(self, mock_sync):
"""Form should allow available languages to remain unchanged."""
org = factories.Org(
available_languages=['en', 'es'],
language='en',
)
form = self.form_class(data=self.data, instance=org)
self.assertTrue(form.is_valid(), form.errors)
form.save()
org.refresh_from_db()
self.assertEqual(org.available_languages, ['en', 'es'])
self.assertEqual(org.language, 'en')
def test_add_available_languages(self, mock_sync):
"""Form should allow addition of available language(s)."""
org = factories.Org(
available_languages=['en', 'es'],
language='en',
)
self.data['available_languages'] = ['en', 'es', 'fr']
form = self.form_class(data=self.data, instance=org)
self.assertTrue(form.is_valid(), form.errors)
form.save()
org.refresh_from_db()
self.assertEqual(org.available_languages, ['en', 'es', 'fr'])
self.assertEqual(org.language, 'en')
def test_remove_available_languages(self, mock_sync):
"""Form should allow removal of available language(s)."""
org = factories.Org(
available_languages=['en', 'es'],
language='en',
)
self.data['available_languages'] = ['en']
form = self.form_class(data=self.data, instance=org)
self.assertTrue(form.is_valid(), form.errors)
form.save()
org.refresh_from_db()
self.assertEqual(org.available_languages, ['en'])
self.assertEqual(org.language, 'en')
def test_remove_default_from_available(self, mock_sync):
"""Form should error if default language is removed from available languages."""
org = factories.Org(
available_languages=['en', 'es'],
language='en',
)
self.data['available_languages'] = ['es']
form = self.form_class(data=self.data, instance=org)
self.assertFalse(form.is_valid())
        self.assertEqual(form.errors.keys(), [NON_FIELD_ERRORS])
self.assertEqual(form.errors[NON_FIELD_ERRORS],
['Default language must be one of the languages '
'available for this organization.'])
def test_initial_google_analytics(self, mock_sync):
# The form gets initialized with the org's current code
org = factories.Org(google_analytics='UA-12345')
form = self.form_class(instance=org)
self.assertEqual(form.fields['google_analytics'].initial, 'UA-12345')
def test_change_google_analytics(self, mock_sync):
# You can change the GA code
org = factories.Org(google_analytics='UA-12345')
self.data['google_analytics'] = 'UA-54321'
form = self.form_class(instance=org, data=self.data)
self.assertTrue(form.is_valid())
form.save()
org.refresh_from_db()
self.assertEqual('UA-54321', org.google_analytics)
def test_validate_google_analytics(self, mock_sync):
# If entered, GA code must start with "UA"
self.data['google_analytics'] = 'XA-54321'
form = self.form_class(data=self.data)
self.assertFalse(form.is_valid())
self.assertIn('google_analytics', form.errors)
def test_unset_google_analytics(self, mock_sync):
# You can remove the tracking code by blanking the input field
org = factories.Org(google_analytics='UA-12345')
self.data['google_analytics'] = ''
form = self.form_class(instance=org, data=self.data)
self.assertTrue(form.is_valid())
form.save()
org.refresh_from_db()
self.assertEqual('', org.google_analytics)
class FetchRunsFormTest(TestCase):
def test_no_input(self):
# Should fail to validate
form = FetchRunsForm(data={})
self.assertFalse(form.is_valid())
self.assertIn('days', form.errors)
def test_all_zeroes(self):
# Should fail to validate
form = FetchRunsForm(data={'days': '0'})
self.assertFalse(form.is_valid())
self.assertIn('days', form.errors)
def test_non_numeric(self):
# Should fail to validate
form = FetchRunsForm(data={'days': 'zero'})
self.assertFalse(form.is_valid())
self.assertIn('days', form.errors)
def test_non_integer(self):
# should fail to validate
form = FetchRunsForm(data={'days': '1.1'})
self.assertFalse(form.is_valid())
self.assertIn('days', form.errors)
def test_some_numbers(self):
# should validate, return integers
form = FetchRunsForm(data={'days': '1'})
self.assertTrue(form.is_valid())
self.assertEqual({'days': 1}, form.cleaned_data)
class TestChangingHowRepeatedAnswersAreHandledFromLatestToSum(TracProDataTest):
how_to_handle_sameday_responses = SAMEDAY_LAST
def setUp(self):
super(TestChangingHowRepeatedAnswersAreHandledFromLatestToSum, self).setUp()
org = self.unicef
# 2 responses to the same question
self.response1 = factories.Response(
pollrun__poll__org=org,
contact__org=org,
created_on=now() - datetime.timedelta(minutes=1)
)
self.answer1 = factories.Answer(
response=self.response1,
question__poll=self.response1.pollrun.poll,
question__question_type=Question.TYPE_NUMERIC,
value=str(1.0),
submitted_on=now() - datetime.timedelta(minutes=1),
)
self.response2 = factories.Response(
pollrun__poll__org=org,
contact=self.response1.contact,
contact__org=org,
created_on=now()
)
self.answer2 = factories.Answer(
response=self.response2,
question=self.answer1.question,
value=str(3.0),
submitted_on=now()
)
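        # Two same-day answers (1.0 and 3.0) from the same contact: with
        # SAMEDAY_LAST only the later value (3.0) should be used, while
        # SAMEDAY_SUM should roll them up to 4.0, as the tests below assert.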
def test_unchanged(self):
self.assertEqual(SAMEDAY_LAST, self.unicef.how_to_handle_sameday_responses)
with mock.patch('tracpro.orgs_ext.forms.DataField'):
data = model_to_dict(self.unicef)
data.update(dict(
administrators=list(self.unicef.administrators.values_list('pk', flat=True)),
viewers=[self.superuser.pk],
available_languages=self.unicef.available_languages,
modified_by=self.unicef.modified_by_id,
name=self.unicef.name,
language=self.unicef.language,
how_to_handle_sameday_responses=SAMEDAY_LAST,
))
form = OrgExtForm(instance=self.unicef, data=data)
self.assertTrue(form.is_valid(), form.errors.as_data())
self.unicef.refresh_from_db()
self.assertEqual(SAMEDAY_LAST, self.unicef.how_to_handle_sameday_responses)
self.answer1.refresh_from_db()
self.assertEqual(str(3.0), self.answer1.value_to_use)
self.answer2.refresh_from_db()
self.assertEqual(str(3.0), self.answer2.value_to_use)
def test_changed_last_to_sum_sets_new_value(self):
self.assertEqual(SAMEDAY_LAST, self.unicef.how_to_handle_sameday_responses)
with mock.patch('tracpro.orgs_ext.forms.DataField'):
data = model_to_dict(self.unicef)
data.update(dict(
administrators=list(self.unicef.administrators.values_list('pk', flat=True)),
viewers=[self.superuser.pk],
available_languages=self.unicef.available_languages,
modified_by=self.unicef.modified_by_id,
name=self.unicef.name,
language=self.unicef.language,
how_to_handle_sameday_responses=SAMEDAY_SUM,
))
form = OrgExtForm(instance=self.unicef, data=data)
self.assertTrue(form.is_valid(), form.errors.as_data())
# Changed - should try to update answers
form.save()
self.unicef.refresh_from_db()
self.assertEqual(SAMEDAY_SUM, self.unicef.how_to_handle_sameday_responses)
self.answer1.refresh_from_db()
self.assertEqual(SAMEDAY_SUM, self.answer1.org.how_to_handle_sameday_responses)
self.assertEqual(str(4.0), self.answer1.value_to_use)
self.answer2.refresh_from_db()
self.assertEqual(SAMEDAY_SUM, self.answer2.org.how_to_handle_sameday_responses)
self.assertEqual(str(4.0), self.answer2.value_to_use)
class TestChangingHowRepeatedAnswersAreHandledFromSumToLatest(TracProDataTest):
how_to_handle_sameday_responses = SAMEDAY_SUM
def setUp(self):
super(TestChangingHowRepeatedAnswersAreHandledFromSumToLatest, self).setUp()
org = self.unicef
# 2 responses to the same question
self.response1 = factories.Response(
pollrun__poll__org=org,
contact__org=org,
created_on=now() - datetime.timedelta(minutes=1)
)
self.answer1 = factories.Answer(
response=self.response1,
question__poll=self.response1.pollrun.poll,
question__question_type=Question.TYPE_NUMERIC,
value=str(1.0),
submitted_on=now() - datetime.timedelta(minutes=1),
)
self.response2 = factories.Response(
pollrun__poll__org=org,
contact=self.response1.contact,
contact__org=org,
created_on=now()
)
self.answer2 = factories.Answer(
response=self.response2,
question=self.answer1.question,
value=str(3.0),
submitted_on=now()
)
def test_changed_sum_to_last_changes_value(self):
self.assertEqual(SAMEDAY_SUM, self.unicef.how_to_handle_sameday_responses)
self.answer1.refresh_from_db()
self.assertEqual(str(4.0), self.answer1.value_to_use)
self.answer2.refresh_from_db()
self.assertEqual(str(4.0), self.answer2.value_to_use)
with mock.patch('tracpro.orgs_ext.forms.DataField'):
data = model_to_dict(self.unicef)
data.update(dict(
administrators=list(self.unicef.administrators.values_list('pk', flat=True)),
viewers=[self.superuser.pk],
available_languages=self.unicef.available_languages,
modified_by=self.unicef.modified_by_id,
name=self.unicef.name,
language=self.unicef.language,
how_to_handle_sameday_responses=SAMEDAY_LAST,
))
form = OrgExtForm(instance=self.unicef, data=data)
self.assertTrue(form.is_valid(), form.errors.as_data())
# Changed - should try to update answers
form.save()
self.unicef.refresh_from_db()
self.assertEqual(SAMEDAY_LAST, self.unicef.how_to_handle_sameday_responses)
self.answer1.refresh_from_db()
self.assertEqual(str(3.0), self.answer1.value_to_use)
self.answer2.refresh_from_db()
self.assertEqual(str(3.0), self.answer2.value_to_use)
| bsd-3-clause | -6,527,262,072,748,828,000 | 41.568365 | 93 | 0.617836 | false |
gitcommitpush/django-mailtrail | runtests.py | 1 | 1253 | #!/usr/bin/env python
import os
import sys
import subprocess
import django
from coverage import coverage
from django.conf import settings
from django.test.utils import get_runner
FLAKE8_ARGS = ['mailtrail', 'tests/', '--ignore=E501']
def exit_on_failure(command, message=None):
if command:
sys.exit(command)
def flake8_main(args):
print('Running: flake8', FLAKE8_ARGS)
command = subprocess.call(['flake8'] + args)
print("" if command else "Success. flake8 passed.")
return command
def run_tests_coverage():
if __name__ == "__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
# Setup Coverage
cov = coverage(source=["mailtrail"], omit=["mailtrail/__init__.py"])
cov.start()
failures = test_runner.run_tests(["tests"])
if bool(failures):
cov.erase()
sys.exit("Tests Failed. Coverage Cancelled.")
# If success show coverage results
cov.stop()
cov.save()
cov.report()
cov.html_report(directory='covhtml')
exit_on_failure(flake8_main(FLAKE8_ARGS))
exit_on_failure(run_tests_coverage())
| mit | 252,067,481,946,622,180 | 24.06 | 76 | 0.630487 | false |